# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class Duration(object):
"""Duration operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def get_null(
            self, custom_headers=None, raw=False, **operation_config):
"""
Get null duration value
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: timedelta
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/duration/null'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('duration', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_positive_duration(
            self, duration_body, custom_headers=None, raw=False, **operation_config):
"""
Put a positive duration value
:param duration_body:
:type duration_body: timedelta
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/duration/positiveduration'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(duration_body, 'duration')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_positive_duration(
            self, custom_headers=None, raw=False, **operation_config):
"""
Get a positive duration value
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: timedelta
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/duration/positiveduration'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('duration', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_invalid(
            self, custom_headers=None, raw=False, **operation_config):
"""
Get an invalid duration value
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: timedelta
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/duration/invalid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('duration', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
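# Illustrative usage (a sketch, not part of the generated code): a Duration
# instance is normally obtained from the generated service client rather
# than constructed by hand; given one, a round trip looks like this.
def _example_duration_roundtrip(duration_ops):
    from datetime import timedelta
    duration_ops.put_positive_duration(timedelta(days=123, hours=22))
    value = duration_ops.get_positive_duration()
    assert isinstance(value, timedelta)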
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for network API."""
import itertools
import random
import mox
from nova.compute import flavors
from nova import context
from nova import exception
from nova import network
from nova.network import api
from nova.network import floating_ips
from nova.network import rpcapi as network_rpcapi
from nova import policy
from nova import test
from nova import utils
FAKE_UUID = 'a47ae74e-ab08-547f-9eee-ffd23fc46c16'
class NetworkPolicyTestCase(test.TestCase):
def setUp(self):
super(NetworkPolicyTestCase, self).setUp()
policy.reset()
policy.init()
self.context = context.get_admin_context()
def tearDown(self):
super(NetworkPolicyTestCase, self).tearDown()
policy.reset()
def test_check_policy(self):
self.mox.StubOutWithMock(policy, 'enforce')
target = {
'project_id': self.context.project_id,
'user_id': self.context.user_id,
}
policy.enforce(self.context, 'network:get_all', target)
self.mox.ReplayAll()
api.check_policy(self.context, 'get_all')
class ApiTestCase(test.TestCase):
def setUp(self):
super(ApiTestCase, self).setUp()
self.network_api = network.API()
self.context = context.RequestContext('fake-user',
'fake-project')
def test_allocate_for_instance_handles_macs_passed(self):
# If a macs argument is supplied to the 'nova-network' API, it is just
# ignored. This test checks that the call down to the rpcapi layer
# doesn't pass macs down: nova-network doesn't support hypervisor
# mac address limits (today anyhow).
macs = set(['ab:cd:ef:01:23:34'])
self.mox.StubOutWithMock(
self.network_api.network_rpcapi, "allocate_for_instance")
kwargs = dict(zip(['host', 'instance_id', 'project_id',
'requested_networks', 'rxtx_factor', 'vpn', 'macs',
'dhcp_options'],
itertools.repeat(mox.IgnoreArg())))
self.network_api.network_rpcapi.allocate_for_instance(
mox.IgnoreArg(), **kwargs).AndReturn([])
self.mox.ReplayAll()
inst_type = flavors.get_default_flavor()
inst_type['rxtx_factor'] = 0
sys_meta = flavors.save_flavor_info({}, inst_type)
instance = dict(id='id', uuid='uuid', project_id='project_id',
host='host', system_metadata=utils.dict_to_metadata(sys_meta))
self.network_api.allocate_for_instance(
self.context, instance, 'vpn', 'requested_networks', macs=macs)
def _do_test_associate_floating_ip(self, orig_instance_uuid):
"""Test post-association logic."""
new_instance = {'uuid': 'new-uuid'}
def fake_associate(*args, **kwargs):
return orig_instance_uuid
self.stubs.Set(floating_ips.FloatingIP, 'associate_floating_ip',
fake_associate)
def fake_instance_get_by_uuid(context, instance_uuid):
return {'uuid': instance_uuid}
self.stubs.Set(self.network_api.db, 'instance_get_by_uuid',
fake_instance_get_by_uuid)
def fake_get_nw_info(ctxt, instance):
class FakeNWInfo(object):
def json(self):
pass
return FakeNWInfo()
self.stubs.Set(self.network_api, '_get_instance_nw_info',
fake_get_nw_info)
if orig_instance_uuid:
expected_updated_instances = [new_instance['uuid'],
orig_instance_uuid]
else:
expected_updated_instances = [new_instance['uuid']]
def fake_instance_info_cache_update(context, instance_uuid, cache):
self.assertEqual(instance_uuid,
expected_updated_instances.pop())
self.stubs.Set(self.network_api.db, 'instance_info_cache_update',
fake_instance_info_cache_update)
self.network_api.associate_floating_ip(self.context,
new_instance,
'172.24.4.225',
'10.0.0.2')
def test_associate_preassociated_floating_ip(self):
self._do_test_associate_floating_ip('orig-uuid')
def test_associate_unassociated_floating_ip(self):
self._do_test_associate_floating_ip(None)
def _stub_migrate_instance_calls(self, method, multi_host, info):
fake_instance_type = flavors.get_default_flavor()
fake_instance_type['rxtx_factor'] = 1.21
sys_meta = utils.dict_to_metadata(
flavors.save_flavor_info({}, fake_instance_type))
fake_instance = {'uuid': 'fake_uuid',
'instance_type_id': fake_instance_type['id'],
'project_id': 'fake_project_id',
'system_metadata': sys_meta}
fake_migration = {'source_compute': 'fake_compute_source',
'dest_compute': 'fake_compute_dest'}
def fake_mig_inst_method(*args, **kwargs):
info['kwargs'] = kwargs
def fake_is_multi_host(*args, **kwargs):
return multi_host
def fake_get_floaters(*args, **kwargs):
return ['fake_float1', 'fake_float2']
self.stubs.Set(network_rpcapi.NetworkAPI, method,
fake_mig_inst_method)
self.stubs.Set(self.network_api, '_is_multi_host',
fake_is_multi_host)
self.stubs.Set(self.network_api, '_get_floating_ip_addresses',
fake_get_floaters)
expected = {'instance_uuid': 'fake_uuid',
'source_compute': 'fake_compute_source',
'dest_compute': 'fake_compute_dest',
'rxtx_factor': 1.21,
'project_id': 'fake_project_id',
'floating_addresses': None}
if multi_host:
expected['floating_addresses'] = ['fake_float1', 'fake_float2']
return fake_instance, fake_migration, expected
def test_migrate_instance_start_with_multhost(self):
info = {'kwargs': {}}
arg1, arg2, expected = self._stub_migrate_instance_calls(
'migrate_instance_start', True, info)
expected['host'] = 'fake_compute_source'
self.network_api.migrate_instance_start(self.context, arg1, arg2)
self.assertEqual(info['kwargs'], expected)
def test_migrate_instance_start_without_multhost(self):
info = {'kwargs': {}}
arg1, arg2, expected = self._stub_migrate_instance_calls(
'migrate_instance_start', False, info)
self.network_api.migrate_instance_start(self.context, arg1, arg2)
self.assertEqual(info['kwargs'], expected)
def test_migrate_instance_finish_with_multhost(self):
info = {'kwargs': {}}
arg1, arg2, expected = self._stub_migrate_instance_calls(
'migrate_instance_finish', True, info)
expected['host'] = 'fake_compute_dest'
self.network_api.migrate_instance_finish(self.context, arg1, arg2)
self.assertEqual(info['kwargs'], expected)
def test_migrate_instance_finish_without_multhost(self):
info = {'kwargs': {}}
arg1, arg2, expected = self._stub_migrate_instance_calls(
'migrate_instance_finish', False, info)
self.network_api.migrate_instance_finish(self.context, arg1, arg2)
self.assertEqual(info['kwargs'], expected)
def test_is_multi_host_instance_has_no_fixed_ip(self):
def fake_fixed_ip_get_by_instance(ctxt, uuid):
raise exception.FixedIpNotFoundForInstance(instance_uuid=uuid)
self.stubs.Set(self.network_api.db, 'fixed_ip_get_by_instance',
fake_fixed_ip_get_by_instance)
instance = {'uuid': FAKE_UUID}
self.assertFalse(self.network_api._is_multi_host(self.context,
instance))
def test_is_multi_host_network_has_no_project_id(self):
is_multi_host = random.choice([True, False])
network = {'project_id': None,
'multi_host': is_multi_host, }
network_ref = self.network_api.db.network_create_safe(
self.context.elevated(),
network)
def fake_fixed_ip_get_by_instance(ctxt, uuid):
fixed_ip = [{'network_id': network_ref['id'],
'instance_uuid': FAKE_UUID, }]
return fixed_ip
self.stubs.Set(self.network_api.db, 'fixed_ip_get_by_instance',
fake_fixed_ip_get_by_instance)
instance = {'uuid': FAKE_UUID}
result = self.network_api._is_multi_host(self.context, instance)
self.assertEqual(is_multi_host, result)
def test_is_multi_host_network_has_project_id(self):
is_multi_host = random.choice([True, False])
network = {'project_id': self.context.project_id,
'multi_host': is_multi_host, }
network_ref = self.network_api.db.network_create_safe(
self.context.elevated(),
network)
def fake_fixed_ip_get_by_instance(ctxt, uuid):
fixed_ip = [{'network_id': network_ref['id'],
'instance_uuid': FAKE_UUID, }]
return fixed_ip
self.stubs.Set(self.network_api.db, 'fixed_ip_get_by_instance',
fake_fixed_ip_get_by_instance)
instance = {'uuid': FAKE_UUID}
result = self.network_api._is_multi_host(self.context, instance)
self.assertEqual(is_multi_host, result)
def test_network_disassociate_project(self):
def fake_network_disassociate(ctx, network_id, disassociate_host,
disassociate_project):
self.assertEqual(network_id, 1)
self.assertEqual(disassociate_host, False)
self.assertEqual(disassociate_project, True)
def fake_get(context, network_uuid):
return {'id': 1}
self.stubs.Set(self.network_api.db, 'network_disassociate',
fake_network_disassociate)
self.stubs.Set(self.network_api, 'get', fake_get)
self.network_api.associate(self.context, FAKE_UUID, project=None)
# RFC 2822 - style email validation for Python
# (c) 2012 Syrus Akbary <me@syrusakbary.com>
# Extended from (c) 2011 Noel Bush <noel@aitools.org>
# for support of mx and user check
# This code is made available to you under the GNU LGPL v3.
#
# This module provides a single function, validate_email(),
# which returns True or False to indicate whether a given address
# is valid according to the 'addr-spec' part of the specification
# given in RFC 2822. Ideally, we would like to find this
# in some other library, already thoroughly tested and well-
# maintained. The standard Python library email.utils
# contains a parseaddr() function, but it is not sufficient
# to detect many malformed addresses.
#
# This implementation aims to be faithful to the RFC, with the
# exception of a circular definition (see comments below), and
# with the omission of the pattern components marked as "obsolete".
import re
import smtplib
import logging
import socket
try:
raw_input
except NameError:
def raw_input(prompt=''):
return input(prompt)
try:
import DNS
ServerError = DNS.ServerError
DNS.DiscoverNameServers()
except ImportError:
DNS = None
class ServerError(Exception):
pass
# All we are really doing is comparing the input string to one
# gigantic regular expression. But building that regexp, and
# ensuring its correctness, is made much easier by assembling it
# from the "tokens" defined by the RFC. Each of these tokens is
# tested in the accompanying unit test file.
#
# The section of RFC 2822 from which each pattern component is
# derived is given in an accompanying comment.
#
# (To make things simple, every string below is given as 'raw',
# even when it's not strictly necessary. This way we don't forget
# when it is necessary.)
#
WSP = r'[ \t]' # see 2.2.2. Structured Header Field Bodies
CRLF = r'(?:\r\n)' # see 2.2.3. Long Header Fields
NO_WS_CTL = r'\x01-\x08\x0b\x0c\x0f-\x1f\x7f' # see 3.2.1. Primitive Tokens
QUOTED_PAIR = r'(?:\\.)' # see 3.2.2. Quoted characters
FWS = r'(?:(?:' + WSP + r'*' + CRLF + r')?' + \
WSP + r'+)' # see 3.2.3. Folding white space and comments
CTEXT = r'[' + NO_WS_CTL + \
r'\x21-\x27\x2a-\x5b\x5d-\x7e]' # see 3.2.3
CCONTENT = r'(?:' + CTEXT + r'|' + \
QUOTED_PAIR + r')' # see 3.2.3 (NB: The RFC includes COMMENT here
# as well, but that would be circular.)
COMMENT = r'\((?:' + FWS + r'?' + CCONTENT + \
r')*' + FWS + r'?\)' # see 3.2.3
CFWS = r'(?:' + FWS + r'?' + COMMENT + ')*(?:' + \
FWS + '?' + COMMENT + '|' + FWS + ')' # see 3.2.3
ATEXT = r'[\w!#$%&\'\*\+\-/=\?\^`\{\|\}~]' # see 3.2.4. Atom
ATOM = CFWS + r'?' + ATEXT + r'+' + CFWS + r'?' # see 3.2.4
DOT_ATOM_TEXT = ATEXT + r'+(?:\.' + ATEXT + r'+)*' # see 3.2.4
DOT_ATOM = CFWS + r'?' + DOT_ATOM_TEXT + CFWS + r'?' # see 3.2.4
QTEXT = r'[' + NO_WS_CTL + \
r'\x21\x23-\x5b\x5d-\x7e]' # see 3.2.5. Quoted strings
QCONTENT = r'(?:' + QTEXT + r'|' + \
QUOTED_PAIR + r')' # see 3.2.5
QUOTED_STRING = CFWS + r'?' + r'"(?:' + FWS + \
r'?' + QCONTENT + r')*' + FWS + \
r'?' + r'"' + CFWS + r'?'
LOCAL_PART = r'(?:' + DOT_ATOM + r'|' + \
QUOTED_STRING + r')' # see 3.4.1. Addr-spec specification
DTEXT = r'[' + NO_WS_CTL + r'\x21-\x5a\x5e-\x7e]' # see 3.4.1
DCONTENT = r'(?:' + DTEXT + r'|' + \
QUOTED_PAIR + r')' # see 3.4.1
DOMAIN_LITERAL = CFWS + r'?' + r'\[' + \
r'(?:' + FWS + r'?' + DCONTENT + \
r')*' + FWS + r'?\]' + CFWS + r'?' # see 3.4.1
DOMAIN = r'(?:' + DOT_ATOM + r'|' + \
DOMAIN_LITERAL + r')' # see 3.4.1
ADDR_SPEC = LOCAL_PART + r'@' + DOMAIN # see 3.4.1
# A valid address will match exactly the 3.4.1 addr-spec.
VALID_ADDRESS_REGEXP = '^' + ADDR_SPEC + '$'
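# A minimal self-check (an illustrative addition, not part of the original
# module): the assembled pattern accepts a plain addr-spec and, because it
# is anchored with ^ and $, rejects strings that are not addresses.
def _example_regexp_usage():
    assert re.match(VALID_ADDRESS_REGEXP, 'user@example.com') is not None
    assert re.match(VALID_ADDRESS_REGEXP, 'no-at-sign.example.com') is None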
MX_DNS_CACHE = {}
MX_CHECK_CACHE = {}
def get_mx_ip(hostname):
if hostname not in MX_DNS_CACHE:
try:
MX_DNS_CACHE[hostname] = DNS.mxlookup(hostname)
except ServerError as e:
if e.rcode == 3: # NXDOMAIN (Non-Existent Domain)
MX_DNS_CACHE[hostname] = None
else:
raise
return MX_DNS_CACHE[hostname]
def validate_email(email, check_mx=False, verify=False, debug=False, smtp_timeout=10):
"""Indicate whether the given string is a valid email address
according to the 'addr-spec' portion of RFC 2822 (see section
3.4.1). Parts of the spec that are marked obsolete are *not*
included in this test, and certain arcane constructions that
depend on circular definitions in the spec may not pass, but in
general this should correctly identify any email address likely
to be in use as of 2011."""
if debug:
logger = logging.getLogger('validate_email')
logger.setLevel(logging.DEBUG)
else:
logger = None
try:
assert re.match(VALID_ADDRESS_REGEXP, email) is not None
check_mx |= verify
if check_mx:
if not DNS:
                raise Exception('To check the MX records or to verify that the email '
                                'exists, you must install the pyDNS package')
hostname = email[email.find('@') + 1:]
mx_hosts = get_mx_ip(hostname)
if mx_hosts is None:
return False
for mx in mx_hosts:
try:
if not verify and mx[1] in MX_CHECK_CACHE:
return MX_CHECK_CACHE[mx[1]]
smtp = smtplib.SMTP(timeout=smtp_timeout)
smtp.connect(mx[1])
MX_CHECK_CACHE[mx[1]] = True
if not verify:
try:
smtp.quit()
except smtplib.SMTPServerDisconnected:
pass
return True
status, _ = smtp.helo()
if status != 250:
smtp.quit()
if debug:
logger.debug(u'%s answer: %s - %s', mx[1], status, _)
continue
smtp.mail('')
status, _ = smtp.rcpt(email)
if status == 250:
smtp.quit()
return True
if debug:
logger.debug(u'%s answer: %s - %s', mx[1], status, _)
smtp.quit()
            except smtplib.SMTPServerDisconnected:  # Server does not permit verifying the user
if debug:
                    logger.debug(u'%s disconnected.', mx[1])
except smtplib.SMTPConnectError:
if debug:
logger.debug(u'Unable to connect to %s.', mx[1])
return None
except AssertionError:
return False
except (ServerError, socket.error) as e:
if debug:
logger.debug('ServerError or socket.error exception raised (%s).', e)
return None
return True
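# Programmatic usage (an illustrative addition, not part of the original
# module): with the defaults only the syntax is checked and a bool comes
# back; with check_mx or verify the result may also be None when DNS or
# SMTP problems leave the answer unknown.
def _example_validate_email_usage():
    assert validate_email('user@example.com') is True
    assert validate_email('not-an-address') is False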
if __name__ == "__main__":
import time
while True:
email = raw_input('Enter email for validation: ')
mx = raw_input('Validate MX record? [yN] ')
if mx.strip().lower() == 'y':
mx = True
else:
mx = False
validate = raw_input('Try to contact server for address validation? [yN] ')
if validate.strip().lower() == 'y':
validate = True
else:
validate = False
logging.basicConfig()
result = validate_email(email, mx, validate, debug=True, smtp_timeout=1)
if result:
print("Valid!")
elif result is None:
print("I'm not sure.")
else:
print("Invalid!")
time.sleep(1)
# import sys
# sys.modules[__name__],sys.modules['validate_email_module'] = validate_email,sys.modules[__name__]
# from validate_email_module import *
# Copyright 2017, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from google.api_core import operation
from google.api_core import operations_v1
from google.longrunning import operations_pb2
from google.protobuf import struct_pb2
from google.rpc import code_pb2
from google.rpc import status_pb2
TEST_OPERATION_NAME = "test/operation"
def make_operation_proto(
name=TEST_OPERATION_NAME, metadata=None, response=None, error=None, **kwargs
):
operation_proto = operations_pb2.Operation(name=name, **kwargs)
if metadata is not None:
operation_proto.metadata.Pack(metadata)
if response is not None:
operation_proto.response.Pack(response)
if error is not None:
operation_proto.error.CopyFrom(error)
return operation_proto
def make_operation_future(client_operations_responses=None):
if client_operations_responses is None:
client_operations_responses = [make_operation_proto()]
refresh = mock.Mock(spec=["__call__"], side_effect=client_operations_responses)
refresh.responses = client_operations_responses
cancel = mock.Mock(spec=["__call__"])
operation_future = operation.Operation(
client_operations_responses[0],
refresh,
cancel,
result_type=struct_pb2.Struct,
metadata_type=struct_pb2.Struct,
)
return operation_future, refresh, cancel
def test_constructor():
future, refresh, _ = make_operation_future()
assert future.operation == refresh.responses[0]
assert future.operation.done is False
assert future.operation.name == TEST_OPERATION_NAME
assert future.metadata is None
assert future.running()
def test_metadata():
expected_metadata = struct_pb2.Struct()
future, _, _ = make_operation_future(
[make_operation_proto(metadata=expected_metadata)]
)
assert future.metadata == expected_metadata
def test_cancellation():
responses = [
make_operation_proto(),
# Second response indicates that the operation was cancelled.
make_operation_proto(
done=True, error=status_pb2.Status(code=code_pb2.CANCELLED)
),
]
future, _, cancel = make_operation_future(responses)
assert future.cancel()
assert future.cancelled()
cancel.assert_called_once_with()
# Cancelling twice should have no effect.
assert not future.cancel()
cancel.assert_called_once_with()
def test_result():
expected_result = struct_pb2.Struct()
responses = [
make_operation_proto(),
# Second operation response includes the result.
make_operation_proto(done=True, response=expected_result),
]
future, _, _ = make_operation_future(responses)
result = future.result()
assert result == expected_result
assert future.done()
def test_exception():
expected_exception = status_pb2.Status(message="meep")
responses = [
make_operation_proto(),
# Second operation response includes the error.
make_operation_proto(done=True, error=expected_exception),
]
future, _, _ = make_operation_future(responses)
exception = future.exception()
assert expected_exception.message in "{!r}".format(exception)
def test_unexpected_result():
responses = [
make_operation_proto(),
        # Second operation response is done, but has no error or response.
make_operation_proto(done=True),
]
future, _, _ = make_operation_future(responses)
exception = future.exception()
assert "Unexpected state" in "{!r}".format(exception)
def test__refresh_http():
api_request = mock.Mock(return_value={"name": TEST_OPERATION_NAME, "done": True})
result = operation._refresh_http(api_request, TEST_OPERATION_NAME)
assert result.name == TEST_OPERATION_NAME
assert result.done is True
api_request.assert_called_once_with(
method="GET", path="operations/{}".format(TEST_OPERATION_NAME)
)
def test__cancel_http():
api_request = mock.Mock()
operation._cancel_http(api_request, TEST_OPERATION_NAME)
api_request.assert_called_once_with(
method="POST", path="operations/{}:cancel".format(TEST_OPERATION_NAME)
)
def test_from_http_json():
operation_json = {"name": TEST_OPERATION_NAME, "done": True}
api_request = mock.sentinel.api_request
future = operation.from_http_json(
operation_json, api_request, struct_pb2.Struct, metadata_type=struct_pb2.Struct
)
assert future._result_type == struct_pb2.Struct
assert future._metadata_type == struct_pb2.Struct
assert future.operation.name == TEST_OPERATION_NAME
assert future.done
def test__refresh_grpc():
operations_stub = mock.Mock(spec=["GetOperation"])
expected_result = make_operation_proto(done=True)
operations_stub.GetOperation.return_value = expected_result
result = operation._refresh_grpc(operations_stub, TEST_OPERATION_NAME)
assert result == expected_result
expected_request = operations_pb2.GetOperationRequest(name=TEST_OPERATION_NAME)
operations_stub.GetOperation.assert_called_once_with(expected_request)
def test__cancel_grpc():
operations_stub = mock.Mock(spec=["CancelOperation"])
operation._cancel_grpc(operations_stub, TEST_OPERATION_NAME)
expected_request = operations_pb2.CancelOperationRequest(name=TEST_OPERATION_NAME)
operations_stub.CancelOperation.assert_called_once_with(expected_request)
def test_from_grpc():
operation_proto = make_operation_proto(done=True)
operations_stub = mock.sentinel.operations_stub
future = operation.from_grpc(
operation_proto,
operations_stub,
struct_pb2.Struct,
metadata_type=struct_pb2.Struct,
)
assert future._result_type == struct_pb2.Struct
assert future._metadata_type == struct_pb2.Struct
assert future.operation.name == TEST_OPERATION_NAME
assert future.done
def test_from_gapic():
operation_proto = make_operation_proto(done=True)
operations_client = mock.create_autospec(
operations_v1.OperationsClient, instance=True
)
future = operation.from_gapic(
operation_proto,
operations_client,
struct_pb2.Struct,
metadata_type=struct_pb2.Struct,
)
assert future._result_type == struct_pb2.Struct
assert future._metadata_type == struct_pb2.Struct
assert future.operation.name == TEST_OPERATION_NAME
assert future.done
"""
The OffsetBox is a simple container artist. Its child artists are meant
to be drawn at a position relative to their parent. The [VH]Packer,
DrawingArea and TextArea are derived from the OffsetBox.
The [VH]Packer automatically adjusts the relative positions of its
children, which should be instances of the OffsetBox. This is used to
align similar artists together, e.g., in legend.
The DrawingArea can contain any Artist as a child. The
DrawingArea has a fixed width and height. The position of children
relative to the parent is fixed. The TextArea contains a single
Text instance. The width and height of the TextArea instance are the
width and height of its child text.
"""
from __future__ import print_function
import matplotlib.transforms as mtransforms
import matplotlib.artist as martist
import matplotlib.text as mtext
import numpy as np
from matplotlib.transforms import Bbox, BboxBase, TransformedBbox, \
IdentityTransform, BboxTransformFrom
from matplotlib.font_manager import FontProperties
from matplotlib.patches import FancyBboxPatch, FancyArrowPatch
from matplotlib import rcParams
from matplotlib import docstring
import matplotlib.cbook as cbook
#from bboximage import BboxImage
from matplotlib.image import BboxImage
from matplotlib.patches import bbox_artist as mbbox_artist
DEBUG = False
# for debugging use
def bbox_artist(*args, **kwargs):
if DEBUG:
mbbox_artist(*args, **kwargs)
# _get_packed_offsets() and _get_aligned_offsets() are coded assuming
# that we are packing boxes horizontally. But the same functions are
# used for vertical packing as well.
def _get_packed_offsets(wd_list, total, sep, mode="fixed"):
"""
    Given a list of (width, xdescent) of each box, calculate the
    total width and the x-offset positions of each item according to
    *mode*. xdescent is analogous to the usual descent, but along the
    x-direction. xdescent values are currently ignored.
    *wd_list* : list of (width, xdescent) of boxes to be packed.
    *sep* : spacing between boxes
    *total* : Intended total length. None if not used.
    *mode* : packing mode. 'fixed', 'expand', or 'equal'.
"""
w_list, d_list = zip(*wd_list)
# d_list is currently not used.
if mode == "fixed":
offsets_ = np.add.accumulate([0] + [w + sep for w in w_list])
offsets = offsets_[:-1]
if total is None:
total = offsets_[-1] - sep
return total, offsets
elif mode == "expand":
sep = (total - sum(w_list)) / (len(w_list) - 1.)
offsets_ = np.add.accumulate([0] + [w + sep for w in w_list])
offsets = offsets_[:-1]
return total, offsets
elif mode == "equal":
maxh = max(w_list)
if total is None:
total = (maxh + sep) * len(w_list)
else:
sep = float(total) / (len(w_list)) - maxh
offsets = np.array([(maxh + sep) * i for i in range(len(w_list))])
return total, offsets
else:
raise ValueError("Unknown mode : %s" % (mode,))
def _get_aligned_offsets(hd_list, height, align="baseline"):
"""
    Given a list of (height, descent) of each box, align the boxes
    with *align* and calculate the y-offsets of each box.
    *hd_list* : list of (height, descent) of boxes to be aligned.
    *height* : intended total height. None if not used.
    *align* : align mode. 'baseline', 'top', 'bottom', or 'center'.
"""
if height is None:
height = max([h for h, d in hd_list])
if align == "baseline":
height_descent = max([h - d for h, d in hd_list])
descent = max([d for h, d in hd_list])
height = height_descent + descent
offsets = [0. for h, d in hd_list]
elif align in ["left", "top"]:
descent = 0.
offsets = [d for h, d in hd_list]
elif align in ["right", "bottom"]:
descent = 0.
offsets = [height - h + d for h, d in hd_list]
elif align == "center":
descent = 0.
offsets = [(height - h) * .5 + d for h, d in hd_list]
else:
raise ValueError("Unknown Align mode : %s" % (align,))
return height, descent, offsets
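# Worked example (an illustrative addition, not part of the original module):
def _example_aligned_offsets():
    # Two boxes of (height, descent) (10, 2) and (20, 5) aligned on their
    # baselines share descent 5 inside a total height of 20, and both get
    # a zero y-offset.
    height, descent, offsets = _get_aligned_offsets([(10, 2), (20, 5)], None)
    assert (height, descent, offsets) == (20, 5, [0.0, 0.0])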
class OffsetBox(martist.Artist):
"""
    The OffsetBox is a simple container artist. Its child artists are
    meant to be drawn at a position relative to their parent.
"""
def __init__(self, *args, **kwargs):
super(OffsetBox, self).__init__(*args, **kwargs)
self._children = []
self._offset = (0, 0)
def __getstate__(self):
state = martist.Artist.__getstate__(self)
# pickle cannot save instancemethods, so handle them here
from cbook import _InstanceMethodPickler
import inspect
offset = state['_offset']
if inspect.ismethod(offset):
state['_offset'] = _InstanceMethodPickler(offset)
return state
def __setstate__(self, state):
self.__dict__ = state
from cbook import _InstanceMethodPickler
if isinstance(self._offset, _InstanceMethodPickler):
self._offset = self._offset.get_instancemethod()
def set_figure(self, fig):
"""
Set the figure
        accepts a :class:`~matplotlib.figure.Figure` instance
"""
martist.Artist.set_figure(self, fig)
for c in self.get_children():
c.set_figure(fig)
def contains(self, mouseevent):
for c in self.get_children():
a, b = c.contains(mouseevent)
if a:
return a, b
return False, {}
def set_offset(self, xy):
"""
        Set the offset.
        Accepts an (x, y) tuple or a callable object.
"""
self._offset = xy
def get_offset(self, width, height, xdescent, ydescent, renderer):
"""
        Get the offset.
        Accepts the extent of the box.
"""
if callable(self._offset):
return self._offset(width, height, xdescent, ydescent, renderer)
else:
return self._offset
def set_width(self, width):
"""
Set the width
accepts float
"""
self.width = width
def set_height(self, height):
"""
Set the height
accepts float
"""
self.height = height
def get_visible_children(self):
"""
Return a list of visible artists it contains.
"""
return [c for c in self._children if c.get_visible()]
def get_children(self):
"""
Return a list of artists it contains.
"""
return self._children
def get_extent_offsets(self, renderer):
raise Exception("")
def get_extent(self, renderer):
"""
        Return width, height, xdescent, ydescent of the box.
"""
w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
return w, h, xd, yd
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
px, py = self.get_offset(w, h, xd, yd, renderer)
return mtransforms.Bbox.from_bounds(px - xd, py - yd, w, h)
def draw(self, renderer):
"""
Update the location of children if necessary and draw them
to the given *renderer*.
"""
width, height, xdescent, ydescent, offsets = self.get_extent_offsets(renderer)
px, py = self.get_offset(width, height, xdescent, ydescent, renderer)
for c, (ox, oy) in zip(self.get_visible_children(), offsets):
c.set_offset((px + ox, py + oy))
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class PackerBase(OffsetBox):
def __init__(self, pad=None, sep=None, width=None, height=None,
align=None, mode=None,
children=None):
"""
*pad* : boundary pad
*sep* : spacing between items
*width*, *height* : width and height of the container box.
calculated if None.
*align* : alignment of boxes. Can be one of 'top', 'bottom',
'left', 'right', 'center' and 'baseline'
*mode* : packing mode
.. note::
            *pad* and *sep* need to be given in points and will be
            scaled with the renderer dpi, while *width* and *height*
            need to be in pixels.
"""
super(PackerBase, self).__init__()
self.height = height
self.width = width
self.sep = sep
self.pad = pad
self.mode = mode
self.align = align
self._children = children
class VPacker(PackerBase):
"""
    The VPacker has its children packed vertically. It automatically
    adjusts the relative positions of its children at draw time.
"""
def __init__(self, pad=None, sep=None, width=None, height=None,
align="baseline", mode="fixed",
children=None):
"""
*pad* : boundary pad
*sep* : spacing between items
*width*, *height* : width and height of the container box.
calculated if None.
*align* : alignment of boxes
*mode* : packing mode
.. note::
            *pad* and *sep* need to be given in points and will be
            scaled with the renderer dpi, while *width* and *height*
            need to be in pixels.
"""
super(VPacker, self).__init__(pad, sep, width, height,
align, mode,
children)
def get_extent_offsets(self, renderer):
"""
        Update the offsets of the children and return the extents of the box.
"""
dpicor = renderer.points_to_pixels(1.)
pad = self.pad * dpicor
sep = self.sep * dpicor
if self.width is not None:
for c in self.get_visible_children():
if isinstance(c, PackerBase) and c.mode == "expand":
c.set_width(self.width)
whd_list = [c.get_extent(renderer) for c in self.get_visible_children()]
whd_list = [(w, h, xd, (h - yd)) for w, h, xd, yd in whd_list]
wd_list = [(w, xd) for w, h, xd, yd in whd_list]
width, xdescent, xoffsets = _get_aligned_offsets(wd_list,
self.width,
self.align)
pack_list = [(h, yd) for w, h, xd, yd in whd_list]
height, yoffsets_ = _get_packed_offsets(pack_list, self.height,
sep, self.mode)
yoffsets = yoffsets_ + [yd for w, h, xd, yd in whd_list]
ydescent = height - yoffsets[0]
yoffsets = height - yoffsets
#w, h, xd, h_yd = whd_list[-1]
yoffsets = yoffsets - ydescent
return width + 2 * pad, height + 2 * pad, \
xdescent + pad, ydescent + pad, \
zip(xoffsets, yoffsets)
class HPacker(PackerBase):
"""
The HPacker has its children packed horizontally. It automatically
adjusts the relative positions of children at draw time.
"""
def __init__(self, pad=None, sep=None, width=None, height=None,
align="baseline", mode="fixed",
children=None):
"""
*pad* : boundary pad
*sep* : spacing between items
*width*, *height* : width and height of the container box.
calculated if None.
*align* : alignment of boxes
*mode* : packing mode
.. note::
            *pad* and *sep* need to be given in points and will be
            scaled with the renderer dpi, while *width* and *height*
            need to be in pixels.
"""
super(HPacker, self).__init__(pad, sep, width, height,
align, mode, children)
def get_extent_offsets(self, renderer):
"""
        Update the offsets of the children and return the extents of the box.
"""
dpicor = renderer.points_to_pixels(1.)
pad = self.pad * dpicor
sep = self.sep * dpicor
whd_list = [c.get_extent(renderer) for c in self.get_visible_children()]
if not whd_list:
return 2 * pad, 2 * pad, pad, pad, []
if self.height is None:
height_descent = max([h - yd for w, h, xd, yd in whd_list])
ydescent = max([yd for w, h, xd, yd in whd_list])
height = height_descent + ydescent
else:
            height = self.height - 2 * pad  # height w/o pad
hd_list = [(h, yd) for w, h, xd, yd in whd_list]
height, ydescent, yoffsets = _get_aligned_offsets(hd_list,
self.height,
self.align)
pack_list = [(w, xd) for w, h, xd, yd in whd_list]
width, xoffsets_ = _get_packed_offsets(pack_list, self.width,
sep, self.mode)
xoffsets = xoffsets_ + [xd for w, h, xd, yd in whd_list]
xdescent = whd_list[0][2]
xoffsets = xoffsets - xdescent
return width + 2 * pad, height + 2 * pad, \
xdescent + pad, ydescent + pad, \
zip(xoffsets, yoffsets)
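# Illustrative usage (a sketch, not part of the original module; *ax* is
# assumed to be an existing Axes): pack two TextArea children horizontally
# and anchor the result on the axes like a legend.
def _example_hpacker_usage(ax):
    box = HPacker(children=[TextArea("foo"), TextArea("bar")],
                  align="baseline", pad=0., sep=2.)
    ax.add_artist(AnchoredOffsetbox(loc=1, child=box, frameon=True))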
class PaddedBox(OffsetBox):
def __init__(self, child, pad=None, draw_frame=False, patch_attrs=None):
"""
*pad* : boundary pad
.. note::
            *pad* needs to be given in points and will be
            scaled with the renderer dpi, while *width* and *height*
            need to be in pixels.
"""
super(PaddedBox, self).__init__()
self.pad = pad
self._children = [child]
self.patch = FancyBboxPatch(
xy=(0.0, 0.0), width=1., height=1.,
facecolor='w', edgecolor='k',
mutation_scale=1, #self.prop.get_size_in_points(),
snap=True
)
self.patch.set_boxstyle("square", pad=0)
if patch_attrs is not None:
self.patch.update(patch_attrs)
self._drawFrame = draw_frame
def get_extent_offsets(self, renderer):
"""
        Update the offsets of the children and return the extents of the box.
"""
dpicor = renderer.points_to_pixels(1.)
pad = self.pad * dpicor
w, h, xd, yd = self._children[0].get_extent(renderer)
return w + 2 * pad, h + 2 * pad, \
xd + pad, yd + pad, \
[(0, 0)]
def draw(self, renderer):
"""
Update the location of children if necessary and draw them
to the given *renderer*.
"""
width, height, xdescent, ydescent, offsets = self.get_extent_offsets(renderer)
px, py = self.get_offset(width, height, xdescent, ydescent, renderer)
for c, (ox, oy) in zip(self.get_visible_children(), offsets):
c.set_offset((px + ox, py + oy))
self.draw_frame(renderer)
for c in self.get_visible_children():
c.draw(renderer)
#bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
def update_frame(self, bbox, fontsize=None):
self.patch.set_bounds(bbox.x0, bbox.y0,
bbox.width, bbox.height)
if fontsize:
self.patch.set_mutation_scale(fontsize)
def draw_frame(self, renderer):
# update the location and size of the legend
bbox = self.get_window_extent(renderer)
self.update_frame(bbox)
if self._drawFrame:
self.patch.draw(renderer)
class DrawingArea(OffsetBox):
"""
The DrawingArea can contain any Artist as a child. The DrawingArea
has a fixed width and height. The position of children relative to
the parent is fixed.
"""
def __init__(self, width, height, xdescent=0.,
ydescent=0., clip=True):
"""
*width*, *height* : width and height of the container box.
*xdescent*, *ydescent* : descent of the box in x- and y-direction.
"""
super(DrawingArea, self).__init__()
self.width = width
self.height = height
self.xdescent = xdescent
self.ydescent = ydescent
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
self.dpi_transform = mtransforms.Affine2D()
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` applied
to the children
"""
return self.dpi_transform + self.offset_transform
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
        Accept : tuple of (x, y) coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() #w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)
def get_extent(self, renderer):
"""
        Return width, height, xdescent, ydescent of the box.
"""
dpi_cor = renderer.points_to_pixels(1.)
return self.width * dpi_cor, self.height * dpi_cor, \
self.xdescent * dpi_cor, self.ydescent * dpi_cor
def add_artist(self, a):
'Add any :class:`~matplotlib.artist.Artist` to the container box'
self._children.append(a)
if not a.is_transform_set():
a.set_transform(self.get_transform())
def draw(self, renderer):
"""
Draw the children
"""
dpi_cor = renderer.points_to_pixels(1.)
self.dpi_transform.clear()
self.dpi_transform.scale(dpi_cor, dpi_cor)
for c in self._children:
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
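# Illustrative usage (a sketch, not part of the original module; *ax* is
# assumed to be an existing Axes): give child artists a fixed 20x20-point
# canvas whose on-figure position is resolved at draw time.
def _example_drawing_area_usage(ax):
    from matplotlib.patches import Circle
    da = DrawingArea(20, 20, 0, 0)
    da.add_artist(Circle((10, 10), 10, fc="r"))
    ax.add_artist(AnchoredOffsetbox(loc=2, child=da))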
class TextArea(OffsetBox):
"""
    The TextArea contains a single Text instance. The text is
    placed at (0, 0) with baseline+left alignment. The width and height
    of the TextArea instance are the width and height of its child
    text.
"""
def __init__(self, s,
textprops=None,
multilinebaseline=None,
minimumdescent=True,
):
"""
*s* : a string to be displayed.
*textprops* : property dictionary for the text
        *multilinebaseline* : If True, the baseline for multiline text is
        adjusted so that it is (approximately)
        center-aligned with single-line text.
*minimumdescent* : If True, the box has a minimum descent of "p".
"""
if textprops is None:
textprops = {}
if "va" not in textprops:
textprops["va"] = "baseline"
self._text = mtext.Text(0, 0, s, **textprops)
OffsetBox.__init__(self)
self._children = [self._text]
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
self._baseline_transform = mtransforms.Affine2D()
self._text.set_transform(self.offset_transform + self._baseline_transform)
self._multilinebaseline = multilinebaseline
self._minimumdescent = minimumdescent
def set_text(self, s):
"set text"
self._text.set_text(s)
def get_text(self):
"get text"
return self._text.get_text()
def set_multilinebaseline(self, t):
"""
        Set multilinebaseline.
        If True, the baseline for multiline text is
        adjusted so that it is (approximately) center-aligned with
        single-line text.
"""
self._multilinebaseline = t
def get_multilinebaseline(self):
"""
        Get multilinebaseline.
"""
return self._multilinebaseline
def set_minimumdescent(self, t):
"""
        Set minimumdescent.
        If True, the extent of single-line text is adjusted so that
        it has the minimum descent of "p".
"""
self._minimumdescent = t
def get_minimumdescent(self):
"""
get minimumdescent.
"""
return self._minimumdescent
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
Accept : tuple of x,y coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() #w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)
def get_extent(self, renderer):
clean_line, ismath = self._text.is_math_text(self._text._text)
_, h_, d_ = renderer.get_text_width_height_descent(
"lp", self._text._fontproperties, ismath=False)
bbox, info = self._text._get_layout(renderer)
w, h = bbox.width, bbox.height
line = info[-1][0] # last line
_, hh, dd = renderer.get_text_width_height_descent(
line, self._text._fontproperties, ismath=ismath)
d = dd # the baseline of the last line
self._baseline_transform.clear()
if len(info) > 1 and self._multilinebaseline:
d_new = 0.5 * h - 0.5 * (h_ - d_)
self._baseline_transform.translate(0, d - d_new)
d = d_new
else: # single line
h_d = max(h_ - d_, h - d)
if self.get_minimumdescent():
                ## to have a minimum descent, i.e., "l" and "p" have the same
                ## descent.
d = max(d, d_)
#else:
# d = d
h = h_d + d
return w, h, 0., d
def draw(self, renderer):
"""
Draw the children
"""
self._text.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class AuxTransformBox(OffsetBox):
"""
    Offset Box with the aux_transform. Its children will be
    transformed with the aux_transform first then will be
    offsetted. The absolute coordinate of the aux_transform is
    meaningless as it will be automatically adjusted so that the
    lower-left corner of the bounding box of the children is set
    to (0, 0) before the offset transform.
It is similar to drawing area, except that the extent of the box
is not predetermined but calculated from the window extent of its
children. Furthermore, the extent of the children will be
calculated in the transformed coordinate.
"""
def __init__(self, aux_transform):
self.aux_transform = aux_transform
OffsetBox.__init__(self)
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
        # ref_offset_transform is used so that the offset_transform
        # always references the lower-left corner of the bbox of its
        # children.
self.ref_offset_transform = mtransforms.Affine2D()
self.ref_offset_transform.clear()
def add_artist(self, a):
'Add any :class:`~matplotlib.artist.Artist` to the container box'
self._children.append(a)
a.set_transform(self.get_transform())
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` applied
to the children
"""
return self.aux_transform + \
self.ref_offset_transform + \
self.offset_transform
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
        Accept : tuple of (x, y) coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() #w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)
def get_extent(self, renderer):
# clear the offset transforms
_off = self.offset_transform.to_values() # to be restored later
self.ref_offset_transform.clear()
self.offset_transform.clear()
# calculate the extent
bboxes = [c.get_window_extent(renderer) for c in self._children]
ub = mtransforms.Bbox.union(bboxes)
# adjust ref_offset_tansform
self.ref_offset_transform.translate(-ub.x0, -ub.y0)
# restor offset transform
mtx = self.offset_transform.matrix_from_values(*_off)
self.offset_transform.set_matrix(mtx)
return ub.width, ub.height, 0., 0.
def draw(self, renderer):
"""
Draw the children
"""
for c in self._children:
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class AnchoredOffsetbox(OffsetBox):
"""
An offset box placed according to the legend location
    loc. AnchoredOffsetbox has a single child. When multiple children
    are needed, use another OffsetBox class to enclose them. By default,
the offset box is anchored against its parent axes. You may
explicitly specify the bbox_to_anchor.
"""
zorder = 5 # zorder of the legend
def __init__(self, loc,
pad=0.4, borderpad=0.5,
child=None, prop=None, frameon=True,
bbox_to_anchor=None,
bbox_transform=None,
**kwargs):
"""
loc is a string or an integer specifying the legend location.
The valid location codes are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
        pad : pad around the child for drawing a frame. Given in
        fractions of the fontsize.
        borderpad : pad between the offsetbox frame and the bbox_to_anchor.
child : OffsetBox instance that will be anchored.
prop : font property. This is only used as a reference for paddings.
frameon : draw a frame box if True.
bbox_to_anchor : bbox to anchor. Use self.axes.bbox if None.
bbox_transform : with which the bbox_to_anchor will be transformed.
"""
super(AnchoredOffsetbox, self).__init__(**kwargs)
self.set_bbox_to_anchor(bbox_to_anchor, bbox_transform)
self.set_child(child)
self.loc = loc
self.borderpad = borderpad
self.pad = pad
if prop is None:
self.prop = FontProperties(size=rcParams["legend.fontsize"])
elif isinstance(prop, dict):
self.prop = FontProperties(**prop)
if "size" not in prop:
self.prop.set_size(rcParams["legend.fontsize"])
else:
self.prop = prop
self.patch = FancyBboxPatch(
xy=(0.0, 0.0), width=1., height=1.,
facecolor='w', edgecolor='k',
mutation_scale=self.prop.get_size_in_points(),
snap=True
)
self.patch.set_boxstyle("square", pad=0)
self._drawFrame = frameon
def set_child(self, child):
"set the child to be anchored"
self._child = child
def get_child(self):
"return the child"
return self._child
def get_children(self):
"return the list of children"
return [self._child]
def get_extent(self, renderer):
"""
        Return the extent of the artist. The extent of the child
        plus the pad is returned.
"""
w, h, xd, yd = self.get_child().get_extent(renderer)
fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
pad = self.pad * fontsize
return w + 2 * pad, h + 2 * pad, xd + pad, yd + pad
def get_bbox_to_anchor(self):
"""
        return the bbox that the legend will be anchored against
"""
if self._bbox_to_anchor is None:
return self.axes.bbox
else:
transform = self._bbox_to_anchor_transform
if transform is None:
return self._bbox_to_anchor
else:
return TransformedBbox(self._bbox_to_anchor,
transform)
def set_bbox_to_anchor(self, bbox, transform=None):
"""
        Set the bbox that the child will be anchored against.
*bbox* can be a Bbox instance, a list of [left, bottom, width,
height], or a list of [left, bottom] where the width and
height will be assumed to be zero. The bbox will be
transformed to display coordinate by the given transform.
"""
if bbox is None or isinstance(bbox, BboxBase):
self._bbox_to_anchor = bbox
else:
try:
l = len(bbox)
except TypeError:
raise ValueError("Invalid argument for bbox : %s" % str(bbox))
if l == 2:
bbox = [bbox[0], bbox[1], 0, 0]
self._bbox_to_anchor = Bbox.from_bounds(*bbox)
self._bbox_to_anchor_transform = transform
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
self._update_offset_func(renderer)
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset(w, h, xd, yd, renderer)
return Bbox.from_bounds(ox - xd, oy - yd, w, h)
def _update_offset_func(self, renderer, fontsize=None):
"""
Update the offset func which depends on the dpi of the
renderer (because of the padding).
"""
if fontsize is None:
fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
def _offset(w, h, xd, yd, renderer, fontsize=fontsize, self=self):
bbox = Bbox.from_bounds(0, 0, w, h)
borderpad = self.borderpad * fontsize
bbox_to_anchor = self.get_bbox_to_anchor()
x0, y0 = self._get_anchored_bbox(self.loc,
bbox,
bbox_to_anchor,
borderpad)
return x0 + xd, y0 + yd
self.set_offset(_offset)
def update_frame(self, bbox, fontsize=None):
self.patch.set_bounds(bbox.x0, bbox.y0,
bbox.width, bbox.height)
if fontsize:
self.patch.set_mutation_scale(fontsize)
def draw(self, renderer):
"draw the artist"
if not self.get_visible(): return
fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
self._update_offset_func(renderer, fontsize)
if self._drawFrame:
# update the location and size of the legend
bbox = self.get_window_extent(renderer)
self.update_frame(bbox, fontsize)
self.patch.draw(renderer)
width, height, xdescent, ydescent = self.get_extent(renderer)
px, py = self.get_offset(width, height, xdescent, ydescent, renderer)
self.get_child().set_offset((px, py))
self.get_child().draw(renderer)
def _get_anchored_bbox(self, loc, bbox, parentbbox, borderpad):
"""
return the position of the bbox anchored at the parentbbox
with the loc code, with the borderpad.
"""
assert loc in range(1, 11) # called only internally
BEST, UR, UL, LL, LR, R, CL, CR, LC, UC, C = range(11)
anchor_coefs = {UR:"NE",
UL:"NW",
LL:"SW",
LR:"SE",
R:"E",
CL:"W",
CR:"E",
LC:"S",
UC:"N",
C:"C"}
c = anchor_coefs[loc]
container = parentbbox.padded(-borderpad)
anchored_box = bbox.anchored(c, container=container)
return anchored_box.x0, anchored_box.y0
class AnchoredText(AnchoredOffsetbox):
"""
AnchoredOffsetbox with Text
"""
def __init__(self, s, loc, pad=0.4, borderpad=0.5, prop=None, **kwargs):
"""
*s* : string
*loc* : location code
*prop* : font property
*pad* : pad between the text and the frame as fraction of the font size.
*borderpad* : pad between the frame and the axes (or bbox_to_anchor).
other keyword parameters of AnchoredOffsetbox are also allowed.
"""
self.txt = TextArea(s, textprops=prop,
minimumdescent=False)
fp = self.txt._text.get_fontproperties()
super(AnchoredText, self).__init__(loc, pad=pad, borderpad=borderpad,
child=self.txt,
prop=fp,
**kwargs)
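# Illustrative usage (a sketch, not part of the original module; *ax* is
# assumed to be an existing Axes): AnchoredText is the simplest anchored box.
def _example_anchored_text_usage(ax):
    at = AnchoredText("upper left label", loc=2, prop=dict(size=8), frameon=True)
    ax.add_artist(at)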
class OffsetImage(OffsetBox):
def __init__(self, arr,
zoom=1,
cmap=None,
norm=None,
interpolation=None,
origin=None,
filternorm=1,
filterrad=4.0,
resample=False,
dpi_cor=True,
**kwargs
):
self._dpi_cor = dpi_cor
self.image = BboxImage(bbox=self.get_window_extent,
cmap=cmap,
norm=norm,
interpolation=interpolation,
origin=origin,
filternorm=filternorm,
filterrad=filterrad,
resample=resample,
**kwargs
)
self._children = [self.image]
self.set_zoom(zoom)
self.set_data(arr)
OffsetBox.__init__(self)
def set_data(self, arr):
self._data = np.asarray(arr)
self.image.set_data(self._data)
def get_data(self):
return self._data
def set_zoom(self, zoom):
self._zoom = zoom
def get_zoom(self):
return self._zoom
# def set_axes(self, axes):
# self.image.set_axes(axes)
# martist.Artist.set_axes(self, axes)
# def set_offset(self, xy):
# """
# set offset of the container.
    #     Accept : tuple of (x, y) coordinates in display units.
# """
# self._offset = xy
# self.offset_transform.clear()
# self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_children(self):
return [self.image]
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset()
return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)
def get_extent(self, renderer):
if self._dpi_cor: # True, do correction
dpi_cor = renderer.points_to_pixels(1.)
else:
dpi_cor = 1.
zoom = self.get_zoom()
data = self.get_data()
ny, nx = data.shape[:2]
w, h = nx * zoom, ny * zoom
return w, h, 0, 0
def draw(self, renderer):
"""
Draw the children
"""
self.image.draw(renderer)
#bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
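# Illustrative usage (a sketch, not part of the original module; *ax* and
# the image array *arr* are assumed to exist): an OffsetImage is normally
# wrapped in an AnnotationBbox (defined below) and placed at a data point.
def _example_offset_image_usage(ax, arr):
    oi = OffsetImage(arr, zoom=0.5)
    ab = AnnotationBbox(oi, (0.5, 0.5), xycoords='data', frameon=False)
    ax.add_artist(ab)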
from matplotlib.text import _AnnotationBase
class AnnotationBbox(martist.Artist, _AnnotationBase):
"""
Annotation-like class, but with offsetbox instead of Text.
"""
zorder = 3
def __str__(self):
return "AnnotationBbox(%g,%g)" % (self.xy[0], self.xy[1])
@docstring.dedent_interpd
def __init__(self, offsetbox, xy,
xybox=None,
xycoords='data',
boxcoords=None,
frameon=True, pad=0.4, # BboxPatch
annotation_clip=None,
box_alignment=(0.5, 0.5),
bboxprops=None,
arrowprops=None,
fontsize=None,
**kwargs):
"""
*offsetbox* : OffsetBox instance
*xycoords* : same as Annotation but can be a tuple of two
strings which are interpreted as x and y coordinates.
*boxcoords* : similar to textcoords as Annotation but can be a
tuple of two strings which are interpreted as x and y
coordinates.
*box_alignment* : a tuple of two floats for a vertical and
horizontal alignment of the offset box w.r.t. the *boxcoords*.
        The lower-left corner is (0, 0) and the upper-right corner is (1, 1).
other parameters are identical to that of Annotation.
"""
self.offsetbox = offsetbox
self.arrowprops = arrowprops
self.set_fontsize(fontsize)
if arrowprops is not None:
self._arrow_relpos = self.arrowprops.pop("relpos", (0.5, 0.5))
self.arrow_patch = FancyArrowPatch((0, 0), (1, 1),
**self.arrowprops)
else:
self._arrow_relpos = None
self.arrow_patch = None
_AnnotationBase.__init__(self,
xy, xytext=xybox,
xycoords=xycoords, textcoords=boxcoords,
annotation_clip=annotation_clip)
martist.Artist.__init__(self, **kwargs)
#self._fw, self._fh = 0., 0. # for alignment
self._box_alignment = box_alignment
# frame
self.patch = FancyBboxPatch(
xy=(0.0, 0.0), width=1., height=1.,
facecolor='w', edgecolor='k',
mutation_scale=self.prop.get_size_in_points(),
snap=True
)
self.patch.set_boxstyle("square", pad=pad)
if bboxprops:
self.patch.set(**bboxprops)
self._drawFrame = frameon
def contains(self, event):
t, tinfo = self.offsetbox.contains(event)
#if self.arrow_patch is not None:
# a,ainfo=self.arrow_patch.contains(event)
# t = t or a
# self.arrow_patch is currently not checked as this can be a line - JJ
return t, tinfo
def get_children(self):
children = [self.offsetbox, self.patch]
if self.arrow_patch:
children.append(self.arrow_patch)
return children
def set_figure(self, fig):
if self.arrow_patch is not None:
self.arrow_patch.set_figure(fig)
self.offsetbox.set_figure(fig)
martist.Artist.set_figure(self, fig)
def set_fontsize(self, s=None):
"""
set fontsize in points
"""
if s is None:
s = rcParams["legend.fontsize"]
self.prop = FontProperties(size=s)
def get_fontsize(self, s=None):
"""
return fontsize in points
"""
return self.prop.get_size_in_points()
def update_positions(self, renderer):
"Update the pixel positions of the annotated point and the text."
xy_pixel = self._get_position_xy(renderer)
self._update_position_xybox(renderer, xy_pixel)
mutation_scale = renderer.points_to_pixels(self.get_fontsize())
self.patch.set_mutation_scale(mutation_scale)
if self.arrow_patch:
self.arrow_patch.set_mutation_scale(mutation_scale)
def _update_position_xybox(self, renderer, xy_pixel):
"Update the pixel positions of the annotation text and the arrow patch."
x, y = self.xytext
if isinstance(self.textcoords, tuple):
xcoord, ycoord = self.textcoords
x1, y1 = self._get_xy(renderer, x, y, xcoord)
x2, y2 = self._get_xy(renderer, x, y, ycoord)
ox0, oy0 = x1, y2
else:
ox0, oy0 = self._get_xy(renderer, x, y, self.textcoords)
w, h, xd, yd = self.offsetbox.get_extent(renderer)
_fw, _fh = self._box_alignment
self.offsetbox.set_offset((ox0 - _fw * w + xd, oy0 - _fh * h + yd))
# update patch position
bbox = self.offsetbox.get_window_extent(renderer)
#self.offsetbox.set_offset((ox0-_fw*w, oy0-_fh*h))
self.patch.set_bounds(bbox.x0, bbox.y0,
bbox.width, bbox.height)
x, y = xy_pixel
ox1, oy1 = x, y
if self.arrowprops:
x0, y0 = x, y
d = self.arrowprops.copy()
# Use FancyArrowPatch if self.arrowprops has "arrowstyle" key.
# adjust the starting point of the arrow relative to
# the textbox.
# TODO : Rotation needs to be accounted.
relpos = self._arrow_relpos
ox0 = bbox.x0 + bbox.width * relpos[0]
oy0 = bbox.y0 + bbox.height * relpos[1]
# The arrow will be drawn from (ox0, oy0) to (ox1,
# oy1). It will be first clipped by patchA and patchB.
# Then it will be shrunk by shrinkA and shrinkB
# (in points). If patch A is not set, self.bbox_patch
# is used.
self.arrow_patch.set_positions((ox0, oy0), (ox1, oy1))
fs = self.prop.get_size_in_points()
mutation_scale = d.pop("mutation_scale", fs)
mutation_scale = renderer.points_to_pixels(mutation_scale)
self.arrow_patch.set_mutation_scale(mutation_scale)
patchA = d.pop("patchA", self.patch)
self.arrow_patch.set_patchA(patchA)
def draw(self, renderer):
"""
Draw the :class:`Annotation` object to the given *renderer*.
"""
if renderer is not None:
self._renderer = renderer
if not self.get_visible(): return
xy_pixel = self._get_position_xy(renderer)
if not self._check_xy(renderer, xy_pixel):
return
self.update_positions(renderer)
if self.arrow_patch is not None:
if self.arrow_patch.figure is None and self.figure is not None:
self.arrow_patch.figure = self.figure
self.arrow_patch.draw(renderer)
if self._drawFrame:
self.patch.draw(renderer)
self.offsetbox.draw(renderer)
class DraggableBase(object):
"""
helper code for a draggable artist (legend, offsetbox)
The derived class must override the following two methods.
def save_offset(self):
pass
def update_offset(self, dx, dy):
pass
*save_offset* is called when the object is picked for dragging; it is
meant to save the reference position of the artist.
*update_offset* is called during the dragging. *dx* and *dy* are the
pixel offsets from the point where the mouse drag started.
Optionally you may override the following two methods.
def artist_picker(self, artist, evt):
return self.ref_artist.contains(evt)
def finalize_offset(self):
pass
*artist_picker* is the picker method that will be used.
*finalize_offset* is called when the mouse is released. In the current
implementation of DraggableLegend and DraggableAnnotation,
*update_offset* simply places the artist in display coordinates, and
*finalize_offset* recalculates its position in normalized axes
coordinates and sets the relevant attribute.
"""
def __init__(self, ref_artist, use_blit=False):
self.ref_artist = ref_artist
self.got_artist = False
self._use_blit = use_blit
self.canvas = self.ref_artist.figure.canvas
c2 = self.canvas.mpl_connect('pick_event', self.on_pick)
c3 = self.canvas.mpl_connect('button_release_event', self.on_release)
ref_artist.set_picker(self.artist_picker)
self.cids = [c2, c3]
def on_motion(self, evt):
if self.got_artist:
dx = evt.x - self.mouse_x
dy = evt.y - self.mouse_y
self.update_offset(dx, dy)
self.canvas.draw()
def on_motion_blit(self, evt):
if self.got_artist:
dx = evt.x - self.mouse_x
dy = evt.y - self.mouse_y
self.update_offset(dx, dy)
self.canvas.restore_region(self.background)
self.ref_artist.draw(self.ref_artist.figure._cachedRenderer)
self.canvas.blit(self.ref_artist.figure.bbox)
def on_pick(self, evt):
if evt.artist == self.ref_artist:
self.mouse_x = evt.mouseevent.x
self.mouse_y = evt.mouseevent.y
self.got_artist = True
if self._use_blit:
self.ref_artist.set_animated(True)
self.canvas.draw()
self.background = self.canvas.copy_from_bbox(self.ref_artist.figure.bbox)
self.ref_artist.draw(self.ref_artist.figure._cachedRenderer)
self.canvas.blit(self.ref_artist.figure.bbox)
self._c1 = self.canvas.mpl_connect('motion_notify_event', self.on_motion_blit)
else:
self._c1 = self.canvas.mpl_connect('motion_notify_event', self.on_motion)
self.save_offset()
def on_release(self, event):
if self.got_artist:
self.finalize_offset()
self.got_artist = False
self.canvas.mpl_disconnect(self._c1)
if self._use_blit:
self.ref_artist.set_animated(False)
def disconnect(self):
'disconnect the callbacks'
for cid in self.cids:
self.canvas.mpl_disconnect(cid)
def artist_picker(self, artist, evt):
return self.ref_artist.contains(evt)
def save_offset(self):
pass
def update_offset(self, dx, dy):
pass
def finalize_offset(self):
pass
class DraggableOffsetBox(DraggableBase):
def __init__(self, ref_artist, offsetbox, use_blit=False):
DraggableBase.__init__(self, ref_artist, use_blit=use_blit)
self.offsetbox = offsetbox
def save_offset(self):
offsetbox = self.offsetbox
renderer = offsetbox.figure._cachedRenderer
w, h, xd, yd = offsetbox.get_extent(renderer)
offset = offsetbox.get_offset(w, h, xd, yd, renderer)
self.offsetbox_x, self.offsetbox_y = offset
self.offsetbox.set_offset(offset)
def update_offset(self, dx, dy):
loc_in_canvas = self.offsetbox_x + dx, self.offsetbox_y + dy
self.offsetbox.set_offset(loc_in_canvas)
def get_loc_in_canvas(self):
offsetbox = self.offsetbox
renderer = offsetbox.figure._cachedRenderer
w, h, xd, yd = offsetbox.get_extent(renderer)
ox, oy = offsetbox._offset
loc_in_canvas = (ox - xd, oy - yd)
return loc_in_canvas
class DraggableAnnotation(DraggableBase):
def __init__(self, annotation, use_blit=False):
DraggableBase.__init__(self, annotation, use_blit=use_blit)
self.annotation = annotation
def save_offset(self):
ann = self.annotation
x, y = ann.xytext
if isinstance(ann.textcoords, tuple):
xcoord, ycoord = ann.textcoords
x1, y1 = ann._get_xy(self.canvas.renderer, x, y, xcoord)
x2, y2 = ann._get_xy(self.canvas.renderer, x, y, ycoord)
ox0, oy0 = x1, y2
else:
ox0, oy0 = ann._get_xy(self.canvas.renderer, x, y, ann.textcoords)
self.ox, self.oy = ox0, oy0
self.annotation.textcoords = "figure pixels"
self.update_offset(0, 0)
def update_offset(self, dx, dy):
ann = self.annotation
ann.xytext = self.ox + dx, self.oy + dy
x, y = ann.xytext
xy = ann._get_xy(self.canvas.renderer, x, y, ann.textcoords)
def finalize_offset(self):
loc_in_canvas = self.annotation.xytext
self.annotation.textcoords = "axes fraction"
pos_axes_fraction = self.annotation.axes.transAxes.inverted().transform_point(loc_in_canvas)
self.annotation.xytext = tuple(pos_axes_fraction)
if __name__ == "__main__":
import matplotlib.pyplot as plt
fig = plt.figure(1)
fig.clf()
ax = plt.subplot(121)
#txt = ax.text(0.5, 0.5, "Test", size=30, ha="center", color="w")
kwargs = dict()
a = np.arange(256).reshape(16, 16) / 256.
myimage = OffsetImage(a,
zoom=2,
norm=None,
origin=None,
**kwargs
)
ax.add_artist(myimage)
myimage.set_offset((100, 100))
myimage2 = OffsetImage(a,
zoom=2,
norm=None,
origin=None,
**kwargs
)
ann = AnnotationBbox(myimage2, (0.5, 0.5),
xybox=(30, 30),
xycoords='data',
boxcoords="offset points",
frameon=True, pad=0.4, # BboxPatch
bboxprops=dict(boxstyle="round", fc="y"),
fontsize=None,
arrowprops=dict(arrowstyle="->"),
)
ax.add_artist(ann)
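# Illustrative addition (not part of the original demo): the AnnotationBbox
# above can be made draggable with the DraggableAnnotation helper defined in
# this module; keep a reference so the event connections stay alive.
d_ann = DraggableAnnotation(ann)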
plt.draw()
plt.show()
|
|
from django.db.models import Count
from tastypie.resources import ModelResource, ALL_WITH_RELATIONS
from tastypie import http, fields
from tastypie.exceptions import ImmediateHttpResponse
from tastypie.bundle import Bundle
import json
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.http import HttpResponse
from .models import Run, RunCaseVersion, RunSuite, Result
from ..mtapi import MTResource, MTApiKeyAuthentication, MTAuthorization
from ..core.api import (ProductVersionResource, ProductResource,
ReportResultsAuthorization, UserResource)
from ..environments.api import EnvironmentResource
from ..environments.models import Environment
from ..library.api import (CaseVersionResource, BaseSelectionResource,
SuiteResource)
from ..library.models import CaseVersion, Suite
from ...view.lists.filters import filter_url
import logging
logger = logging.getLogger(__name__)
class RunSuiteAuthorization(MTAuthorization):
"""Atypically named permission."""
@property
def permission(self):
"""This permission should be checked by is_authorized."""
return "execution.manage_runs"
class RunCaseVersionResource(ModelResource):
"""
RunCaseVersion represents the connection between a run and a caseversion.
It is possible to return a result for each runcaseversion. So the result
will sit as a peer to the caseversion under the runcaseversion.
"""
run = fields.ToOneField(
"moztrap.model.execution.api.RunResource",
"run",
related_name="runcaseversion")
caseversion = fields.ToOneField(CaseVersionResource, "caseversion", full=True)
class Meta:
queryset = RunCaseVersion.objects.all()
list_allowed_methods = ['get']
filtering = {
"run": ALL_WITH_RELATIONS,
"caseversion": ALL_WITH_RELATIONS,
}
fields = ["id", "run"]
class RunResource(ModelResource):
"""
Fetch the test runs for the specified product and version.
It is also possible to create a new test run via POST.
"""
productversion = fields.ForeignKey(ProductVersionResource, "productversion")
environments = fields.ToManyField(
EnvironmentResource,
"environments",
full=False,
)
runcaseversions = fields.ToManyField(
RunCaseVersionResource,
"runcaseversions",
)
class Meta:
queryset = Run.objects.all()
list_allowed_methods = ["get", "post"]
fields = [
"id",
"name",
"description",
"status",
"productversion",
"environments",
"runcaseversions",
]
filtering = {
"productversion": ALL_WITH_RELATIONS,
"status": "exact",
}
authentication = MTApiKeyAuthentication()
authorization = ReportResultsAuthorization()
always_return_data = True
def dehydrate(self, bundle):
"""Add some convenience fields to the return JSON."""
pv = bundle.obj.productversion
bundle.data["productversion_name"] = pv.version
bundle.data["product_name"] = pv.product.name
return bundle
def dispatch_detail(self, request, **kwargs):
"""For details, we want the full info on environments for the run """
self.fields["environments"].full = True
return super(RunResource, self).dispatch_detail(request, **kwargs)
def dispatch_list(self, request, **kwargs):
"""For list, we don't want the full info on environments """
self.fields["environments"].full = False
return super(RunResource, self).dispatch_list(request, **kwargs)
def create_response(self, request, data,
response_class=HttpResponse, **response_kwargs):
"""On posting a run, return a url to the MozTrap UI for that new run."""
resp = super(RunResource, self).create_response(
request,
data,
response_class=response_class,
**response_kwargs
)
if isinstance(data, Bundle):
# data will be a bundle if we are creating a new Run. In that
# case we want to add a URI for viewing this new run's results in the UI
full_url = filter_url(
"results_runcaseversions",
Run.objects.get(pk=data.data["id"]),
)
new_content = json.loads(resp.content)
new_content["ui_uri"] = full_url
new_content["resource_uri"] = data.data["resource_uri"]
resp.content = json.dumps(new_content)
# need to set the content type to application/json
resp._headers["content-type"] = ("Content-Type", "application/json; charset=utf-8")
return resp
def obj_create(self, bundle, request=None, **kwargs):
"""Set the created_by field for the run to the request's user"""
bundle = super(RunResource, self).obj_create(bundle=bundle, request=request, **kwargs)
bundle.obj.created_by = request.user
bundle.obj.save()
return bundle
def hydrate_runcaseversions(self, bundle):
"""
Handle the runcaseversion creation during a POST of a new Run.
Tastypie handles the creation of the run itself. But we handle the
RunCaseVersions and Results because we have special handler methods for
setting the statuses which we want to keep DRY.
"""
try:
run = bundle.obj
run.save()
# walk results
for data in bundle.data["runcaseversions"]:
status = data.pop("status")
# find caseversion for case
cv = CaseVersion.objects.get(
productversion=run.productversion,
case=data.pop("case"),
)
# create runcaseversion for this run to caseversion
rcv, created = RunCaseVersion.objects.get_or_create(
run=run,
caseversion=cv,
)
data["user"] = bundle.request.user
data["environment"] = Environment.objects.get(
pk=data["environment"])
# create result via methods on runcaseversion
rcv.get_result_method(status)(**data)
bundle.data["runcaseversions"] = []
return bundle
except KeyError as e:
raise ValidationError(
"bad result object data missing key: {0}".format(e))
except ObjectDoesNotExist as e:
raise ValidationError(e)
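# Illustrative POST payload for RunResource (all values are hypothetical; the
# shape of each runcaseversion entry is inferred from
# RunResource.hydrate_runcaseversions above: a case id, an environment id, a
# status, and optional extras such as "comment" that are passed through to
# the result-method handlers).
EXAMPLE_RUN_POST_PAYLOAD = {
    "name": "smoke run",
    "description": "created via the API",
    "productversion": "/api/v1/productversion/1/",
    "environments": ["/api/v1/environment/23/"],
    "runcaseversions": [
        {"case": "1", "environment": "23", "status": "passed"},
        {"case": "14", "environment": "23", "status": "failed",
         "comment": "failed on step 2"},
    ],
}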
class ResultResource(ModelResource):
"""
Endpoint for submitting results for a set of runcaseversions.
This endpoint is write only. The submitted result objects should
be formed like this::
{
"objects": [
{
"case": "1",
"environment": "23",
"run_id": "1",
"status": "passed"
},
{
"case": "14",
"comment": "why u no make sense??",
"environment": "23",
"run_id": "1",
"status": "invalidated"
},
{
"bug": "http://www.deathvalleydogs.com",
"case": "326",
"comment": "why u no pass?",
"environment": "23",
"run_id": "1",
"status": "failed",
"stepnumber": 1
}
]
}
"""
class Meta:
queryset = Result.objects.all()
resource_name = "result"
list_allowed_methods = ["patch"]
authentication = MTApiKeyAuthentication()
authorization = ReportResultsAuthorization()
def obj_create(self, bundle, request=None, **kwargs):
"""
Manually create the proper results objects.
This is necessary because we have special handler methods in
RunCaseVersion for setting the statuses which we want to keep DRY.
"""
data = bundle.data.copy()
try:
status = data.pop("status")
case = data.pop("case")
env = Environment.objects.get(pk=data.get("environment"))
run = data.pop("run_id")
except KeyError as e:
raise ValidationError(
"bad result object data missing key: {0}".format(e))
except Environment.DoesNotExist as e:
raise ValidationError(
"Specified environment does not exist: {0}".format(e))
data["environment"] = env
try:
rcv = RunCaseVersion.objects.get(
run__id=run,
caseversion__case__id=case,
environments=env,
)
except RunCaseVersion.DoesNotExist as e:
raise ValidationError(
"RunCaseVersion not found for run: {0}, case: {1}, environment: {2}:\nError {3}".format(
str(run), str(case), str(env), e))
data["user"] = request.user
bundle.obj = rcv.get_result_method(status)(**data)
return bundle
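def _example_submit_results(base_url, username, api_key):
    """Hedged client-side sketch (not used by the server code): bulk-submit
    results in the format documented on ResultResource. The URL layout and
    the username/api_key query params follow tastypie's ApiKeyAuthentication
    conventions; the ``requests`` library is assumed to be available.
    """
    import requests
    payload = {"objects": [
        {"case": "1", "environment": "23", "run_id": "1", "status": "passed"},
    ]}
    return requests.patch(
        "{0}/api/v1/result/?username={1}&api_key={2}".format(
            base_url, username, api_key),
        json=payload,
        headers={"content-type": "application/json"},
    )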
class RunSuiteResource(MTResource):
"""
Create, Read, Update and Delete capabilities for RunSuite.
Filterable by suite and run fields.
"""
run = fields.ForeignKey(RunResource, 'run')
suite = fields.ForeignKey(SuiteResource, 'suite')
class Meta(MTResource.Meta):
queryset = RunSuite.objects.all()
fields = ["suite", "run", "order", "id"]
filtering = {
"suite": ALL_WITH_RELATIONS,
"run": ALL_WITH_RELATIONS
}
authorization = RunSuiteAuthorization()
@property
def model(self):
return RunSuite
@property
def read_create_fields(self):
"""run and suite are read-only"""
return ["suite", "run"]
def hydrate_suite(self, bundle):
"""suite is read-only on PUT
suite.product must match run.productversion.product on CREATE
"""
# CREATE
if bundle.request.META['REQUEST_METHOD'] == 'POST':
suite_id = self._id_from_uri(bundle.data['suite'])
suite = Suite.objects.get(id=suite_id)
run_id = self._id_from_uri(bundle.data['run'])
run = Run.objects.get(id=run_id)
if suite.product.id != run.productversion.product.id:
error_message = "suite's product must match run's product."
logger.error(
"\n".join([error_message, "suite prod: %s, run prod: %s"]),
suite.product.id, run.productversion.product.id)
raise ImmediateHttpResponse(
response=http.HttpBadRequest(error_message))
return bundle
class SuiteSelectionResource(BaseSelectionResource):
"""
Specialty end-point for an AJAX call from the multi-select widget
for selecting suites.
"""
product = fields.ForeignKey(ProductResource, "product")
runs = fields.ToManyField(RunResource, "runs")
created_by = fields.ForeignKey(
UserResource, "created_by", full=True, null=True)
class Meta:
queryset = Suite.objects.all().select_related(
"created_by",
).annotate(case_count=Count("cases"))
list_allowed_methods = ['get']
fields = ["id", "name", "created_by"]
filtering = {
"product": ALL_WITH_RELATIONS,
"runs": ALL_WITH_RELATIONS,
"created_by": ALL_WITH_RELATIONS,
}
ordering = ["runs"]
def dehydrate(self, bundle):
"""Add some convenience fields to the return JSON."""
suite = bundle.obj
bundle.data["suite_id"] = unicode(suite.id)
bundle.data["case_count"] = suite.case_count
bundle.data["filter_cases"] = filter_url("manage_cases", suite)
return bundle
|
|
# ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# =============enthought library imports=======================
# =============standard library imports ========================
import codecs
import glob
import os
import sys
import time
import serial
# =============local library imports ==========================
from .communicator import Communicator, process_response, prep_str, remove_eol_func
def get_ports():
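"""Return a list of candidate serial port names for the current platform."""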
if sys.platform == "win32":
ports = ["COM{}".format(i + 1) for i in range(256)]
else:
usb = glob.glob("/dev/tty.usb*")
furpi = glob.glob("/dev/furpi.*")
pychron = glob.glob("/dev/pychron.*")
slab = glob.glob("/dev/tty.SLAB*")
if sys.platform == "darwin":
keyspan = glob.glob("/dev/tty.U*")
else:
keyspan = glob.glob("/dev/ttyU*")
ports = keyspan + usb + furpi + pychron + slab
return ports
class SerialCommunicator(Communicator):
"""
Base class for devices that communicate using an RS-232 serial port.
Using a Keyspan serial converter is the best option for a Mac.
The class is built on top of pyserial, which is used to create a handle;
this class uses the handle to read and write.
Handles are created when a serial device is opened.
Setup args are loaded using load(); this method should be overridden to
load device-specific items.
"""
# char_write = False
_auto_find_handle = False
_auto_write_handle = False
baudrate = None
port = None
bytesize = None
parity = None
stopbits = None
timeout = None
id_query = ""
id_response = ""
read_delay = None
read_terminator = None
read_terminator_position = None
clear_output = False
echos_command = False
_config = None
_comms_report_attrs = (
"port",
"baudrate",
"bytesize",
"parity",
"stopbits",
"timeout",
)
@property
def address(self):
return self.port
def test_connection(self):
return self.handle is not None
def reset(self):
handle = self.handle
try:
isopen = handle.isOpen()
orate = handle.getBaudrate()
if isopen:
handle.close()
handle.setBaudrate(0)
handle.open()
time.sleep(0.1)
handle.close()
handle.setBaudrate(orate)
if isopen:
handle.open()
except Exception:
self.warning("failed to reset connection")
def close(self):
if self.handle:
self.debug("closing handle {}".format(self.handle))
self.handle.close()
def load_comdict(self, port, baudrate=9600, bytesize=8, parity=None, stopbits=1):
self.baudrate = baudrate
self.port = port
self.set_parity(parity)
self.set_stopbits(stopbits)
self.bytesize = bytesize
def load(self, config, path):
self.config_path = path
self._config = config
self.set_attribute(config, "port", "Communications", "port")
self.set_attribute(
config, "baudrate", "Communications", "baudrate", cast="int", optional=True
)
self.set_attribute(
config, "bytesize", "Communications", "bytesize", cast="int", optional=True
)
self.set_attribute(
config, "timeout", "Communications", "timeout", cast="float", optional=True
)
self.set_attribute(
config,
"clear_output",
"Communications",
"clear_output",
cast="boolean",
optional=True,
)
parity = self.config_get(config, "Communications", "parity", optional=True)
self.set_parity(parity)
stopbits = self.config_get(config, "Communications", "stopbits", optional=True)
self.set_stopbits(stopbits)
self.set_attribute(
config,
"read_delay",
"Communications",
"read_delay",
cast="float",
optional=True,
default=25,
)
self.set_attribute(
config,
"read_terminator",
"Communications",
"terminator",
optional=True,
default=None,
)
self.set_attribute(
config,
"read_terminator_position",
"Communications",
"terminator_position",
optional=True,
default=None,
cast="int",
)
self.set_attribute(
config,
"write_terminator",
"Communications",
"write_terminator",
optional=True,
default=b"\r",
)
if self.write_terminator == "CRLF":
self.write_terminator = b"\r\n"
if self.read_terminator == "CRLF":
self.read_terminator = b"\r\n"
if self.read_terminator == "ETX":
self.read_terminator = chr(3)
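# The configuration consumed by load() is expected to carry a
# "Communications" section; an illustrative example (values are
# hypothetical):
#
# [Communications]
# port = usbmodem1421
# baudrate = 9600
# bytesize = 8
# parity = NONE
# stopbits = 1
# timeout = 1.0
# read_delay = 25
# terminator = CRLF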
def set_parity(self, parity):
if parity:
self.parity = getattr(serial, "PARITY_%s" % parity.upper())
def set_stopbits(self, stopbits):
if stopbits:
if stopbits in ("1", 1):
stopbits = "ONE"
elif stopbits in ("2", 2):
stopbits = "TWO"
self.stopbits = getattr(serial, "STOPBITS_{}".format(stopbits.upper()))
def tell(self, cmd, is_hex=False, info=None, verbose=True, **kw):
""" """
if self.handle is None:
if verbose:
info = "no handle"
self.log_tell(cmd, info)
return
with self._lock:
self._write(cmd, is_hex=is_hex)
if verbose:
self.log_tell(cmd, info)
def read(self, nchars=None, *args, **kw):
""" """
with self._lock:
if nchars is not None:
r = self._read_nchars(nchars)
else:
r = self._read_terminator(*args, **kw)
return r
def ask(
self,
cmd,
is_hex=False,
verbose=True,
delay=None,
replace=None,
remove_eol=True,
info=None,
nbytes=None,
handshake_only=False,
handshake=None,
read_terminator=None,
terminator_position=None,
nchars=None,
):
""" """
if self.handle is None:
if verbose:
x = prep_str(cmd.strip())
self.info("no handle {}".format(x))
return
if not self.handle.isOpen():
return
with self._lock:
if self.clear_output:
self.handle.flushInput()
self.handle.flushOutput()
cmd = self._write(cmd, is_hex=is_hex)
if cmd is None:
return
if is_hex:
if nbytes is None:
nbytes = 8
re = self._read_hex(nbytes=nbytes, delay=delay)
elif handshake is not None:
re = self._read_handshake(handshake, handshake_only, delay=delay)
elif nchars is not None:
re = self._read_nchars(nchars)
else:
re = self._read_terminator(
delay=delay,
terminator=read_terminator,
terminator_position=terminator_position,
)
if remove_eol and not is_hex:
re = remove_eol_func(re)
if verbose:
pre = process_response(re, replace, remove_eol=not is_hex)
self.log_response(cmd, pre, info)
return re
def open(self, **kw):
"""
Use pyserial to create a handle connected to the port with the given baudrate.
Default handle parameters:
baudrate=9600
bytesize=EIGHTBITS
parity= PARITY_NONE
stopbits= STOPBITS_ONE
timeout=None
"""
port = kw.get("port")
if port is None:
port = self.port
if port is None:
self.warning("Port not set")
return False
# #on windows device handles probably handled differently
if sys.platform == "darwin":
port = "/dev/tty.{}".format(port)
kw["port"] = port
for key in ["baudrate", "bytesize", "parity", "stopbits", "timeout"]:
v = kw.get(key)
if v is None:
v = getattr(self, key)
if v is not None:
kw[key] = v
pref = kw.pop("prefs", None)
if pref is not None:
pref = pref.serial_preference
self._auto_find_handle = pref.auto_find_handle
self._auto_write_handle = pref.auto_write_handle
self.simulation = True
if self._validate_address(port):
try_connect = True
while try_connect:
try:
self.debug("Connection parameters={}".format(kw))
self.handle = serial.Serial(**kw)
try_connect = False
self.simulation = False
except serial.serialutil.SerialException:
try_connect = False
self.debug_exception()
elif self._auto_find_handle:
self._find_handle(**kw)
self.debug("Serial device: {}".format(self.handle))
return self.handle is not None # connected is true if handle is not None
# private
def _get_report_value(self, key):
c, value = super(SerialCommunicator, self)._get_report_value(key)
if self.handle:
value = getattr(self.handle, key)
return c, value
def _find_handle(self, **kw):
found = False
self.simulation = False
self.info("Trying to find correct port")
port = None
for port in get_ports():
self.info("trying port {}".format(port))
kw["port"] = port
try:
self.handle = serial.Serial(**kw)
except serial.SerialException:
continue
r = self.ask(self.id_query)
# use id_response as a callable to do device specific
# checking
if callable(self.id_response):
if self.id_response(r):
found = True
self.simulation = False
break
if r == self.id_response:
found = True
self.simulation = False
break
if not found:
# update the port
if self._auto_write_handle and port:
# port in form
# /dev/tty.USAXXX1.1
p = os.path.split(port)[-1]
# remove tty.
p = p[4:]
self._config.set(
"Communications",
"port",
p,
)
self.write_configuration(self._config, self.config_path)
self.handle = None
self.simulation = True
def _validate_address(self, port):
"""
use glob to check the available serial ports
valid ports start with /dev/tty.U or /dev/tty.usbmodem
"""
valid = get_ports()
if port in valid:
return True
else:
msg = "{} is not a valid port address".format(port)
self.warning(msg)
if not valid:
self.warning("No valid ports")
else:
self.warning("======== Valid Ports ========")
for v in valid:
self.warning(v)
self.warning("=============================")
def _write(self, cmd, is_hex=False):
"""
use the serial handle to write the cmd to the serial buffer
return the command that was written (including any terminator),
or None if an exception occurred while writing
"""
if not self.simulation:
# want to write back the original cmd
# use command locally
command = cmd
if not isinstance(command, bytes):
command = bytes(command, "utf-8")
if is_hex:
command = codecs.decode(command, "hex")
else:
wt = self.write_terminator
if wt is not None:
if not isinstance(wt, bytes):
wt = bytes(wt, "utf-8")
command += wt
cmd = command
try:
self.handle.write(command)
except (
serial.serialutil.SerialException,
OSError,
IOError,
ValueError,
) as e:
self.warning("Serial Communicator write execption: {}".format(e))
return
return cmd
def _read_nchars(self, nchars, timeout=1, delay=None):
return self._read_loop(lambda r: self._get_nchars(nchars, r), delay, timeout)
def _read_hex(self, nbytes=8, timeout=1, delay=None):
return self._read_loop(lambda r: self._get_nbytes(nbytes, r), delay, timeout)
def _read_handshake(self, handshake, handshake_only, timeout=1, delay=None):
def hfunc(r):
terminated = False
ack, r = self._check_handshake(handshake)
if handshake_only and ack:
r = handshake[0]
terminated = True
elif ack and r is not None:
terminated = True
return r, terminated
return self._read_loop(hfunc, delay, timeout)
def _read_terminator(
self, timeout=1, delay=None, terminator=None, terminator_position=None
):
if terminator is None:
terminator = self.read_terminator
if terminator_position is None:
terminator_position = self.read_terminator_position
if terminator is None:
terminator = (b"\r\x00", b"\r\n", b"\r", b"\n")
if not isinstance(terminator, (list, tuple)):
terminator = (terminator,)
def func(r):
terminated = False
try:
if self.echos_command:
inw = 1
else:
inw = self.handle.inWaiting()
r += self.handle.read(inw)
if r and r.strip():
for ti in terminator:
if terminator_position:
terminated = r[terminator_position] == ti
else:
if isinstance(ti, str):
ti = ti.encode()
terminated = r.endswith(ti)
if terminated:
break
except BaseException as e:
self.warning(e)
return r, terminated
if self.echos_command:
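# the device echoes the command back: do one throwaway read to consume
# the echo before reading the actual response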
self._read_loop(func, delay, timeout)
return self._read_loop(func, delay, timeout)
def _get_nbytes(self, *args, **kw):
"""
1 byte == 2 chars
"""
return self._get_nchars(*args, **kw)
def _get_nchars(self, nchars, r):
handle = self.handle
inw = handle.inWaiting()
c = min(inw, nchars - len(r))
r += handle.read(c)
return r[:nchars], len(r) >= nchars
def _check_handshake(self, handshake_chrs):
ack, nak = handshake_chrs
inw = self.handle.inWaiting()
r = self.handle.read(inw)
if r:
return ack == r[0], r[1:]
return False, None
def _read_loop(self, func, delay, timeout=1):
if delay is not None:
time.sleep(delay / 1000.0)
elif self.read_delay:
time.sleep(self.read_delay / 1000.0)
r = b""
st = time.time()
handle = self.handle
ct = time.time()
while ct - st < timeout:
if not handle.isOpen():
break
try:
r, isterminated = func(r)
if isterminated:
break
except (ValueError, TypeError):
pass
time.sleep(0.01)
ct = time.time()
if ct - st > timeout:
l = len(r) if r else 0
self.info("timed out. {}s r={}, len={}".format(timeout, r, l))
return r
if __name__ == "__main__":
s = SerialCommunicator()
s.read_delay = 0
s.port = "usbmodemfd1221"
s.open()
time.sleep(2)
s.tell("A", verbose=False)
for i in range(10):
print("dddd", s.ask("1", verbose=False))
time.sleep(1)
# s.tell('ddd', verbose=False)
# print s.ask('ddd', verbose=False)
# ===================== EOF ==========================================
|
|
"""
Created on 4 May 2021
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
example JSON:
{"serial_number": "27-000001", "type": "810-0023-02", "calibrated_on": "2016-11-01",
"sn1": {"serial_number": "212060308", "sensor_type": "NO2A43F"},
"sn2": {"serial_number": "132950202", "sensor_type": "CO A4"},
"sn3": {"serial_number": "134060009", "sensor_type": "SO2A4"},
"sn4": {"serial_number": "133910023", "sensor_type": "H2SA4"}}
"""
from collections import OrderedDict
from scs_core.data.datum import Datum
from scs_core.data.json import JSONable, PersistentJSONable
from scs_core.data.str import Str
from scs_core.gas.afe_calib import AFECalib
from scs_core.gas.sensor import Sensor
# --------------------------------------------------------------------------------------------------------------------
class AFEId(PersistentJSONable):
"""
AFE identity: serial number, AFE type, calibration date and sensor IDs.
"""
# ----------------------------------------------------------------------------------------------------------------
@classmethod
def persistence_location(cls):
return AFECalib.persistence_location()
# ----------------------------------------------------------------------------------------------------------------
@classmethod
def construct_from_jdict(cls, jdict, skeleton=False):
if not jdict:
return None
serial_number = jdict.get('serial_number')
afe_type = jdict.get('type')
calibrated_on = Datum.date(jdict.get('calibrated_on'))
sensor_ids = []
for key in sorted(jdict.keys()):
if key[:2] == "sn":
if jdict[key] is None:
sensor_ids.append(None)
continue
sensor_ids.append(SensorId.construct_from_jdict(jdict[key]))
return cls(serial_number, afe_type, calibrated_on, sensor_ids)
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, serial_number, afe_type, calibrated_on, sensor_ids):
"""
Constructor
"""
super().__init__()
self.__serial_number = serial_number # string
self.__afe_type = afe_type # string
self.__calibrated_on = calibrated_on # date
self.__sensor_ids = sensor_ids # array of SensorId
def __eq__(self, other):
try:
if len(self) != len(other):
return False
for i in range(len(self)):
if self.sensor_id(i) != other.sensor_id(i):
return False
return self.serial_number == other.serial_number and self.afe_type == other.afe_type and \
self.calibrated_on == other.calibrated_on
except (AttributeError, TypeError):
return False
def __len__(self):
return len(self.__sensor_ids)
# ----------------------------------------------------------------------------------------------------------------
def sensor_index(self, gas_name):
for i in range(len(self.__sensor_ids)):
sensor_id = self.__sensor_ids[i]
if sensor_id is None:
continue
sensor = Sensor.find(sensor_id.serial_number)
if sensor is None:
raise ValueError(sensor_id.serial_number)
if sensor.gas_name == gas_name:
return i
return None
# ----------------------------------------------------------------------------------------------------------------
def as_json(self):
jdict = OrderedDict()
jdict['serial_number'] = self.serial_number
jdict['type'] = self.afe_type
jdict['calibrated_on'] = self.calibrated_on.isoformat() if self.calibrated_on else None
for i in range(len(self.__sensor_ids)):
jdict['sn' + str(i + 1)] = self.__sensor_ids[i]
return jdict
# ----------------------------------------------------------------------------------------------------------------
@property
def serial_number(self):
return self.__serial_number
@property
def afe_type(self):
return self.__afe_type
@property
def calibrated_on(self):
return self.__calibrated_on
def sensor_id(self, i):
return self.__sensor_ids[i]
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
return "AFEId:{serial_number:%s, afe_type:%s, calibrated_on:%s, sensor_ids:%s}" % \
(self.serial_number, self.afe_type, self.calibrated_on, Str.collection(self.__sensor_ids))
# --------------------------------------------------------------------------------------------------------------------
class SensorId(JSONable):
"""
Identity of a single sensor: serial number and sensor type.
"""
# ----------------------------------------------------------------------------------------------------------------
@classmethod
def construct_from_jdict(cls, jdict):
if not jdict:
return None
serial_number = jdict.get('serial_number')
sensor_type = jdict.get('sensor_type')
return cls(serial_number, sensor_type)
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, serial_number, sensor_type):
"""
Constructor
"""
self.__serial_number = serial_number # int
self.__sensor_type = sensor_type # string
def __eq__(self, other):
try:
return self.serial_number == other.serial_number and self.sensor_type == other.sensor_type
except (TypeError, AttributeError):
return False
# ----------------------------------------------------------------------------------------------------------------
def as_json(self):
jdict = OrderedDict()
jdict['serial_number'] = self.serial_number
jdict['sensor_type'] = self.sensor_type
return jdict
# ----------------------------------------------------------------------------------------------------------------
@property
def serial_number(self):
return self.__serial_number
@property
def sensor_type(self):
return self.__sensor_type
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
return "SensorId:{serial_number:%s, sensor_type:%s}" % (self.serial_number, self.sensor_type)
|
|
# make the other metrics work
# generate the txt files, then work on the pdf output
__version__ = "0.1.0"
import numpy as np
import pandas as pd
# import matplotlib
# matplotlib.use('pdf')
import matplotlib.pyplot as plt
import sys
import os
import networkx as nx
import PHRG
import probabilistic_cfg as pcfg
import net_metrics as metrics
import re # import pprint as pp
import argparse, traceback
DBG = False
def get_parser ():
parser = argparse.ArgumentParser(description='exact_phrg: infer a model given a graph (derive a model)')
parser.add_argument('g_fname', metavar='G_FNAME', nargs=1, help='Filename of edgelist graph')
parser.add_argument('--chunglu', help='Generate chunglu graphs',action='store_true')
parser.add_argument('--kron', help='Generate Kronecker product graphs',action='store_true')
parser.add_argument('--version', action='version', version=__version__)
return parser
def Hstar_Graphs_Control (G, graph_name, axs):
print '-',Hstar_Graphs_Control,'-'
# Derive the prod rules in a naive way, where
prod_rules = PHRG.probabilistic_hrg_learning(G)
print prod_rules
g = pcfg.Grammar('S')
for (id, lhs, rhs, prob) in prod_rules:
g.add_rule(pcfg.Rule(id, lhs, rhs, prob))
num_nodes = G.number_of_nodes()
print "Starting max size", 'n=', num_nodes
g.set_max_size(num_nodes)
print "Done with max size"
Hstars = []
num_samples = 20
print '*' * 40
for i in range(0, num_samples):
rule_list = g.sample(num_nodes)
hstar = PHRG.grow(rule_list, g)[0]
Hstars.append(hstar)
# if 0:
# g = nx.from_pandas_dataframe(df, 'src', 'trg', edge_attr=['ts'])
# draw_degree_whole_graph(g,axs)
# draw_degree(Hstars, axs=axs, col='r')
# #axs.set_title('Rules derived by ignoring time')
# axs.set_ylabel('Frequency')
# axs.set_xlabel('degree')
if 1:
# metricx = [ 'degree','hops', 'clust', 'assort', 'kcore','eigen','gcd']
metricx = ['degree', 'gcd']
# g = nx.from_pandas_dataframe(df, 'src', 'trg',edge_attr=['ts'])
# graph_name = os.path.basename(f_path).rstrip('.tel')
if DBG: print ">", graph_name
metrics.network_properties([G], metricx, Hstars, name=graph_name, out_tsv=True)
def pandas_dataframes_from_edgelists (el_files):
if (el_files is None): return
list_of_dataframes = []
for f in el_files:
print '~' * 80
print f
temporal_graph = False
with open(f, 'r') as ifile:
line = ifile.readline()
while (not temporal_graph):
if ("%" in line):
line = ifile.readline()
elif len(line.split()) > 3:
temporal_graph = True
else:
break  # plain (non-temporal) edgelist reached; stop scanning
if (temporal_graph):
dat = np.genfromtxt(f, dtype=np.int64, comments='%', delimiter="\t", usecols=[0, 1, 3], autostrip=True)
df = pd.DataFrame(dat, columns=['src', 'trg', 'ts'])
else:
dat = np.genfromtxt(f, dtype=np.int64, comments='%', delimiter="\t", usecols=[0, 1], autostrip=True)
df = pd.DataFrame(dat, columns=['src', 'trg'])
df = df.drop_duplicates()
list_of_dataframes.append(df)
return list_of_dataframes
def grow_exact_size_hrg_graphs_from_prod_rules(prod_rules, gname, n, runs=1):
"""
Args:
prod_rules: production rules (model)
gname: graph name
n: target graph order (number of nodes)
runs: how many graphs to generate
Returns: list of synthetic graphs
"""
if n <= 0: sys.exit(1)
# print runs
# for i,x in enumerate(prod_rules):
# print i,' ', x
g = pcfg.Grammar('S')
for (id, lhs, rhs, prob) in prod_rules:
rhs = [f[1:-1] for f in re.findall("'.+?'", rhs)]
prob = float(prob)
g.add_rule(pcfg.Rule(id, lhs, rhs, prob))
# # mask = (pddf['ts'] >= pddf['ts'].min()+ span*kSlice) & (pddf['ts'] < pddf['ts'].min()+ span*(kSlice +1))
# mask = (pddf['ts'] >= pddf['ts'].min()) & (pddf['ts'] < pddf['ts'].min() + span * (kSlice + 1))
# ldf = pddf.loc[mask]
#
# G = nx.from_pandas_dataframe(ldf, 'src', 'trg', ['ts'])
#
num_nodes = n
if DBG: print "Starting max size"
g.set_max_size(num_nodes)
if DBG: print "Done with max size"
#
# num_samples = 20
if DBG: print '*' * 40
hstars_lst = []
for i in range(0, runs):
rule_list = g.sample(num_nodes)
hstar = PHRG.grow(rule_list, g)[0]
hstars_lst.append(hstar)
# print rule_list
return hstars_lst
def pwrlaw_plot (xdata, ydata, yerr):
from scipy import log10, optimize, sqrt
powerlaw = lambda x, amp, index: amp * (x**index)
logx = log10(xdata)
logy = log10(ydata)
logyerr = yerr / ydata
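# fit a straight line in log10 space: log10(y) = log10(amp) + index*log10(x).
# the relative error yerr/ydata approximates d(log y); the constant ln(10)
# factor is omitted since uniformly scaling the errors does not change the
# least-squares minimum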
# define our (line) fitting function
fitfunc = lambda p, x: p[0] + p[1] * x
errfunc = lambda p, x, y, err: (y - fitfunc(p, x)) / err
pinit = [1.0, -1.0]
out = optimize.leastsq(errfunc, pinit,
args=(logx, logy, logyerr), full_output=1)
pfinal = out[0]
covar = out[1]
print pfinal
print covar
index = pfinal[1]
amp = 10.0**pfinal[0]
indexErr = sqrt( covar[0][0] )
ampErr = sqrt( covar[1][1] ) * amp
print index
# ########
# plotting
# ########
# ax.plot(ydata)
# ax.plot(pl_sequence)
fig, axs = plt.subplots(2,1)
axs[0].plot(xdata, powerlaw(xdata, amp, index)) # Fit
axs[0].errorbar(xdata, ydata, yerr=yerr, fmt='k.') # Data
(yh1,yh2) = (axs[0].get_ylim()[1]*.9, axs[0].get_ylim()[1]*.8)
xh = axs[0].get_xlim()[0]*1.1
print axs[0].get_ylim()
print (yh1,yh2)
axs[0].text(xh, yh1, 'Ampli = %5.2f +/- %5.2f' % (amp, ampErr))
axs[0].text(xh, yh2, 'Index = %5.2f +/- %5.2f' % (index, indexErr))
axs[0].set_title('Best Fit Power Law')
axs[0].set_xlabel('X')
axs[0].set_ylabel('Y')
# xlim(1, 11)
#
# subplot(2, 1, 2)
axs[1].loglog(xdata, powerlaw(xdata, amp, index))
axs[1].errorbar(xdata, ydata, yerr=yerr, fmt='k.') # Data
axs[1].set_xlabel('X (log scale)')
axs[1].set_ylabel('Y (log scale)')
import datetime
figfname = datetime.datetime.now().strftime("%d%b%y")+"_pl"
plt.savefig(figfname, bbox_inches='tight')
return figfname
def deg_vcnt_to_disk(orig_graph, synthetic_graphs):
df = pd.DataFrame(orig_graph.degree().items())
gb = df.groupby([1]).count()
# gb.to_csv("Results/deg_orig_"+orig_graph.name+".tsv", sep='\t', header=True)
gb.index.rename('k',inplace=True)
gb.columns=['vcnt']
gb.to_csv("Results/deg_orig_"+orig_graph.name+".tsv", sep='\t', header=True)
# ## - group of synth graphs -
deg_df = pd.DataFrame()
for g in synthetic_graphs:
d = g.degree()
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby(by=[1]).count()
# Degree vs cnt
deg_df = pd.concat([deg_df, gb], axis=1) # Appends to bottom new DFs
# print gb
deg_df['mean'] = deg_df.mean(axis=1)
deg_df.index.rename('k',inplace=True)
deg_df['mean'].to_csv("Results/deg_xphrg_"+orig_graph.name+".tsv", sep='\t', header=True)
def plot_g_hstars(orig_graph, synthetic_graphs):
df = pd.DataFrame(orig_graph.degree().items())
gb = df.groupby([1]).count()
# gb.to_csv("Results/deg_orig_"+orig_graph.name+".tsv", sep='\t', header=True)
gb.index.rename('k',inplace=True)
gb.columns=['vcnt']
# k_cnt = [(x.tolist(),y.values[0]) for x,y in gb.iterrows()]
xdata = np.array([x.tolist() for x,y in gb.iterrows()])
ydata = np.array([y.values[0] for x,y in gb.iterrows()])
yerr = ydata *0.000001
fig, ax = plt.subplots()
ax.plot(gb.index.values, gb['vcnt'].values,'-o', markersize=8, markerfacecolor='w', markeredgecolor=[0,0,1], alpha=0.5, label="orig")
ofname = pwrlaw_plot(xdata, ydata,yerr)
if os.path.exists(ofname): print '... Plot saved to:', ofname
deg_df = pd.DataFrame()
for g in synthetic_graphs:
d = g.degree()
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby(by=[1]).count()
# Degree vs cnt
deg_df = pd.concat([deg_df, gb], axis=1) # Appends to bottom new DFs
# print gb
deg_df['mean'] = deg_df.mean(axis=1)
deg_df.index.rename('k',inplace=True)
# ax.plot(y=deg_df.mean(axis=1))
# ax.plot(y=deg_df.median(axis=1))
# ax.plot()
# orig
deg_df.mean(axis=1).plot(ax=ax,label='mean',color='r')
deg_df.median(axis=1).plot(ax=ax,label='median',color='g')
ax.fill_between(deg_df.index, deg_df.mean(axis=1) - deg_df.sem(axis=1),
deg_df.mean(axis=1) + deg_df.sem(axis=1), alpha=0.2, label="se")
# ax.plot(k_cnt)
# deg_df.plot(ax=ax)
# for x,y in k_cnt:
# if DBG: print "{}\t{}".format(x,y)
#
#
# for g in synths:
# df = pd.DataFrame(g.degree().items())
# gb = df.groupby([1]).count()
# # gb.plot(ax=ax)
# for x,y in k_cnt:
# if DBG: print "{}\t{}".format(x,y)
#
# # Curve-fit
#
plt.savefig('tmpfig', bbox_inches='tight')
def get_hrg_production_rules(edgelist_data_frame, graph_name):
from growing import derive_prules_from
df = edgelist_data_frame
try:
G = nx.from_pandas_dataframe(df, 'src', 'trg', ['ts']) # whole graph
except Exception, e:
print '==========================\n\t',
print str(e)
traceback.print_exc()
G = nx.from_pandas_dataframe(df, 'src', 'trg')
# os._exit(1)
G.name = graph_name
prules = derive_prules_from([G])
# Synthetic Graphs
hStars = grow_exact_size_hrg_graphs_from_prod_rules(prules[0], graph_name, G.number_of_nodes(),10)
print '... hStar graphs:', len(hStars)
# plot_g_hstars(G,hStars)
deg_vcnt_to_disk(G, hStars)
if 1:
metricx = ['degree']# ,'hops', 'clust', 'assort', 'kcore','eigen','gcd']
metrics.network_properties([G], metricx, hStars, name=graph_name, out_tsv=True)
if __name__ == '__main__':
parser = get_parser()
args = vars(parser.parse_args())
in_file = args['g_fname'][0]
dataframes = pandas_dataframes_from_edgelists([in_file])
df = dataframes[0]
# g_name = os.path.basename(in_file).lstrip('out.')
g_name = os.path.basename(in_file).split('.')[1]
print '...', g_name
if args['chunglu']:
print 'Generate chunglu graphs given an edgelist'
sys.exit(0)
elif args['kron']:
print 'Generate Kronecker product graphs given an edgelist'
sys.exit(0)
try:
get_hrg_production_rules(df,g_name)
except Exception, e:
print 'ERROR, UNEXPECTED SAVE PLOT EXCEPTION'
print str(e)
traceback.print_exc()
os._exit(1)
sys.exit(0)
|
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
def check_results_dict_not_empty(self, results):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
result = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(result)
def test_inference_no_configs_eager(self):
MODEL_ID = "sshleifer/tiny-gpt2"
benchmark_args = TensorFlowBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
eager_mode=True,
multi_process=False,
)
benchmark = TensorFlowBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_inference_no_configs_only_pretrain(self):
MODEL_ID = "sshleifer/tiny-distilbert-base-uncased-finetuned-sst-2-english"
benchmark_args = TensorFlowBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
only_pretrain_model=True,
)
benchmark = TensorFlowBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_inference_no_configs_graph(self):
MODEL_ID = "sshleifer/tiny-gpt2"
benchmark_args = TensorFlowBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = TensorFlowBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_inference_with_configs_eager(self):
MODEL_ID = "sshleifer/tiny-gpt2"
config = AutoConfig.from_pretrained(MODEL_ID)
benchmark_args = TensorFlowBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
eager_mode=True,
multi_process=False,
)
benchmark = TensorFlowBenchmark(benchmark_args, [config])
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_inference_with_configs_graph(self):
MODEL_ID = "sshleifer/tiny-gpt2"
config = AutoConfig.from_pretrained(MODEL_ID)
benchmark_args = TensorFlowBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = TensorFlowBenchmark(benchmark_args, [config])
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_train_no_configs(self):
MODEL_ID = "sshleifer/tiny-gpt2"
benchmark_args = TensorFlowBenchmarkArguments(
models=[MODEL_ID],
training=True,
inference=False,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = TensorFlowBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def test_train_with_configs(self):
MODEL_ID = "sshleifer/tiny-gpt2"
config = AutoConfig.from_pretrained(MODEL_ID)
benchmark_args = TensorFlowBenchmarkArguments(
models=[MODEL_ID],
training=True,
inference=False,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = TensorFlowBenchmark(benchmark_args, [config])
results = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def test_inference_encoder_decoder_with_configs(self):
MODEL_ID = "patrickvonplaten/t5-tiny-random"
config = AutoConfig.from_pretrained(MODEL_ID)
benchmark_args = TensorFlowBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
multi_process=False,
)
benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
def test_inference_no_configs_xla(self):
MODEL_ID = "sshleifer/tiny-gpt2"
benchmark_args = TensorFlowBenchmarkArguments(
models=[MODEL_ID],
training=False,
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
use_xla=True,
multi_process=False,
)
benchmark = TensorFlowBenchmark(benchmark_args)
results = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def test_save_csv_files(self):
MODEL_ID = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
benchmark_args = TensorFlowBenchmarkArguments(
models=[MODEL_ID],
inference=True,
save_to_csv=True,
sequence_lengths=[8],
batch_sizes=[1],
inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
multi_process=False,
)
benchmark = TensorFlowBenchmark(benchmark_args)
benchmark.run()
self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())
def test_trace_memory(self):
MODEL_ID = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(summary):
self.assertTrue(hasattr(summary, "sequential"))
self.assertTrue(hasattr(summary, "cumulative"))
self.assertTrue(hasattr(summary, "current"))
self.assertTrue(hasattr(summary, "total"))
with tempfile.TemporaryDirectory() as tmp_dir:
benchmark_args = TensorFlowBenchmarkArguments(
models=[MODEL_ID],
inference=True,
sequence_lengths=[8],
batch_sizes=[1],
log_filename=os.path.join(tmp_dir, "log.txt"),
log_print=True,
trace_memory_line_by_line=True,
eager_mode=True,
multi_process=False,
)
benchmark = TensorFlowBenchmark(benchmark_args)
result = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
|
|
# pylint: disable=invalid-name
# pylint: disable=g-long-ternary
# Copyright 2021 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test utils for Bzlmod."""
import base64
import hashlib
import json
import os
import pathlib
import shutil
import urllib.request
import zipfile
def download(url):
"""Download a file and return its content in bytes."""
response = urllib.request.urlopen(url)
return response.read()
def read(path):
"""Read a file and return its content in bytes."""
with open(str(path), 'rb') as f:
return f.read()
def integrity(data):
"""Calculate the integration value of the data with sha256."""
hash_value = hashlib.sha256(data)
return 'sha256-' + base64.b64encode(hash_value.digest()).decode()
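# For example, integrity(b'hello') yields the value Bazel expects in
# source.json: 'sha256-LPJNul+wow4m6DsqxbninhsWHlwfp0JecwQzYpOLmCQ='.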
def scratchFile(path, lines=None):
"""Creates a file at the given path with the given content."""
with open(str(path), 'w') as f:
if lines:
for l in lines:
f.write(l)
f.write('\n')
class Module:
"""A class to represent information of a Bazel module."""
def __init__(self, name, version):
self.name = name
self.version = version
self.archive_url = None
self.strip_prefix = ''
self.module_dot_bazel = None
self.patches = []
self.patch_strip = 0
def set_source(self, archive_url, strip_prefix=None):
self.archive_url = archive_url
self.strip_prefix = strip_prefix
return self
def set_module_dot_bazel(self, module_dot_bazel):
self.module_dot_bazel = module_dot_bazel
return self
def set_patches(self, patches, patch_strip):
self.patches = patches
self.patch_strip = patch_strip
return self
class BazelRegistry:
"""A class to help create a Bazel module project from scatch and add it into the registry."""
def __init__(self, root, registry_suffix=''):
self.root = pathlib.Path(root)
self.projects = self.root.joinpath('projects')
self.projects.mkdir(parents=True, exist_ok=True)
self.archives = self.root.joinpath('archives')
self.archives.mkdir(parents=True, exist_ok=True)
self.registry_suffix = registry_suffix
def getURL(self):
"""Return the URL of this registry."""
return self.root.resolve().as_uri()
def generateCcSource(self, name, version, deps=None, repo_names=None):
"""Generate a cc project with given dependency information.
1. The cc project implements a hello_<lib_name> function.
2. The hello_<lib_name> function calls the same function of its
dependencies.
3. The hello_<lib_name> function prints "<caller name> =>
<lib_name@version>".
4. The BUILD file references the dependencies as their desired repo names.
Args:
name: The module name.
version: The module version.
deps: The dependencies of this module.
repo_names: The desired repository name for some dependencies.
Returns:
The generated source directory.
"""
src_dir = self.projects.joinpath(name, version)
src_dir.mkdir(parents=True, exist_ok=True)
if not deps:
deps = {}
if not repo_names:
repo_names = {}
for dep in deps:
if dep not in repo_names:
repo_names[dep] = dep
def calc_repo_name_str(dep):
if dep == repo_names[dep]:
return ''
return ', repo_name = "%s"' % repo_names[dep]
scratchFile(src_dir.joinpath('WORKSPACE'))
scratchFile(
src_dir.joinpath('MODULE.bazel'), [
'module(',
' name = "%s",' % name,
' version = "%s",' % version,
' compatibility_level = 1,',
')',
] + [
'bazel_dep(name = "%s", version = "%s"%s)' %
(dep, version, calc_repo_name_str(dep))
for dep, version in deps.items()
])
scratchFile(
src_dir.joinpath(name.lower() + '.h'), [
'#ifndef %s_H' % name.upper(),
'#define %s_H' % name.upper(),
'#include <string>',
'void hello_%s(const std::string& caller);' % name.lower(),
'#endif',
])
scratchFile(
src_dir.joinpath(name.lower() + '.cc'), [
'#include <stdio.h>',
'#include "%s.h"' % name.lower(),
] + ['#include "%s.h"' % dep.lower() for dep in deps] + [
'void hello_%s(const std::string& caller) {' % name.lower(),
' std::string lib_name = "%s@%s%s";' %
(name, version, self.registry_suffix),
' printf("%s => %s\\n", caller.c_str(), lib_name.c_str());',
] + [' hello_%s(lib_name);' % dep.lower() for dep in deps] + [
'}',
])
scratchFile(
src_dir.joinpath('BUILD'), [
'package(default_visibility = ["//visibility:public"])',
'cc_library(',
' name = "lib_%s",' % name.lower(),
' srcs = ["%s.cc"],' % name.lower(),
' hdrs = ["%s.h"],' % name.lower(),
] + ([
' deps = ["%s"],' % ('", "'.join([
'@%s//:lib_%s' % (repo_names[dep], dep.lower()) for dep in deps
])),
] if deps else []) + [
')',
])
return src_dir
def createArchive(self, name, version, src_dir):
"""Create an archive with a given source directory."""
zip_path = self.archives.joinpath('%s.%s.zip' % (name, version))
zip_obj = zipfile.ZipFile(str(zip_path), 'w')
for foldername, _, filenames in os.walk(str(src_dir)):
for filename in filenames:
filepath = os.path.join(foldername, filename)
zip_obj.write(filepath,
str(pathlib.Path(filepath).relative_to(src_dir)))
zip_obj.close()
return zip_path
def addModule(self, module):
"""Add a module into the registry."""
module_dir = self.root.joinpath('modules', module.name, module.version)
module_dir.mkdir(parents=True, exist_ok=True)
# Copy MODULE.bazel to the registry
module_dot_bazel = module_dir.joinpath('MODULE.bazel')
shutil.copy(str(module.module_dot_bazel), str(module_dot_bazel))
# Create source.json & copy patch files to the registry
source = {
'url': module.archive_url,
'integrity': integrity(download(module.archive_url)),
}
if module.strip_prefix:
source['strip_prefix'] = module.strip_prefix
if module.patches:
patch_dir = module_dir.joinpath('patches')
patch_dir.mkdir()
source['patches'] = {}
source['patch_strip'] = module.patch_strip
for patch_path in module.patches:
patch = pathlib.Path(patch_path)
source['patches'][patch.name] = integrity(read(patch))
shutil.copy(str(patch), str(patch_dir))
with module_dir.joinpath('source.json').open('w') as f:
json.dump(source, f, indent=4, sort_keys=True)
def createCcModule(self,
name,
version,
deps=None,
repo_names=None,
patches=None,
patch_strip=0):
"""Generate a cc project and add it as a module into the registry."""
src_dir = self.generateCcSource(name, version, deps, repo_names)
archive = self.createArchive(name, version, src_dir)
module = Module(name, version)
module.set_source(archive.resolve().as_uri())
module.set_module_dot_bazel(src_dir.joinpath('MODULE.bazel'))
if patches:
module.set_patches(patches, patch_strip)
self.addModule(module)
return self
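# Minimal usage sketch (illustrative; the registry root and module names are
# made up and not part of the original test suite):
if __name__ == '__main__':
  import tempfile
  demo_root = tempfile.mkdtemp()
  registry = BazelRegistry(demo_root)
  # 'bbb' depends on 'aaa'; sources, archives and registry metadata are all
  # generated under demo_root, and tests can point Bazel at the registry
  # via --registry=<registry.getURL()>.
  registry.createCcModule('aaa', '1.0').createCcModule(
      'bbb', '1.0', {'aaa': '1.0'})
  print('registry URL:', registry.getURL())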
|
|
#!/usr/bin/env python
from __future__ import print_function, division, unicode_literals
import os
from tempfile import TemporaryDirectory
import pika
from rethinkdb import RethinkDB
import traceback
from collections import namedtuple
import inspect
from contextlib import ExitStack
from protocolbuffers.messages_pb2 import Files, WorkerJobDescription
import logging
FORMAT = '%(asctime)s:%(name)s:%(levelname)s:%(message)s'
logging.basicConfig(format=FORMAT)
from worker.storageclient import StorageClient
from worker.fontbakery import (
Checker as FontBakeryCheckerWorker
, Distributor as FontBakeryDistributorWorker
)
from worker.diffenator import DiffenatorWorker
from worker.diffbrowsers import DiffbrowsersWorker
logger = logging.getLogger('FB_WORKER')
r = RethinkDB()
class Queue(object):
def __init__(self, channel, worker_name, end_name):
self.channel = channel
self._worker_name = worker_name
self._end_name = end_name
def _queue(self, message, queue_name):
options = pika.BasicProperties(
# TODO: do we need persistent here?
delivery_mode=2 # pika.spec.PERSISTENT_DELIVERY_MODE
)
# Default exchange
# The default exchange is a pre-declared direct exchange with no name,
# usually referred by the empty string "". When you use the default exchange,
# your message will be delivered to the queue with a name equal to the routing
# key of the message. Every queue is automatically bound to the default exchange
# with a routing key which is the same as the queue name.
routing_key = queue_name
self.channel.basic_publish(exchange=''
, routing_key=routing_key
, body=message.SerializeToString()
, properties=options)
def end(self, message):
return self._queue(message, self._end_name)
def worker(self, message):
return self._queue(message, self._worker_name)
def setLoglevel(logger, loglevel):
'''
loglevel, use: DEBUG, INFO, WARNING, ERROR, CRITICAL
'''
numeric_level = getattr(logging, loglevel.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % loglevel)
logger.setLevel(numeric_level)
Setup = namedtuple('Setup', ['log_level', 'db_host', 'db_port'
, 'db_user', 'db_password'
, 'msgqueue_host', 'cache_host', 'cache_port'
, 'persistence_host', 'persistence_port'
, 'ticks_to_flush'])
def getSetup():
log_level = os.environ.get("FONTBAKERY_WORKER_LOG_LEVEL", 'INFO')
# in gcloud, we use a cluster with proxy setup
# the proxy service is called: "rethinkdb-proxy" hence:
db_host = os.environ.get("RETHINKDB_PROXY_SERVICE_HOST", None)
db_port = os.environ.get("RETHINKDB_PROXY_SERVICE_PORT", 28015)
if db_host is None:
# Fall back to "rethinkdb-driver"
db_host = os.environ.get("RETHINKDB_DRIVER_SERVICE_HOST")
db_port = os.environ.get("RETHINKDB_DRIVER_SERVICE_PORT", 28015)
db_user = os.environ.get("RETHINKDB_USER", 'admin')
db_password = os.environ.get("RETHINKDB_PASSWORD", None)
# FIXME: Where would BROKER be set? RABBITMQ_SERVICE_SERVICE_HOST is
# set by kubernetes for the service named "rabbitmq-service" AFAIK
msgqueue_host = os.environ.get("RABBITMQ_SERVICE_SERVICE_HOST", os.environ.get("BROKER"))
cache_host = os.environ.get("FONTBAKERY_STORAGE_CACHE_SERVICE_HOST")
cache_port = os.environ.get("FONTBAKERY_STORAGE_CACHE_SERVICE_PORT", 50051)
persistence_host = os.environ.get("FONTBAKERY_STORAGE_PERSISTENCE_SERVICE_HOST")
persistence_port = os.environ.get("FONTBAKERY_STORAGE_PERSISTENCE_SERVICE_PORT", 50051)
  # A value of 1 reports every check result to the database immediately,
  # which gives good live-report granularity but also slows the database
  # down. At a massive scale of checkers, this can be a major knob for
  # tuning performance.
ticks_to_flush = int(os.environ.get("FONTBAKERY_CHECKER_TICKS_TO_FLUSH", 1))
return Setup(log_level, db_host, db_port, db_user, db_password
, msgqueue_host, cache_host, cache_port
, persistence_host, persistence_port
, ticks_to_flush)
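# Example (illustrative) environment for a local run; these are the same
# variable names getSetup() reads above, with made-up values:
#   RETHINKDB_DRIVER_SERVICE_HOST=localhost
#   RABBITMQ_SERVICE_SERVICE_HOST=localhost   (or BROKER=localhost)
#   FONTBAKERY_STORAGE_CACHE_SERVICE_HOST=localhost
#   FONTBAKERY_STORAGE_PERSISTENCE_SERVICE_HOST=localhost
#   FONTBAKERY_CHECKER_TICKS_TO_FLUSH=10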
def parse_job(workers, body):
try:
job_description = WorkerJobDescription()
job_description.ParseFromString(body)
logger.debug('Got job description: %s', job_description)
except Exception as e:
    # Cannot report this appropriately
logger.exception('Can\'t parse body %s', e)
raise
worker_name = job_description.worker_name
Worker = workers[worker_name]
job = Worker.JobType()
if not job_description.job.Unpack(job):
raise ValueError('job_description.job.Unpack Failed: expected type '
+ '{}:"{}" doesn\'t match "{}".'
.format(Worker.JobType, Worker.JobType.DESCRIPTOR.full_name
, job_description.job.TypeName()))
  return Worker, job
def getMandatoryParams(callable):
"""
If callable is a class, we'll get the non-optional parameters of the
constructor, i.e. the parameters of `__init__` without the `self`.
"""
params = inspect.signature(callable).parameters
return [name for name, param in params.items() \
if param.kind is inspect.Parameter.POSITIONAL_OR_KEYWORD \
# If the parameter has no default value, this
# attribute is set to Parameter.empty.
and param.default is inspect.Parameter.empty]
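# Illustration (hypothetical class, not part of this module): given
#   class W:
#     def __init__(self, job, queue, tmp_dir=None): ...
# getMandatoryParams(W) returns ['job', 'queue']; `self` is already omitted
# by inspect.signature() on a class, and `tmp_dir` has a default, so it is
# not mandatory.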
class CTX(object):
def __init__(self, Worker, resources):
self._worker = Worker(*resources)
def __enter__(self):
return self._worker.run
def __exit__(self, *exc):
'''
exc = ExceptionClass, exceptionClassInstance, classTracebackInstance
map(type, exc) = [<class 'type'>, <class 'ExceptionClass'>, <class 'traceback'>]
'''
# FIXME: raise if the cause of the error is a resource that got lost
# i.e. database, cache, queue because then kubernetes can restart
# the pod with its back-off delay
tb = None
if exc[0]:
# same as print_exception() in an except block
tb = ''.join(traceback.format_exception(*exc))
if hasattr(self._worker, 'finalize'):
      # Should return True if exc was dealt with successfully,
      # otherwise the exception will be re-raised.
# FIXME: finalize will need some resources as well!
return self._worker.finalize(tb, *exc)
    # If there was an exception but no finalize, the default is to
    # re-raise the exception, which is what happens when we return False.
    # Without an exception there's nothing to raise anyway.
return False
def consume(workers, static_resources, resource_managers, method, properties, body):
Worker, job = parse_job(workers, body)
logger.info('consuming a job for: %s with %s %s', Worker, method, properties)
with ExitStack() as stack:
# If __enter__ raises it's NOT handled by __exit__
# which is actually a good thing, only the call
# to worker, the content in the `with` block
# will be handled by __exit__.
# We don't do optional arguments
dependencies = getMandatoryParams(Worker)
resources = []
# somehow there should be a way to get resources like
# a tmpdir context managed ....
for name in dependencies:
if name == 'job':
resources.append(job)
elif name in resource_managers:
# for e.g. tmp_dir
context_manager = resource_managers[name]
resources.append(stack.enter_context(context_manager()))
else:
# If name is missing a KeyError will be raised
# which seems appropriate.
resources.append(static_resources[name])
# enter context and execute
run = stack.enter_context(CTX(Worker, resources))
run()
def main():
"""
  We don't handle uncaught exceptions here. If this fails, Kubernetes
  will restart the pod and ensure the delay between restarts grows
  (back-off).
"""
setup = getSetup()
setLoglevel(logger, setup.log_level)
# DEBUG is a lot of output!
setLoglevel(logging.getLogger('fontdiffenator'), 'INFO')
# setLoglevel(logging.getLogger('fontdiffenator'), setup.log_level)
logger.info('loglevel: ' + setup.log_level)
  logger.info('RethinkDB HOST %s PORT %s', setup.db_host, setup.db_port)
rdb_connection = r.connect(host=setup.db_host, port=setup.db_port
, user=setup.db_user, password=setup.db_password
, timeout=120)
rdb_name = 'fontbakery'
  queue_worker_name = 'fontbakery-worker'
  queue_end_name = 'fontbakery-worker-cleanup'
# http://pika.readthedocs.io/en/latest/examples/heartbeat_and_blocked_timeouts.html
connection = pika.BlockingConnection(
pika.ConnectionParameters(
host=setup.msgqueue_host
# for long running tasks
, heartbeat=20*60 # 20 minutes
# , socket_timeout=5
))
queue_channel = connection.channel()
queue_channel.basic_qos(prefetch_count=1)
queue_channel.queue_declare(queue=queue_worker_name, durable=True)
queue_channel.queue_declare(queue=queue_end_name, durable=True)
static_resources = dict(
logging=logger
, queue=Queue(queue_channel, queue_worker_name, queue_end_name)
, rethinkdb=(r, rdb_connection, rdb_name)
# if we want to read more data types this must probably change?
, cache=StorageClient(setup.cache_host, setup.cache_port, Files)
, persistence=StorageClient(setup.persistence_host, setup.persistence_port, Files)
# hmm, this is very specific for FontBakeryCheckerWorker
    # probably it should read its own, unique setup values, as done
    # e.g. in the `diffbrowsers` module of `DiffbrowsersWorker`
, ticks_to_flush=setup.ticks_to_flush
)
resource_managers = dict(
tmp_directory=TemporaryDirectory
)
workers = dict(
fontbakery=FontBakeryDistributorWorker
, fontbakery_checker=FontBakeryCheckerWorker
, diffenator=DiffenatorWorker
, diffbrowsers=DiffbrowsersWorker
)
logger.info('Waiting for messages in %s...', queue_worker_name)
# BlockingChannel has a generator
  # Why we ack immediately (in the `finally` block below): a job can run
  # much longer than the broker will wait for an ack and there's no way
  # to give a good estimate of how long a job will take. If the ack
  # timed out, the job would be reissued by the broker, creating an
  # infinite loop.
  # Ack immediately and see how to handle failed jobs at another
  # point of time.
for method, properties, body in queue_channel.consume(queue_worker_name):
logger.info('consuming incoming message ...')
try:
consume(workers, static_resources, resource_managers, method, properties, body)
except Exception as e:
# exceptions that come here should restart the pod!
# however that way, we don't see the exception log easily
# which is bad for debugging
logger.exception('consume FAILED: %s', e)
finally:
queue_channel.basic_ack(delivery_tag=method.delivery_tag)
# FIXME: we should not always ack the job if can't even mark it as
# failed in the RethinkDB-doc (or wherever???)
# publishing the job again, with increased retry count would be an
# option. (Needs a retry count field on the job).
# TODO: to keep jobs from disappearing (and also from being redelivered
# forever), a dead-end queue could be created.
# Also nice as a tactic would be to re-insert the job into the original
# queue, but with an incremented retry count. After {n} retries we could
# move it to the dead-end. That way, a temporary db failure or such would
# not be a "job killer".
# BUT: for now we need to see cases where, how and why this actually fails.
if __name__ == '__main__':
main()
|
|
from troposphere import (
Parameter,
Ref,
Output,
Tags,
GetAtt,
Base64,
Join,
Equals,
cloudwatch as cw,
ec2,
elasticloadbalancing as elb,
autoscaling as asg
)
from utils.cfn import get_recent_ami
from utils.constants import (
ALLOW_ALL_CIDR,
EC2_INSTANCE_TYPES,
GRAPHITE,
HTTP,
HTTPS,
POSTGRESQL,
REDIS,
RELP,
SSH,
STATSITE,
VPC_CIDR
)
from majorkirby import StackNode, MKUnresolvableInputError
class Application(StackNode):
INPUTS = {
'Tags': ['global:Tags'],
'Region': ['global:Region'],
'StackType': ['global:StackType'],
'StackColor': ['global:StackColor'],
'KeyName': ['global:KeyName'],
'AvailabilityZones': ['global:AvailabilityZones',
'VPC:AvailabilityZones'],
'RDSPassword': ['global:RDSPassword', 'DataPlane:RDSPassword'],
'AppServerInstanceType': ['global:AppServerInstanceType'],
'AppServerAMI': ['global:AppServerAMI'],
'AppServerInstanceProfile': ['global:AppServerInstanceProfile'],
'AppServerAutoScalingDesired': ['global:AppServerAutoScalingDesired'],
'AppServerAutoScalingMin': ['global:AppServerAutoScalingMin'],
'AppServerAutoScalingMax': ['global:AppServerAutoScalingMax'],
'SSLCertificateARN': ['global:SSLCertificateARN'],
'PublicSubnets': ['global:PublicSubnets', 'VPC:PublicSubnets'],
'PrivateSubnets': ['global:PrivateSubnets', 'VPC:PrivateSubnets'],
'VpcId': ['global:VpcId', 'VPC:VpcId'],
'GlobalNotificationsARN': ['global:GlobalNotificationsARN'],
'BlueTileServerDistributionEndpoint':
['global:BlueTileServerDistributionEndpoint',
'TileDeliveryNetwork:BlueTileServerDistributionEndpoint'],
'GreenTileServerDistributionEndpoint':
['global:GreenTileServerDistributionEndpoint',
'TileDeliveryNetwork:GreenTileServerDistributionEndpoint'],
'ITSIBaseURL': ['global:ITSIBaseURL'],
'ITSISecretKey': ['global:ITSISecretKey'],
}
DEFAULTS = {
'Tags': {},
'Region': 'us-east-1',
'StackType': 'Staging',
'StackColor': 'Green',
'KeyName': 'mmw-stg',
'AppServerInstanceType': 't2.micro',
'AppServerInstanceProfile': 'AppServerInstanceProfile',
'AppServerAutoScalingDesired': '1',
'AppServerAutoScalingMin': '1',
'AppServerAutoScalingMax': '1',
}
ATTRIBUTES = {
'StackType': 'StackType',
'StackColor': 'StackColor',
}
def set_up_stack(self):
super(Application, self).set_up_stack()
tags = self.get_input('Tags').copy()
tags.update({'StackType': 'Application'})
self.default_tags = tags
self.region = self.get_input('Region')
self.add_description('Application server stack for MMW')
# Parameters
self.color = self.add_parameter(Parameter(
'StackColor', Type='String',
Description='Stack color', AllowedValues=['Blue', 'Green']
), 'StackColor')
self.keyname = self.add_parameter(Parameter(
'KeyName', Type='String',
Description='Name of an existing EC2 key pair'
), 'KeyName')
self.availability_zones = self.add_parameter(Parameter(
'AvailabilityZones', Type='CommaDelimitedList',
Description='Comma delimited list of availability zones'
), 'AvailabilityZones')
self.rds_password = self.add_parameter(Parameter(
'RDSPassword', Type='String', NoEcho=True,
Description='Database password',
), 'RDSPassword')
self.app_server_instance_type = self.add_parameter(Parameter(
'AppServerInstanceType', Type='String', Default='t2.micro',
Description='Application server EC2 instance type',
AllowedValues=EC2_INSTANCE_TYPES,
ConstraintDescription='must be a valid EC2 instance type.'
), 'AppServerInstanceType')
self.app_server_ami = self.add_parameter(Parameter(
'AppServerAMI', Type='String',
Default=self.get_recent_app_server_ami(),
Description='Application server AMI'
), 'AppServerAMI')
self.app_server_instance_profile = self.add_parameter(Parameter(
'AppServerInstanceProfile', Type='String',
Default='AppServerInstanceProfile',
Description='Application server instance profile'
), 'AppServerInstanceProfile')
self.app_server_auto_scaling_desired = self.add_parameter(Parameter(
'AppServerAutoScalingDesired', Type='String', Default='1',
Description='Application server AutoScalingGroup desired'
), 'AppServerAutoScalingDesired')
self.app_server_auto_scaling_min = self.add_parameter(Parameter(
'AppServerAutoScalingMin', Type='String', Default='1',
Description='Application server AutoScalingGroup minimum'
), 'AppServerAutoScalingMin')
self.app_server_auto_scaling_max = self.add_parameter(Parameter(
'AppServerAutoScalingMax', Type='String', Default='1',
Description='Application server AutoScalingGroup maximum'
), 'AppServerAutoScalingMax')
self.ssl_certificate_arn = self.add_parameter(Parameter(
'SSLCertificateARN', Type='String',
            Description='ARN for an SSL certificate stored in IAM'
), 'SSLCertificateARN')
self.public_subnets = self.add_parameter(Parameter(
'PublicSubnets', Type='CommaDelimitedList',
Description='A list of public subnets'
), 'PublicSubnets')
self.private_subnets = self.add_parameter(Parameter(
'PrivateSubnets', Type='CommaDelimitedList',
Description='A list of private subnets'
), 'PrivateSubnets')
self.vpc_id = self.add_parameter(Parameter(
'VpcId', Type='String',
Description='VPC ID'
), 'VpcId')
self.notification_topic_arn = self.add_parameter(Parameter(
'GlobalNotificationsARN', Type='String',
Description='ARN for an SNS topic to broadcast notifications'
), 'GlobalNotificationsARN')
self.blue_tile_distribution_endpoint = self.add_parameter(Parameter(
'BlueTileServerDistributionEndpoint', Type='String',
Description='Endpoint for blue tile CloudFront distribution'
), 'BlueTileServerDistributionEndpoint')
self.green_tile_distribution_endpoint = self.add_parameter(Parameter(
'GreenTileServerDistributionEndpoint', Type='String',
Description='Endpoint for green tile CloudFront distribution'
), 'GreenTileServerDistributionEndpoint')
self.itsi_base_url = self.add_parameter(Parameter(
'ITSIBaseURL', Type='String',
Description='Base URL for ITSI portal'
), 'ITSIBaseURL')
self.itsi_secret_key = self.add_parameter(Parameter(
'ITSISecretKey', Type='String', NoEcho=True,
Description='Secret key for ITSI portal integration'
), 'ITSISecretKey')
app_server_lb_security_group, \
app_server_security_group = self.create_security_groups()
app_server_lb = self.create_load_balancer(app_server_lb_security_group)
self.create_auto_scaling_resources(app_server_security_group,
app_server_lb)
self.create_cloud_watch_resources(app_server_lb)
self.add_output(Output('AppServerLoadBalancerEndpoint',
Value=GetAtt(app_server_lb, 'DNSName')))
self.add_output(Output('AppServerLoadBalancerHostedZoneNameID',
Value=GetAtt(app_server_lb,
'CanonicalHostedZoneNameID')))
def get_recent_app_server_ami(self):
try:
app_server_ami_id = self.get_input('AppServerAMI')
except MKUnresolvableInputError:
app_server_ami_id = get_recent_ami(self.aws_profile, 'mmw-app-*')
return app_server_ami_id
def create_security_groups(self):
app_server_lb_security_group_name = 'sgAppServerLoadBalancer'
app_server_lb_security_group = self.add_resource(ec2.SecurityGroup(
app_server_lb_security_group_name,
GroupDescription='Enables access to application servers via a '
'load balancer',
VpcId=Ref(self.vpc_id),
SecurityGroupIngress=[
ec2.SecurityGroupRule(
IpProtocol='tcp', CidrIp=ALLOW_ALL_CIDR, FromPort=p,
ToPort=p
)
for p in [HTTP, HTTPS]
],
SecurityGroupEgress=[
ec2.SecurityGroupRule(
IpProtocol='tcp', CidrIp=VPC_CIDR, FromPort=p, ToPort=p
)
for p in [HTTP]
],
Tags=self.get_tags(Name=app_server_lb_security_group_name)
))
app_server_security_group_name = 'sgAppServer'
app_server_security_group = self.add_resource(ec2.SecurityGroup(
app_server_security_group_name,
DependsOn='sgAppServerLoadBalancer',
GroupDescription='Enables access to application servers',
VpcId=Ref(self.vpc_id),
SecurityGroupIngress=[
ec2.SecurityGroupRule(
IpProtocol='tcp', CidrIp=VPC_CIDR, FromPort=p, ToPort=p
)
for p in [SSH, HTTP]
] + [
ec2.SecurityGroupRule(
IpProtocol='tcp', SourceSecurityGroupId=Ref(sg),
FromPort=HTTP, ToPort=HTTP
)
for sg in [app_server_lb_security_group]
],
SecurityGroupEgress=[
ec2.SecurityGroupRule(
IpProtocol='tcp', CidrIp=VPC_CIDR, FromPort=p, ToPort=p
)
for p in [GRAPHITE, POSTGRESQL, REDIS, STATSITE, RELP]
] + [
ec2.SecurityGroupRule(
IpProtocol='udp', CidrIp=VPC_CIDR, FromPort=p, ToPort=p
)
for p in [STATSITE]
] + [
ec2.SecurityGroupRule(
IpProtocol='tcp', CidrIp=ALLOW_ALL_CIDR, FromPort=p,
ToPort=p
)
for p in [HTTP, HTTPS]
],
Tags=self.get_tags(Name=app_server_security_group_name)
))
return app_server_lb_security_group, app_server_security_group
def create_load_balancer(self, app_server_lb_security_group):
app_server_lb_name = 'elbAppServer'
return self.add_resource(elb.LoadBalancer(
app_server_lb_name,
ConnectionDrainingPolicy=elb.ConnectionDrainingPolicy(
Enabled=True,
Timeout=300,
),
CrossZone=True,
SecurityGroups=[Ref(app_server_lb_security_group)],
Listeners=[
elb.Listener(
LoadBalancerPort='80',
InstancePort='80',
Protocol='HTTP',
),
elb.Listener(
LoadBalancerPort='443',
InstancePort='80',
Protocol='HTTPS',
SSLCertificateId=Ref(self.ssl_certificate_arn)
)
],
HealthCheck=elb.HealthCheck(
Target='HTTP:80/health-check/',
HealthyThreshold='3',
UnhealthyThreshold='2',
Interval='30',
Timeout='5',
),
Subnets=Ref(self.public_subnets),
Tags=self.get_tags(Name=app_server_lb_name)
))
def create_auto_scaling_resources(self, app_server_security_group,
app_server_lb):
self.add_condition('BlueCondition', Equals('Blue', Ref(self.color)))
self.add_condition('GreenCondition', Equals('Green', Ref(self.color)))
blue_app_server_launch_config = self.add_resource(
asg.LaunchConfiguration(
'lcAppServerBlue',
Condition='BlueCondition',
ImageId=Ref(self.app_server_ami),
IamInstanceProfile=Ref(self.app_server_instance_profile),
InstanceType=Ref(self.app_server_instance_type),
KeyName=Ref(self.keyname),
SecurityGroups=[Ref(app_server_security_group)],
UserData=Base64(
Join('', self.get_cloud_config(
self.blue_tile_distribution_endpoint)))
))
self.add_resource(
asg.AutoScalingGroup(
'asgAppServerBlue',
AvailabilityZones=Ref(self.availability_zones),
Condition='BlueCondition',
Cooldown=300,
DesiredCapacity=Ref(self.app_server_auto_scaling_desired),
HealthCheckGracePeriod=600,
HealthCheckType='ELB',
LaunchConfigurationName=Ref(blue_app_server_launch_config),
LoadBalancerNames=[Ref(app_server_lb)],
MaxSize=Ref(self.app_server_auto_scaling_max),
MinSize=Ref(self.app_server_auto_scaling_min),
NotificationConfigurations=[
asg.NotificationConfigurations(
TopicARN=Ref(self.notification_topic_arn),
NotificationTypes=[
asg.EC2_INSTANCE_LAUNCH,
asg.EC2_INSTANCE_LAUNCH_ERROR,
asg.EC2_INSTANCE_TERMINATE,
asg.EC2_INSTANCE_TERMINATE_ERROR
]
)
],
VPCZoneIdentifier=Ref(self.private_subnets),
Tags=[asg.Tag('Name', 'AppServer', True)])
)
green_app_server_launch_config = self.add_resource(
asg.LaunchConfiguration(
'lcAppServerGreen',
Condition='GreenCondition',
ImageId=Ref(self.app_server_ami),
IamInstanceProfile=Ref(self.app_server_instance_profile),
InstanceType=Ref(self.app_server_instance_type),
KeyName=Ref(self.keyname),
SecurityGroups=[Ref(app_server_security_group)],
UserData=Base64(
Join('', self.get_cloud_config(
self.green_tile_distribution_endpoint)))
))
self.add_resource(
asg.AutoScalingGroup(
'asgAppServerGreen',
AvailabilityZones=Ref(self.availability_zones),
Condition='GreenCondition',
Cooldown=300,
DesiredCapacity=Ref(self.app_server_auto_scaling_desired),
HealthCheckGracePeriod=600,
HealthCheckType='ELB',
LaunchConfigurationName=Ref(green_app_server_launch_config),
LoadBalancerNames=[Ref(app_server_lb)],
MaxSize=Ref(self.app_server_auto_scaling_max),
MinSize=Ref(self.app_server_auto_scaling_min),
NotificationConfigurations=[
asg.NotificationConfigurations(
TopicARN=Ref(self.notification_topic_arn),
NotificationTypes=[
asg.EC2_INSTANCE_LAUNCH,
asg.EC2_INSTANCE_LAUNCH_ERROR,
asg.EC2_INSTANCE_TERMINATE,
asg.EC2_INSTANCE_TERMINATE_ERROR
]
)
],
VPCZoneIdentifier=Ref(self.private_subnets),
Tags=[asg.Tag('Name', 'AppServer', True)])
)
def get_cloud_config(self, tile_distribution_endpoint):
return ['#cloud-config\n',
'\n',
'write_files:\n',
' - path: /etc/mmw.d/env/MMW_STACK_COLOR\n',
' permissions: 0750\n',
' owner: root:mmw\n',
' content: ', Ref(self.color), '\n',
' - path: /etc/mmw.d/env/MMW_DB_PASSWORD\n',
' permissions: 0750\n',
' owner: root:mmw\n',
' content: ', Ref(self.rds_password), '\n',
' - path: /etc/mmw.d/env/MMW_TILER_HOST\n',
' permissions: 0750\n',
' owner: root:mmw\n',
' content: ', Ref(tile_distribution_endpoint), '\n',
' - path: /etc/mmw.d/env/MMW_ITSI_BASE_URL\n',
' permissions: 0750\n',
' owner: root:mmw\n',
' content: ', Ref(self.itsi_base_url), '\n',
' - path: /etc/mmw.d/env/MMW_ITSI_SECRET_KEY\n',
' permissions: 0750\n',
' owner: root:mmw\n',
' content: ', Ref(self.itsi_secret_key)]
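    # For illustration, once CloudFormation resolves the Refs the rendered
    # user data looks roughly like this (values are placeholders):
    #   #cloud-config
    #
    #   write_files:
    #     - path: /etc/mmw.d/env/MMW_STACK_COLOR
    #       permissions: 0750
    #       owner: root:mmw
    #       content: Blue
    #     ... (one entry per file listed above) ...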
def create_cloud_watch_resources(self, app_server_lb):
self.add_resource(cw.Alarm(
'alarmAppServerBackend4XX',
AlarmDescription='Application server backend 4XXs',
AlarmActions=[Ref(self.notification_topic_arn)],
Statistic='Sum',
Period=300,
Threshold='20',
EvaluationPeriods=1,
ComparisonOperator='GreaterThanThreshold',
MetricName='HTTPCode_Backend_4XX',
Namespace='AWS/ELB',
Dimensions=[
cw.MetricDimension(
'metricLoadBalancerName',
Name='LoadBalancerName',
Value=Ref(app_server_lb)
)
],
))
self.add_resource(cw.Alarm(
'alarmAppServerBackend5XX',
AlarmDescription='Application server backend 5XXs',
AlarmActions=[Ref(self.notification_topic_arn)],
Statistic='Sum',
Period=60,
Threshold='0',
EvaluationPeriods=1,
ComparisonOperator='GreaterThanThreshold',
MetricName='HTTPCode_Backend_5XX',
Namespace='AWS/ELB',
Dimensions=[
cw.MetricDimension(
'metricLoadBalancerName',
Name='LoadBalancerName',
Value=Ref(app_server_lb)
)
],
))
def get_tags(self, **kwargs):
"""Helper method to return Troposphere tags + default tags
Args:
**kwargs: arbitrary keyword arguments to be used as tags
"""
kwargs.update(self.default_tags)
return Tags(**kwargs)
|
|
#!/usr/bin/python
#
# Copyright 2017 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'
}
DOCUMENTATION = '''
module: bigip_ssl_certificate
short_description: Import/Delete certificates from BIG-IP
description:
- This module will import/delete SSL certificates on BIG-IP LTM.
Certificates can be imported from certificate and key files on the local
disk, in PEM format.
version_added: 2.2
options:
cert_content:
description:
      - When used instead of C(cert_src), sets the contents of a certificate directly
to the specified value. This is used with lookup plugins or for anything
with formatting or templating. Either one of C(key_src),
C(key_content), C(cert_src) or C(cert_content) must be provided when
C(state) is C(present).
required: false
key_content:
description:
      - When used instead of C(key_src), sets the contents of a certificate key
directly to the specified value. This is used with lookup plugins or for
anything with formatting or templating. Either one of C(key_src),
C(key_content), C(cert_src) or C(cert_content) must be provided when
C(state) is C(present).
required: false
state:
description:
- Certificate and key state. This determines if the provided certificate
and key is to be made C(present) on the device or C(absent).
required: true
default: present
choices:
- present
- absent
partition:
description:
- BIG-IP partition to use when adding/deleting certificate.
required: false
default: Common
name:
description:
- SSL Certificate Name. This is the cert/key pair name used
when importing a certificate/key into the F5. It also
determines the filenames of the objects on the LTM
(:Partition:name.cer_11111_1 and :Partition_name.key_11111_1).
required: true
cert_src:
description:
- This is the local filename of the certificate. Either one of C(key_src),
C(key_content), C(cert_src) or C(cert_content) must be provided when
C(state) is C(present).
required: false
key_src:
description:
- This is the local filename of the private key. Either one of C(key_src),
C(key_content), C(cert_src) or C(cert_content) must be provided when
C(state) is C(present).
required: false
passphrase:
description:
- Passphrase on certificate private key
required: false
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
- This module does not behave like other modules that you might include in
roles where referencing files or templates first looks in the role's
files or templates directory. To have it behave that way, use the Ansible
file or template lookup (see Examples). The lookups behave as expected in
a role context.
extends_documentation_fragment: f5
requirements:
- f5-sdk >= 1.5.0
- BIG-IP >= v12
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Import PEM Certificate from local disk
bigip_ssl_certificate:
name: "certificate-name"
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
cert_src: "/path/to/cert.crt"
key_src: "/path/to/key.key"
delegate_to: localhost
- name: Use a file lookup to import PEM Certificate
bigip_ssl_certificate:
name: "certificate-name"
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
cert_content: "{{ lookup('file', '/path/to/cert.crt') }}"
key_content: "{{ lookup('file', '/path/to/key.key') }}"
delegate_to: localhost
- name: "Delete Certificate"
bigip_ssl_certificate:
name: "certificate-name"
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "absent"
delegate_to: localhost
'''
RETURN = '''
cert_name:
description: The name of the certificate that the user provided
returned:
- created
type: string
sample: "cert1"
key_filename:
description:
- The name of the SSL certificate key. The C(key_filename) and
C(cert_filename) will be similar to each other, however the
C(key_filename) will have a C(.key) extension.
returned:
- created
type: string
sample: "cert1.key"
key_checksum:
description: SHA1 checksum of the key that was provided.
  returned:
- changed
- created
type: string
sample: "cf23df2207d99a74fbe169e3eba035e633b65d94"
key_source_path:
description: Path on BIG-IP where the source of the key is stored
  returned: created
type: string
sample: "/var/config/rest/downloads/cert1.key"
cert_filename:
description:
- The name of the SSL certificate. The C(cert_filename) and
C(key_filename) will be similar to each other, however the
C(cert_filename) will have a C(.crt) extension.
returned:
- created
type: string
sample: "cert1.crt"
cert_checksum:
description: SHA1 checksum of the cert that was provided.
  returned:
- changed
- created
type: string
sample: "f7ff9e8b7bb2e09b70935a5d785e0cc5d9d0abf0"
cert_source_path:
description: Path on BIG-IP where the source of the certificate is stored.
  returned: created
type: string
sample: "/var/config/rest/downloads/cert1.crt"
'''
import hashlib
import StringIO
import os
import re
from ansible.module_utils.f5_utils import *
class Parameters(AnsibleF5Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
def api_params(self):
result = {}
for api_attribute in self.api_attributes:
if self.api_map is not None and api_attribute in self.api_map:
result[api_attribute] = getattr(self, self.api_map[api_attribute])
else:
result[api_attribute] = getattr(self, api_attribute)
result = self._filter_params(result)
return result
def _get_hash(self, content):
k = hashlib.sha1()
s = StringIO.StringIO(content)
while True:
data = s.read(1024)
if not data:
break
k.update(data)
return k.hexdigest()
@property
def checksum(self):
if self._values['checksum'] is None:
return None
pattern = r'SHA1:\d+:(?P<value>[\w+]{40})'
matches = re.match(pattern, self._values['checksum'])
if matches:
return matches.group('value')
else:
return None
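    # Illustration of the checksum property above (hypothetical value): a
    # device-reported checksum such as
    #   'SHA1:1914:f7ff9e8b7bb2e09b70935a5d785e0cc5d9d0abf0'
    # is reduced to the bare 40-character digest.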
class KeyParameters(Parameters):
api_map = {
'sourcePath': 'key_source_path'
}
updatables = ['key_source_path']
returnables = ['key_filename', 'key_checksum', 'key_source_path']
api_attributes = ['passphrase', 'sourcePath']
@property
def key_filename(self):
fname, fext = os.path.splitext(self.name)
if fext == '':
return fname + '.key'
else:
return self.name
@property
def key_checksum(self):
if self.key_content is None:
return None
return self._get_hash(self.key_content)
    @property
    def key_src(self):
        return self._values.get('key_src')
    @key_src.setter
    def key_src(self, value):
        # Assigning key_src reads the key file from disk into key_content,
        # so file- and content-based input are handled uniformly downstream.
        try:
            with open(value) as fh:
                self.key_content = fh.read()
        except IOError:
            raise F5ModuleError(
                "The specified 'key_src' does not exist"
            )
@property
def key_source_path(self):
result = 'file://' + os.path.join(
BaseManager.download_path,
self.key_filename
)
return result
class CertParameters(Parameters):
api_map = {
'sourcePath': 'cert_source_path'
}
updatables = ['cert_source_path']
returnables = ['cert_filename', 'cert_checksum', 'cert_source_path']
api_attributes = ['sourcePath']
@property
def cert_checksum(self):
if self.cert_content is None:
return None
return self._get_hash(self.cert_content)
@property
def cert_filename(self):
fname, fext = os.path.splitext(self.name)
if fext == '':
return fname + '.crt'
else:
return self.name
@property
def cert_source_path(self):
result = 'file://' + os.path.join(
BaseManager.download_path,
self.cert_filename
)
return result
class ModuleManager(object):
def __init__(self, client):
self.client = client
def exec_module(self):
manager1 = self.get_manager('certificate')
manager2 = self.get_manager('key')
return self.execute_managers([manager1, manager2])
def execute_managers(self, managers):
results = {}
for manager in managers:
result = manager.exec_module()
            for k, v in iteritems(result):
if k == 'changed':
if v is True:
results['changed'] = True
else:
results[k] = v
return results
    def get_manager(self, type):
        if type == 'certificate':
            return CertificateManager(self.client)
        elif type == 'key':
            return KeyManager(self.client)
class BaseManager(object):
download_path = '/var/config/rest/downloads'
def __init__(self, client):
self.client = client
self.have = None
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def create(self):
self._set_changed_options()
if self.client.check_mode:
return True
self.create_on_device()
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.client.check_mode:
return True
self.update_on_device()
return True
def absent(self):
if self.exists():
return self.remove()
return False
def remove(self):
if self.client.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the Wide IP")
return True
class CertificateManager(BaseManager):
def __init__(self, client):
super(CertificateManager, self).__init__(client)
self.want = CertParameters(self.client.module.params)
self.changes = CertParameters()
def _set_changed_options(self):
changed = {}
try:
for key in CertParameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = CertParameters(changed)
except Exception:
pass
def _update_changed_options(self):
changed = {}
try:
for key in CertParameters.updatables:
if getattr(self.want, key) is not None:
attr1 = getattr(self.want, key)
attr2 = getattr(self.have, key)
if attr1 != attr2:
changed[key] = attr1
if self.want.cert_checksum != self.have.checksum:
changed['cert_checksum'] = self.want.cert_checksum
if changed:
self.changes = CertParameters(changed)
return True
except Exception:
pass
return False
def exists(self):
result = self.client.api.tm.sys.file.ssl_certs.ssl_cert.exists(
name=self.want.cert_filename,
partition=self.want.partition
)
return result
def present(self):
if self.want.cert_content is None:
return False
return super(CertificateManager, self).present()
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update_on_device(self):
cstring = StringIO.StringIO(self.want.cert_content)
self.client.api.shared.file_transfer.uploads.upload_stringio(
cstring, self.want.cert_filename
)
resource = self.client.api.tm.sys.file.ssl_certs.ssl_cert.load(
name=self.want.cert_filename,
partition=self.want.partition
)
resource.update()
def create_on_device(self):
cstring = StringIO.StringIO(self.want.cert_content)
self.client.api.shared.file_transfer.uploads.upload_stringio(
cstring, self.want.cert_filename
)
self.client.api.tm.sys.file.ssl_certs.ssl_cert.create(
sourcePath=self.want.cert_source_path,
name=self.want.cert_filename,
partition=self.want.partition
)
def read_current_from_device(self):
resource = self.client.api.tm.sys.file.ssl_certs.ssl_cert.load(
name=self.want.cert_filename,
partition=self.want.partition
)
result = resource.attrs
return CertParameters(result)
def remove_from_device(self):
resource = self.client.api.tm.sys.file.ssl_certs.ssl_cert.load(
name=self.want.cert_filename,
partition=self.want.partition
)
resource.delete()
class KeyManager(BaseManager):
def __init__(self, client):
super(KeyManager, self).__init__(client)
self.want = KeyParameters(self.client.module.params)
self.changes = KeyParameters()
def _set_changed_options(self):
changed = {}
try:
for key in KeyParameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
                self.changes = KeyParameters(changed)
except Exception:
pass
def _update_changed_options(self):
changed = {}
try:
            for key in KeyParameters.updatables:
if getattr(self.want, key) is not None:
attr1 = getattr(self.want, key)
attr2 = getattr(self.have, key)
if attr1 != attr2:
changed[key] = attr1
if self.want.key_checksum != self.have.checksum:
changed['key_checksum'] = self.want.key_checksum
if changed:
                self.changes = KeyParameters(changed)
return True
except Exception:
pass
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update_on_device(self):
kstring = StringIO.StringIO(self.want.key_content)
self.client.api.shared.file_transfer.uploads.upload_stringio(
kstring, self.want.key_filename
)
resource = self.client.api.tm.sys.file.ssl_keys.ssl_key.load(
name=self.want.key_filename,
partition=self.want.partition
)
resource.update()
def exists(self):
result = self.client.api.tm.sys.file.ssl_keys.ssl_key.exists(
name=self.want.key_filename,
partition=self.want.partition
)
return result
def present(self):
if self.want.key_content is None:
return False
return super(KeyManager, self).present()
def read_current_from_device(self):
resource = self.client.api.tm.sys.file.ssl_keys.ssl_key.load(
name=self.want.key_filename,
partition=self.want.partition
)
result = resource.attrs
return KeyParameters(result)
def create_on_device(self):
kstring = StringIO.StringIO(self.want.key_content)
self.client.api.shared.file_transfer.uploads.upload_stringio(
kstring, self.want.key_filename
)
self.client.api.tm.sys.file.ssl_keys.ssl_key.create(
sourcePath=self.want.key_source_path,
name=self.want.key_filename,
partition=self.want.partition
)
def remove_from_device(self):
resource = self.client.api.tm.sys.file.ssl_keys.ssl_key.load(
name=self.want.key_filename,
partition=self.want.partition
)
resource.delete()
class ArgumentSpec(object):
def __init__(self):
deprecated = ['key_src', 'cert_src']
self.supports_check_mode = True
self.argument_spec = dict(
name=dict(
type='str',
required=True
),
cert_content=dict(
type='str',
default=None
),
cert_src=dict(
type='path',
default=None
),
key_content=dict(
type='str',
default=None
),
key_src=dict(
type='path',
default=None
),
passphrase=dict(
type='str',
default=None,
no_log=True
),
state=dict(
required=False,
default='present',
choices=['absent', 'present']
)
)
self.mutually_exclusive = [
['key_content', 'key_src'],
['cert_content', 'cert_src']
]
self.f5_product_name = 'bigip'
def main():
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
spec = ArgumentSpec()
client = AnsibleF5Client(
argument_spec=spec.argument_spec,
mutually_exclusive=spec.mutually_exclusive,
supports_check_mode=spec.supports_check_mode,
f5_product_name=spec.f5_product_name
)
try:
mm = ModuleManager(client)
results = mm.exec_module()
client.module.exit_json(**results)
except F5ModuleError as e:
client.module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
|
from app import db, app_log
from app.handler.error_handler import NotFound
from app.model.city import City
from app.model.region import Region
from app.model.country import Country
from app.model.status import Status
from app.model.units import Units
from app.model.label import Label
from app.model.categories import Categories
from app.model.resource_set import ResourceSet
from app.model.receipt import Receipt
from app.model.data import Data
from app.model.link_rs_data import LinkRsData
from app.model.user_info import UserInfo
from app.model.user_account import UserAccount
__author__ = 'Xiaoxiao.Xiong'
class DBHelper(object):
"""
Database handler, do CRUD
"""
def __init__(self):
pass
@staticmethod
def set_user(obj):
"""
Insert a user account into table UserAccount.
:param obj: Json, {username:'', password:''}
        :return: Object, an instance of UserAccount if the insert succeeds; otherwise the exception is re-raised
"""
try:
user = UserAccount(**obj)
db.session.add(user)
db.session.commit()
return user
except Exception as e:
db.session.rollback()
# app_log.error(repr(e), extra={'sender': 'DataSource'})
raise e
@staticmethod
def check_username(username):
try:
account = UserAccount.query.filter_by(username=username).first()
if account is not None:
return False
else:
return True
except Exception as e:
# app_log.error(repr(e), extra={'sender': 'DataSource'})
raise e
@staticmethod
def get_ext_id(obj):
"""
Check a user account.
:param obj: Json, {username:'', password:''}
        :return: String/None, the ext_id if the account exists and the password matches, otherwise None
"""
try:
account = UserAccount.query.filter_by(username=obj['username']).first()
if account is None:
return None
if account.password == obj['password']:
return account.ext_id
else:
return None
except Exception as e:
# app_log.error(repr(e), extra={'sender': 'DataSource'})
raise e
@staticmethod
def get_user_id(ext_id):
"""
Get user_id via ext_id
:param ext_id: String
:return: Boolean/String
"""
try:
account = UserAccount.query.filter_by(ext_id=ext_id).first()
if account is not None:
return account.id
else:
return False
except Exception as e:
# app_log.error(repr(e), extra={'sender': 'DataSource'})
raise e
@staticmethod
def check_token(ext_id):
"""
        Check whether the given ext_id exists
        :param ext_id: String, a unique identifier that acts like both an OpenID and a token
        :return: Boolean, True if the ext_id exists, otherwise False
"""
try:
flag = UserAccount.query.filter_by(ext_id=ext_id).first()
            if flag is None:
return False
else:
return True
except Exception as e:
# app_log.error(repr(e), extra={'sender': 'DataSource'})
raise e
@staticmethod
def set_info(email):
"""
Insert an info record into UserInfo table
:param email: String
:return:
"""
try:
obj = UserInfo.query.filter_by(email=email).first()
if obj is not None:
return obj
else:
info = UserInfo(email)
db.session.add(info)
db.session.commit()
return info
except Exception as e:
db.session.rollback()
# app_log.error(repr(e), extra={'sender': 'DataSource'})
raise e
@staticmethod
def get_info(ext_id):
"""
Get personal information
        :param ext_id: String, a unique identifier that acts like both an OpenID and a token
:return: Object
"""
try:
user = UserAccount.query.filter_by(ext_id=ext_id).first()
if user is not None:
info = {
'email': user.user_info.email,
'firstName': user.user_info.firstName,
'lastName': user.user_info.lastName,
'gender': user.user_info.gender,
'address': user.user_info.address_1,
'city': user.user_info.city_id,
'region': user.user_info.region_id,
'country': user.user_info.country_id
}
return info
else:
return False
except Exception as e:
# app_log.error(repr(e), extra={'sender': 'DataSource'})
raise e
@staticmethod
def update_info():
"""
Update personal information
:return: Boolean
"""
try:
pass
except Exception as e:
db.session.rollback()
# app_log.error(repr(e), extra={'sender': 'DataSource'})
raise e
@staticmethod
def set_receipt(receipt):
"""
Insert a receipt record into Receipt table
:param receipt: Object
:return: Boolean
"""
try:
row = Receipt(**receipt)
db.session.add(row)
db.session.commit()
except Exception as e:
db.session.rollback()
# app_log.error(repr(e), extra={'sender': 'DataSource'})
raise e
@staticmethod
def get_receipt():
"""
Not to be used in this version
Get receipt record from database
:return:
"""
try:
pass
except Exception as e:
# app_log.error(repr(e), extra={'sender': 'DataSource'})
raise e
@staticmethod
def update_receipt(obj):
"""
Update the status of a consent receipt
:param obj: Object, {'receipt_id':'','authorization_status':''}
:return: Boolean
"""
try:
receipt = Receipt.query.filter_by(id=obj['receipt_id']).first()
receipt.auth_status = obj['authorization_status']
db.session.commit()
except Exception as e:
db.session.rollback()
# app_log.error(repr(e), extra={'sender': 'DataSource'})
raise e
@staticmethod
def get_resource_set(user_id):
"""
        Deprecated
Get resource set of a specific user
:param user_id: Int, unique id
:return: Object,
sample = [
{
"name":"Health",
"categories":[
"Height",
"HeartRate"
]
}
]
"""
try:
resource_sets = []
# get all resource sets of this user
rss = ResourceSet.query.filter_by(user_id=user_id).all()
# get all categories of each resource set
for rs in rss:
s = {
'name': rs.name,
'categories': []
}
results = LinkRsData.query.filter_by(resource_set_id=rs.id).all()
for row in results:
s['categories'].append(row.categories.name)
resource_sets.append(s)
return resource_sets
except Exception as e:
# app_log.error(repr(e), extra={'sender': 'DataSource'})
raise e
@staticmethod
def set_resource_set(user_id, rs_id, categories):
"""
Create a new resource set
:param user_id Int
:param rs_id String
:param categories List
:return: Boolean, True/False
"""
try:
rs = ResourceSet(user_id, rs_id)
db.session.add(rs)
for c in categories:
c_obj = DBHelper.get_category_by_name(c)
# can't find category in source server
if c_obj is None:
                    raise NotFound(payload={'detail': 'cannot find {0} in source'.format(c)})
lrd = LinkRsData(rs.id, c_obj.id)
db.session.add(lrd)
db.session.commit()
except NotFound as e:
db.session.rollback()
# app_log.error(repr(e), extra={'sender': 'DataSource'})
raise e
except Exception as e:
db.session.rollback()
# app_log.error(repr(e), extra={'sender': 'DataSource'})
raise e
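    # Usage sketch (hypothetical values): register resource set 'rs-abc' for
    # user 1, covering two categories that must already exist:
    #   DBHelper.set_resource_set(1, 'rs-abc', ['Height', 'HeartRate'])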
@staticmethod
def check_rs_id(rs_id):
try:
result = ResourceSet.query.filter_by(rs_id=rs_id).first()
if result is not None:
return result
else:
return None
except Exception as e:
# app_log.error(repr(e), extra={'sender': 'DataSource'})
raise e
@staticmethod
def set_rs_id(obj):
"""
        Deprecated
        Map the rs_id generated by the DOP onto a resource set
:return: Boolean
"""
try:
rs = ResourceSet.query.filter_by(name=obj['name']).first()
rs.rs_id = obj['rs_id']
db.session.commit()
return True
except Exception as e:
db.session.rollback()
# app_log.error(repr(e), extra={'sender': 'DataSource'})
raise e
@staticmethod
def get_data_by_rs_id(rs_id, category=None, label=None):
"""
Get data by rs_id
        :param rs_id: generated by the Data Operator for mapping the resource set
:param category: specific category
:param label: specific label in category
:return: JSON
"""
try:
# get user_id and resource_set_id by rs_id
rs = ResourceSet.query.filter_by(rs_id=rs_id).first()
if rs is None:
raise NotFound(payload={'detail': ('Invalid parameter <rs_id>:{0}').format(rs_id)})
user_id = rs.user_id
resource_set_id = rs.id
            # whether a specific category was requested
if category is not None:
# get the id of this category
ca = Categories.query.filter_by(name=category).first()
if ca is None:
                    raise NotFound(payload={'detail': 'cannot find {0} in source'.format(category)})
# raise ValueError('Invalid parameter <category>')
# whether resource has been registered
u_ca = LinkRsData.query.filter(db.and_(
LinkRsData.resource_set_id == resource_set_id,
LinkRsData.categories_id == ca.id
)).first()
                # if not registered, raise NotFound
if u_ca is None:
raise NotFound(payload={
                        'detail': 'the category you requested has not been registered in the resource set!'
})
# if yes, return data
# get all subsets of this category
if label is not None:
lb = Label.query.filter(db.and_(
Label.c_id == ca.id,
Label.name == label
)).all()
else:
lb = Label.query.filter_by(c_id=ca.id).all()
data = []
# get data
for b in lb:
cd = Data.query.filter(db.and_(
Data.user_id == user_id,
Data.label_id == b.id
)).order_by(Data.timestamp).all()
if b.u_id is None:
temp_unit = None
else:
temp_unit = b.units.name
data.append({
'label': b.name,
'sample': [d.serialized for d in cd],
'units': temp_unit
})
return {
'name': category,
'data': data
}
            # if no specific category was requested
# get all categories in this resource set
ca = LinkRsData.query.filter_by(resource_set_id=resource_set_id).all()
if ca is None:
return None
result = []
# get user's data in this resource set by user_id and categories_id
for i in ca:
# get all labels of resource set
lb = Label.query.filter_by(c_id=i.categories_id).all()
data = []
for b in lb:
cd = Data.query.filter(db.and_(
Data.user_id == user_id,
Data.label_id == b.id
)).order_by(Data.timestamp).all()
if b.u_id is None:
temp_unit = None
else:
temp_unit = b.units.name
data.append({
'label': b.name,
'sample': [d.serialized for d in cd],
'units': temp_unit
})
result.append({
'name': i.categories.name,
'data': data
})
return result
except NotFound as e:
# app_log.error(repr(e), extra={'sender': 'DataSource'})
raise e
except Exception as e:
# app_log.error(repr(e), extra={'sender': 'DataSource'})
raise e
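    # For illustration (hypothetical data): get_data_by_rs_id('rs-abc',
    # 'Health') returns a structure shaped like
    #   {'name': 'Health',
    #    'data': [{'label': 'HeartRate', 'sample': [...], 'units': 'bpm'}]}
    # while omitting the category returns a list of such objects, one per
    # registered category.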
@staticmethod
def set_category(name, desc=None):
"""
        Add a category
        :param name: String, category name
        :param desc: String, optional description
"""
try:
ca = Categories(name, desc)
db.session.add(ca)
db.session.commit()
except Exception as e:
db.session.rollback()
app_log.error(repr(e), extra={'sender': 'DataSource'})
raise e
@staticmethod
def get_categories():
"""
Get categories list
:return: Object
"""
try:
result = Categories.query.all()
return result
except Exception as e:
app_log.error(repr(e), extra={'sender': 'DataSource'})
raise e
@staticmethod
def get_category_by_name(name):
"""
        Get a category by its name
        :return: Categories instance, or None if not found
"""
try:
result = Categories.query.filter_by(name=name).first()
if result is not None:
return result
else:
return None
except Exception as e:
# app_log.error(repr(e), extra={'sender': 'DataSource'})
raise e
@staticmethod
def set_label(name, units_id, category_id, desc=None):
"""
:param name:
:param units_id:
:param category_id:
:param desc:
:return:
"""
try:
lb = Label(name, units_id, category_id, desc)
db.session.add(lb)
db.session.commit()
return True
except Exception as e:
db.session.rollback()
# app_log.error(repr(e), extra={'sender': 'DataSource'})
raise e
@staticmethod
def get_labels():
"""
Get label list
:return: Object
"""
try:
result = Label.query.all()
return result
except Exception as e:
app_log.error(repr(e), extra={'sender': 'DataSource'})
raise e
@staticmethod
def get_label_by_name(name):
try:
result = Label.query.filter_by(name=name).first()
if result is not None:
return result
else:
return None
except Exception as e:
# app_log.error(repr(e), extra={'sender': 'DataSource'})
raise e
@staticmethod
def set_units(name, desc=None):
"""
:param name:
:param desc:
:return:
"""
try:
ut = Units(name, desc)
db.session.add(ut)
db.session.commit()
return True
except Exception as e:
db.session.rollback()
# app_log.error(repr(e), extra={'sender': 'DataSource'})
raise e
@staticmethod
def get_units():
try:
result = Units.query.all()
return result
except Exception as e:
# app_log.error(repr(e), extra={'sender': 'DataSource'})
raise e
@staticmethod
def get_units_by_name(name):
try:
result = Units.query.filter_by(name=name).first()
if result is not None:
return result
else:
return None
except Exception as e:
# app_log.error(repr(e), extra={'sender': 'DataSource'})
raise e
@staticmethod
def set_data(data):
"""
Import data into database
:param data:
:return: Boolean
"""
try:
d = Data(**data)
db.session.add(d)
db.session.commit()
return True
except Exception as e:
db.session.rollback()
# app_log.error(repr(e), extra={'sender': 'DataSource'})
raise e
@staticmethod
def get_data():
try:
pass
except Exception as e:
# app_log.error(repr(e), extra={'sender': 'DataSource'})
raise e
|
|
# mako/runtime.py
# Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""provides runtime services for templates, including Context,
Namespace, and various helper functions."""
from mako import exceptions, util, compat
from mako.compat import compat_builtins
import sys
class Context(object):
"""Provides runtime namespace, output buffer, and various
callstacks for templates.
See :ref:`runtime_toplevel` for detail on the usage of
:class:`.Context`.
"""
def __init__(self, buffer, **data):
self._buffer_stack = [buffer]
self._data = data
self._kwargs = data.copy()
self._with_template = None
self._outputting_as_unicode = None
self.namespaces = {}
# "capture" function which proxies to the
# generic "capture" function
self._data['capture'] = compat.partial(capture, self)
# "caller" stack used by def calls with content
self.caller_stack = self._data['caller'] = CallerStack()
def _set_with_template(self, t):
self._with_template = t
illegal_names = t.reserved_names.intersection(self._data)
if illegal_names:
raise exceptions.NameConflictError(
"Reserved words passed to render(): %s" %
", ".join(illegal_names))
@property
def lookup(self):
"""Return the :class:`.TemplateLookup` associated
with this :class:`.Context`.
"""
return self._with_template.lookup
@property
def kwargs(self):
"""Return the dictionary of top level keyword arguments associated
with this :class:`.Context`.
This dictionary only includes the top-level arguments passed to
:meth:`.Template.render`. It does not include names produced within
the template execution such as local variable names or special names
such as ``self``, ``next``, etc.
The purpose of this dictionary is primarily for the case that
a :class:`.Template` accepts arguments via its ``<%page>`` tag,
which are normally expected to be passed via :meth:`.Template.render`,
except the template is being called in an inheritance context,
using the ``body()`` method. :attr:`.Context.kwargs` can then be
used to propagate these arguments to the inheriting template::
${next.body(**context.kwargs)}
"""
return self._kwargs.copy()
def push_caller(self, caller):
"""Push a ``caller`` callable onto the callstack for
this :class:`.Context`."""
self.caller_stack.append(caller)
def pop_caller(self):
"""Pop a ``caller`` callable onto the callstack for this
:class:`.Context`."""
del self.caller_stack[-1]
def keys(self):
"""Return a list of all names established in this :class:`.Context`."""
return list(self._data.keys())
def __getitem__(self, key):
if key in self._data:
return self._data[key]
else:
return compat_builtins.__dict__[key]
def _push_writer(self):
"""push a capturing buffer onto this Context and return
the new writer function."""
buf = util.FastEncodingBuffer()
self._buffer_stack.append(buf)
return buf.write
def _pop_buffer_and_writer(self):
"""pop the most recent capturing buffer from this Context
and return the current writer after the pop.
"""
buf = self._buffer_stack.pop()
return buf, self._buffer_stack[-1].write
def _push_buffer(self):
"""push a capturing buffer onto this Context."""
self._push_writer()
def _pop_buffer(self):
"""pop the most recent capturing buffer from this Context."""
return self._buffer_stack.pop()
def get(self, key, default=None):
"""Return a value from this :class:`.Context`."""
return self._data.get(key, compat_builtins.__dict__.get(key, default))
def write(self, string):
"""Write a string to this :class:`.Context` object's
underlying output buffer."""
self._buffer_stack[-1].write(string)
def writer(self):
"""Return the current writer function."""
return self._buffer_stack[-1].write
def _copy(self):
c = Context.__new__(Context)
c._buffer_stack = self._buffer_stack
c._data = self._data.copy()
c._kwargs = self._kwargs
c._with_template = self._with_template
c._outputting_as_unicode = self._outputting_as_unicode
c.namespaces = self.namespaces
c.caller_stack = self.caller_stack
return c
def _locals(self, d):
"""Create a new :class:`.Context` with a copy of this
:class:`.Context`'s current state,
updated with the given dictionary.
The :attr:`.Context.kwargs` collection remains
unaffected.
"""
if not d:
return self
c = self._copy()
c._data.update(d)
return c
def _clean_inheritance_tokens(self):
"""create a new copy of this :class:`.Context`. with
tokens related to inheritance state removed."""
c = self._copy()
x = c._data
x.pop('self', None)
x.pop('parent', None)
x.pop('next', None)
return c
class CallerStack(list):
def __init__(self):
self.nextcaller = None
def __nonzero__(self):
return self.__bool__()
def __bool__(self):
return len(self) and self._get_caller() and True or False
def _get_caller(self):
# this method can be removed once
# codegen MAGIC_NUMBER moves past 7
return self[-1]
def __getattr__(self, key):
return getattr(self._get_caller(), key)
def _push_frame(self):
frame = self.nextcaller or None
self.append(frame)
self.nextcaller = None
return frame
def _pop_frame(self):
self.nextcaller = self.pop()
class Undefined(object):
"""Represents an undefined value in a template.
All template modules have a constant value
``UNDEFINED`` present which is an instance of this
object.
"""
def __str__(self):
raise NameError("Undefined")
def __nonzero__(self):
return self.__bool__()
def __bool__(self):
return False
UNDEFINED = Undefined()
STOP_RENDERING = ""
class LoopStack(object):
"""a stack for LoopContexts that implements the context manager protocol
to automatically pop off the top of the stack on context exit
"""
def __init__(self):
self.stack = []
def _enter(self, iterable):
self._push(iterable)
return self._top
def _exit(self):
self._pop()
return self._top
@property
def _top(self):
if self.stack:
return self.stack[-1]
else:
return self
def _pop(self):
return self.stack.pop()
def _push(self, iterable):
new = LoopContext(iterable)
if self.stack:
new.parent = self.stack[-1]
return self.stack.append(new)
def __getattr__(self, key):
raise exceptions.RuntimeException("No loop context is established")
def __iter__(self):
return iter(self._top)
class LoopContext(object):
"""A magic loop variable.
Automatically accessible in any ``% for`` block.
See the section :ref:`loop_context` for usage
notes.
:attr:`parent` -> :class:`.LoopContext` or ``None``
The parent loop, if one exists.
:attr:`index` -> `int`
The 0-based iteration count.
:attr:`reverse_index` -> `int`
The number of iterations remaining.
:attr:`first` -> `bool`
``True`` on the first iteration, ``False`` otherwise.
:attr:`last` -> `bool`
``True`` on the last iteration, ``False`` otherwise.
:attr:`even` -> `bool`
``True`` when ``index`` is even.
:attr:`odd` -> `bool`
``True`` when ``index`` is odd.
"""
def __init__(self, iterable):
self._iterable = iterable
self.index = 0
self.parent = None
def __iter__(self):
for i in self._iterable:
yield i
self.index += 1
@util.memoized_instancemethod
def __len__(self):
return len(self._iterable)
@property
def reverse_index(self):
return len(self) - self.index - 1
@property
def first(self):
return self.index == 0
@property
def last(self):
return self.index == len(self) - 1
@property
def even(self):
return not self.odd
@property
def odd(self):
return bool(self.index % 2)
def cycle(self, *values):
"""Cycle through values as the loop progresses.
"""
if not values:
raise ValueError("You must provide values to cycle through")
return values[self.index % len(values)]
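    # Example (template sketch): ``cycle`` is typically used through the magic
    # ``loop`` variable to alternate values inside a ``% for`` block:
    #
    #     % for item in items:
    #         <li class="${loop.cycle('odd', 'even')}">${item}</li>
    #     % endfor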
class _NSAttr(object):
def __init__(self, parent):
self.__parent = parent
def __getattr__(self, key):
ns = self.__parent
while ns:
if hasattr(ns.module, key):
return getattr(ns.module, key)
else:
ns = ns.inherits
raise AttributeError(key)
class Namespace(object):
"""Provides access to collections of rendering methods, which
can be local, from other templates, or from imported modules.
To access a particular rendering method referenced by a
:class:`.Namespace`, use plain attribute access:
.. sourcecode:: mako
${some_namespace.foo(x, y, z)}
:class:`.Namespace` also contains several built-in attributes
described here.
"""
def __init__(self, name, context,
callables=None, inherits=None,
populate_self=True, calling_uri=None):
self.name = name
self.context = context
self.inherits = inherits
if callables is not None:
self.callables = dict([(c.__name__, c) for c in callables])
callables = ()
module = None
"""The Python module referenced by this :class:`.Namespace`.
If the namespace references a :class:`.Template`, then
this module is the equivalent of ``template.module``,
i.e. the generated module for the template.
"""
template = None
"""The :class:`.Template` object referenced by this
:class:`.Namespace`, if any.
"""
context = None
"""The :class:`.Context` object for this :class:`.Namespace`.
Namespaces are often created with copies of contexts that
contain slightly different data, particularly in inheritance
scenarios. Using the :class:`.Context` off of a :class:`.Namespace` one
can traverse an entire chain of templates that inherit from
one-another.
"""
filename = None
"""The path of the filesystem file used for this
:class:`.Namespace`'s module or template.
If this is a pure module-based
:class:`.Namespace`, this evaluates to ``module.__file__``. If a
template-based namespace, it evaluates to the original
template file location.
"""
uri = None
"""The URI for this :class:`.Namespace`'s template.
I.e. whatever was sent to :meth:`.TemplateLookup.get_template()`.
This is the equivalent of :attr:`.Template.uri`.
"""
_templateuri = None
@util.memoized_property
def attr(self):
"""Access module level attributes by name.
This accessor allows templates to supply "scalar"
attributes which are particularly handy in inheritance
relationships.
.. seealso::
:ref:`inheritance_attr`
:ref:`namespace_attr_for_includes`
"""
return _NSAttr(self)
def get_namespace(self, uri):
"""Return a :class:`.Namespace` corresponding to the given ``uri``.
If the given ``uri`` is a relative URI (i.e. it does not
contain a leading slash ``/``), the ``uri`` is adjusted to
be relative to the ``uri`` of the namespace itself. This
method is therefore mostly useful off of the built-in
``local`` namespace, described in :ref:`namespace_local`.
In
most cases, a template wouldn't need this function, and
should instead use the ``<%namespace>`` tag to load
namespaces. However, since all ``<%namespace>`` tags are
evaluated before the body of a template ever runs,
this method can be used to locate namespaces using
expressions that were generated within the body code of
the template, or to conditionally use a particular
namespace.
"""
key = (self, uri)
if key in self.context.namespaces:
return self.context.namespaces[key]
else:
ns = TemplateNamespace(uri, self.context._copy(),
templateuri=uri,
calling_uri=self._templateuri)
self.context.namespaces[key] = ns
return ns
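    # Example (template sketch): resolving a namespace at runtime relative to
    # the built-in ``local`` namespace; the uri and def name here are
    # hypothetical:
    #
    #     <%
    #         widgets = local.get_namespace('widgets/%s.html' % theme)
    #     %>
    #     ${widgets.render_button()}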
def get_template(self, uri):
"""Return a :class:`.Template` from the given ``uri``.
The ``uri`` resolution is relative to the ``uri`` of this
:class:`.Namespace` object's :class:`.Template`.
"""
return _lookup_template(self.context, uri, self._templateuri)
def get_cached(self, key, **kwargs):
"""Return a value from the :class:`.Cache` referenced by this
:class:`.Namespace` object's :class:`.Template`.
The advantage to this method versus direct access to the
:class:`.Cache` is that the configuration parameters
declared in ``<%page>`` take effect here, thereby calling
up the same configured backend as that configured
by ``<%page>``.
"""
return self.cache.get(key, **kwargs)
@property
def cache(self):
"""Return the :class:`.Cache` object referenced
by this :class:`.Namespace` object's
:class:`.Template`.
"""
return self.template.cache
def include_file(self, uri, **kwargs):
"""Include a file at the given ``uri``."""
_include_file(self.context, uri, self._templateuri, **kwargs)
def _populate(self, d, l):
for ident in l:
if ident == '*':
for (k, v) in self._get_star():
d[k] = v
else:
d[ident] = getattr(self, ident)
def _get_star(self):
if self.callables:
for key in self.callables:
yield (key, self.callables[key])
def __getattr__(self, key):
if key in self.callables:
val = self.callables[key]
elif self.inherits:
val = getattr(self.inherits, key)
else:
raise AttributeError(
"Namespace '%s' has no member '%s'" %
(self.name, key))
setattr(self, key, val)
return val
class TemplateNamespace(Namespace):
"""A :class:`.Namespace` specific to a :class:`.Template` instance."""
def __init__(self, name, context, template=None, templateuri=None,
callables=None, inherits=None,
populate_self=True, calling_uri=None):
self.name = name
self.context = context
self.inherits = inherits
if callables is not None:
self.callables = dict([(c.__name__, c) for c in callables])
if templateuri is not None:
self.template = _lookup_template(context, templateuri,
calling_uri)
self._templateuri = self.template.module._template_uri
elif template is not None:
self.template = template
self._templateuri = template.module._template_uri
else:
raise TypeError("'template' argument is required.")
if populate_self:
lclcallable, lclcontext = \
_populate_self_namespace(context, self.template,
self_ns=self)
@property
def module(self):
"""The Python module referenced by this :class:`.Namespace`.
If the namespace references a :class:`.Template`, then
this module is the equivalent of ``template.module``,
i.e. the generated module for the template.
"""
return self.template.module
@property
def filename(self):
"""The path of the filesystem file used for this
:class:`.Namespace`'s module or template.
"""
return self.template.filename
@property
def uri(self):
"""The URI for this :class:`.Namespace`'s template.
I.e. whatever was sent to :meth:`.TemplateLookup.get_template()`.
This is the equivalent of :attr:`.Template.uri`.
"""
return self.template.uri
def _get_star(self):
if self.callables:
for key in self.callables:
yield (key, self.callables[key])
def get(key):
callable_ = self.template._get_def_callable(key)
return compat.partial(callable_, self.context)
for k in self.template.module._exports:
yield (k, get(k))
def __getattr__(self, key):
if key in self.callables:
val = self.callables[key]
elif self.template.has_def(key):
callable_ = self.template._get_def_callable(key)
val = compat.partial(callable_, self.context)
elif self.inherits:
val = getattr(self.inherits, key)
else:
raise AttributeError(
"Namespace '%s' has no member '%s'" %
(self.name, key))
setattr(self, key, val)
return val
class ModuleNamespace(Namespace):
"""A :class:`.Namespace` specific to a Python module instance."""
def __init__(self, name, context, module,
callables=None, inherits=None,
populate_self=True, calling_uri=None):
self.name = name
self.context = context
self.inherits = inherits
if callables is not None:
self.callables = dict([(c.__name__, c) for c in callables])
mod = __import__(module)
for token in module.split('.')[1:]:
mod = getattr(mod, token)
self.module = mod
@property
def filename(self):
"""The path of the filesystem file used for this
:class:`.Namespace`'s module or template.
"""
return self.module.__file__
def _get_star(self):
if self.callables:
for key in self.callables:
yield (key, self.callables[key])
for key in dir(self.module):
if key[0] != '_':
callable_ = getattr(self.module, key)
if compat.callable(callable_):
yield key, compat.partial(callable_, self.context)
def __getattr__(self, key):
if key in self.callables:
val = self.callables[key]
elif hasattr(self.module, key):
callable_ = getattr(self.module, key)
val = compat.partial(callable_, self.context)
elif self.inherits:
val = getattr(self.inherits, key)
else:
raise AttributeError(
"Namespace '%s' has no member '%s'" %
(self.name, key))
setattr(self, key, val)
return val
def supports_caller(func):
"""Apply a caller_stack compatibility decorator to a plain
Python function.
See the example in :ref:`namespaces_python_modules`.
"""
def wrap_stackframe(context, *args, **kwargs):
context.caller_stack._push_frame()
try:
return func(context, *args, **kwargs)
finally:
context.caller_stack._pop_frame()
return wrap_stackframe
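# Example (module sketch): a plain Python function decorated so it can be
# invoked with ``<%call>`` semantics; the caller_stack frame push/pop is
# handled by the decorator:
#
#     @supports_caller
#     def bar(context):
#         context.write("before\n")
#         context['caller'].body()
#         context.write("after\n")
#         return ''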
def capture(context, callable_, *args, **kwargs):
"""Execute the given template def, capturing the output into
a buffer.
See the example in :ref:`namespaces_python_modules`.
"""
if not compat.callable(callable_):
raise exceptions.RuntimeException(
"capture() function expects a callable as "
"its argument (i.e. capture(func, *args, **kwargs))"
)
context._push_buffer()
try:
callable_(*args, **kwargs)
finally:
buf = context._pop_buffer()
return buf.getvalue()
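# Example (sketch): from Python code, capture a def's output as a string
# instead of writing it to the context buffer (``somedef`` is hypothetical);
# inside templates the context-bound form ``${capture(somedef, x=5)}`` is
# available via the 'capture' name installed in Context.__init__:
#
#     html = capture(context, somedef, x=5)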
def _decorate_toplevel(fn):
def decorate_render(render_fn):
def go(context, *args, **kw):
def y(*args, **kw):
return render_fn(context, *args, **kw)
try:
y.__name__ = render_fn.__name__[7:]
except TypeError:
# < Python 2.4
pass
return fn(y)(context, *args, **kw)
return go
return decorate_render
def _decorate_inline(context, fn):
def decorate_render(render_fn):
dec = fn(render_fn)
def go(*args, **kw):
return dec(context, *args, **kw)
return go
return decorate_render
def _include_file(context, uri, calling_uri, **kwargs):
"""locate the template from the given uri and include it in
the current output."""
template = _lookup_template(context, uri, calling_uri)
(callable_, ctx) = _populate_self_namespace(
context._clean_inheritance_tokens(),
template)
kwargs = _kwargs_for_include(callable_, context._data, **kwargs)
if template.include_error_handler:
try:
callable_(ctx, **kwargs)
except Exception:
result = template.include_error_handler(ctx, compat.exception_as())
if not result:
compat.reraise(*sys.exc_info())
else:
callable_(ctx, **kwargs)
def _inherit_from(context, uri, calling_uri):
"""called by the _inherit method in template modules to set
up the inheritance chain at the start of a template's
execution."""
if uri is None:
return None
template = _lookup_template(context, uri, calling_uri)
self_ns = context['self']
ih = self_ns
while ih.inherits is not None:
ih = ih.inherits
lclcontext = context._locals({'next': ih})
ih.inherits = TemplateNamespace("self:%s" % template.uri,
lclcontext,
template=template,
populate_self=False)
context._data['parent'] = lclcontext._data['local'] = ih.inherits
callable_ = getattr(template.module, '_mako_inherit', None)
if callable_ is not None:
ret = callable_(template, lclcontext)
if ret:
return ret
gen_ns = getattr(template.module, '_mako_generate_namespaces', None)
if gen_ns is not None:
gen_ns(context)
return (template.callable_, lclcontext)
def _lookup_template(context, uri, relativeto):
lookup = context._with_template.lookup
if lookup is None:
raise exceptions.TemplateLookupException(
"Template '%s' has no TemplateLookup associated" %
context._with_template.uri)
uri = lookup.adjust_uri(uri, relativeto)
try:
return lookup.get_template(uri)
except exceptions.TopLevelLookupException:
raise exceptions.TemplateLookupException(str(compat.exception_as()))
def _populate_self_namespace(context, template, self_ns=None):
if self_ns is None:
self_ns = TemplateNamespace('self:%s' % template.uri,
context, template=template,
populate_self=False)
context._data['self'] = context._data['local'] = self_ns
if hasattr(template.module, '_mako_inherit'):
ret = template.module._mako_inherit(template, context)
if ret:
return ret
return (template.callable_, context)
def _render(template, callable_, args, data, as_unicode=False):
"""create a Context and return the string
output of the given template and template callable."""
if as_unicode:
buf = util.FastEncodingBuffer(as_unicode=True)
elif template.bytestring_passthrough:
buf = compat.StringIO()
else:
buf = util.FastEncodingBuffer(
as_unicode=as_unicode,
encoding=template.output_encoding,
errors=template.encoding_errors)
context = Context(buf, **data)
context._outputting_as_unicode = as_unicode
context._set_with_template(template)
_render_context(template, callable_, context, *args,
**_kwargs_for_callable(callable_, data))
return context._pop_buffer().getvalue()
def _kwargs_for_callable(callable_, data):
argspec = compat.inspect_func_args(callable_)
# for normal pages, **pageargs is usually present
if argspec[2]:
return data
# for rendering defs from the top level, figure out the args
namedargs = argspec[0] + [v for v in argspec[1:3] if v is not None]
kwargs = {}
for arg in namedargs:
if arg != 'context' and arg in data and arg not in kwargs:
kwargs[arg] = data[arg]
return kwargs
def _kwargs_for_include(callable_, data, **kwargs):
argspec = compat.inspect_func_args(callable_)
namedargs = argspec[0] + [v for v in argspec[1:3] if v is not None]
for arg in namedargs:
if arg != 'context' and arg in data and arg not in kwargs:
kwargs[arg] = data[arg]
return kwargs
def _render_context(tmpl, callable_, context, *args, **kwargs):
import mako.template as template
# create polymorphic 'self' namespace for this
# template with possibly updated context
if not isinstance(tmpl, template.DefTemplate):
# if main render method, call from the base of the inheritance stack
(inherit, lclcontext) = _populate_self_namespace(context, tmpl)
_exec_template(inherit, lclcontext, args=args, kwargs=kwargs)
else:
# otherwise, call the actual rendering method specified
(inherit, lclcontext) = _populate_self_namespace(context, tmpl.parent)
_exec_template(callable_, context, args=args, kwargs=kwargs)
def _exec_template(callable_, context, args=None, kwargs=None):
"""execute a rendering callable given the callable, a
Context, and optional explicit arguments
the contextual Template will be located if it exists, and
the error handling options specified on that Template will
be interpreted here.
"""
template = context._with_template
if template is not None and \
(template.format_exceptions or template.error_handler):
try:
callable_(context, *args, **kwargs)
except Exception:
_render_error(template, context, compat.exception_as())
except:
e = sys.exc_info()[0]
_render_error(template, context, e)
else:
callable_(context, *args, **kwargs)
def _render_error(template, context, error):
if template.error_handler:
result = template.error_handler(context, error)
if not result:
compat.reraise(*sys.exc_info())
else:
error_template = exceptions.html_error_template()
if context._outputting_as_unicode:
context._buffer_stack[:] = [
util.FastEncodingBuffer(as_unicode=True)]
else:
context._buffer_stack[:] = [util.FastEncodingBuffer(
error_template.output_encoding,
error_template.encoding_errors)]
context._set_with_template(error_template)
error_template.render_context(context, error=error)
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import google.api_core.grpc_helpers
import google.api_core.operations_v1
from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2_grpc
class BigtableInstanceAdminGrpcTransport(object):
"""gRPC transport class providing stubs for
google.bigtable.admin.v2 BigtableInstanceAdmin API.
The transport provides access to the raw gRPC stubs,
which can be used to take advantage of advanced
features of gRPC.
"""
# The scopes needed to make gRPC calls to all of the methods defined
# in this service.
_OAUTH_SCOPES = (
'https://www.googleapis.com/auth/bigtable.admin',
'https://www.googleapis.com/auth/bigtable.admin.cluster',
'https://www.googleapis.com/auth/bigtable.admin.instance',
'https://www.googleapis.com/auth/bigtable.admin.table',
'https://www.googleapis.com/auth/cloud-bigtable.admin',
'https://www.googleapis.com/auth/cloud-bigtable.admin.cluster',
'https://www.googleapis.com/auth/cloud-bigtable.admin.table',
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/cloud-platform.read-only',
)
def __init__(self,
channel=None,
credentials=None,
address='bigtableadmin.googleapis.com:443'):
"""Instantiate the transport class.
Args:
channel (grpc.Channel): A ``Channel`` instance through
which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
address (str): The address where the service is hosted.
"""
# If both `channel` and `credentials` are specified, raise an
# exception (channels come with credentials baked in already).
if channel is not None and credentials is not None:
raise ValueError(
'The `channel` and `credentials` arguments are mutually '
'exclusive.', )
# Create the channel.
if channel is None:
channel = self.create_channel(
address=address,
credentials=credentials,
)
# gRPC uses objects called "stubs" that are bound to the
# channel and provide a basic method for each RPC.
self._stubs = {
'bigtable_instance_admin_stub':
bigtable_instance_admin_pb2_grpc.BigtableInstanceAdminStub(
channel),
}
# Because this API includes a method that returns a
# long-running operation (proto: google.longrunning.Operation),
# instantiate an LRO client.
self._operations_client = google.api_core.operations_v1.OperationsClient(
channel)
@classmethod
def create_channel(cls,
address='bigtableadmin.googleapis.com:443',
credentials=None):
"""Create and return a gRPC channel object.
Args:
address (str): The host for the channel to use.
credentials (~.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
Returns:
grpc.Channel: A gRPC channel object.
"""
return google.api_core.grpc_helpers.create_channel(
address,
credentials=credentials,
scopes=cls._OAUTH_SCOPES,
)
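    # Example (sketch): the transport can be built from credentials or from a
    # pre-built channel, but not both:
    #
    #     import google.auth
    #     credentials, _ = google.auth.default()
    #     transport = BigtableInstanceAdminGrpcTransport(credentials=credentials)
    #
    #     # or, equivalently:
    #     channel = BigtableInstanceAdminGrpcTransport.create_channel()
    #     transport = BigtableInstanceAdminGrpcTransport(channel=channel)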
@property
def create_instance(self):
"""Return the gRPC stub for {$apiMethod.name}.
Create an instance within a project.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].CreateInstance
@property
def get_instance(self):
"""Return the gRPC stub for {$apiMethod.name}.
Gets information about an instance.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].GetInstance
@property
def list_instances(self):
"""Return the gRPC stub for {$apiMethod.name}.
Lists information about instances in a project.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].ListInstances
@property
def update_instance(self):
"""Return the gRPC stub for {$apiMethod.name}.
Updates an instance within a project.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].UpdateInstance
@property
def partial_update_instance(self):
"""Return the gRPC stub for {$apiMethod.name}.
Partially updates an instance within a project.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs[
'bigtable_instance_admin_stub'].PartialUpdateInstance
@property
def delete_instance(self):
"""Return the gRPC stub for {$apiMethod.name}.
Delete an instance from a project.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].DeleteInstance
@property
def create_cluster(self):
"""Return the gRPC stub for {$apiMethod.name}.
Creates a cluster within an instance.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].CreateCluster
@property
def get_cluster(self):
"""Return the gRPC stub for {$apiMethod.name}.
Gets information about a cluster.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].GetCluster
@property
def list_clusters(self):
"""Return the gRPC stub for {$apiMethod.name}.
Lists information about clusters in an instance.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].ListClusters
@property
def update_cluster(self):
"""Return the gRPC stub for {$apiMethod.name}.
Updates a cluster within an instance.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].UpdateCluster
@property
def delete_cluster(self):
"""Return the gRPC stub for {$apiMethod.name}.
Deletes a cluster from an instance.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].DeleteCluster
@property
def create_app_profile(self):
"""Return the gRPC stub for {$apiMethod.name}.
Creates an app profile within an instance.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].CreateAppProfile
@property
def get_app_profile(self):
"""Return the gRPC stub for {$apiMethod.name}.
Gets information about an app profile.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].GetAppProfile
@property
def list_app_profiles(self):
"""Return the gRPC stub for {$apiMethod.name}.
Lists information about app profiles in an instance.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].ListAppProfiles
@property
def update_app_profile(self):
"""Return the gRPC stub for {$apiMethod.name}.
Updates an app profile within an instance.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].UpdateAppProfile
@property
def delete_app_profile(self):
"""Return the gRPC stub for {$apiMethod.name}.
Deletes an app profile from an instance.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].DeleteAppProfile
@property
def get_iam_policy(self):
"""Return the gRPC stub for {$apiMethod.name}.
Gets the access control policy for an instance resource. Returns an empty
policy if an instance exists but does not have a policy set.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].GetIamPolicy
@property
def set_iam_policy(self):
"""Return the gRPC stub for {$apiMethod.name}.
Sets the access control policy on an instance resource. Replaces any
existing policy.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].SetIamPolicy
@property
def test_iam_permissions(self):
"""Return the gRPC stub for {$apiMethod.name}.
Returns permissions that the caller has on the specified instance resource.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].TestIamPermissions
|
|
#!/usr/bin/env python
import os, sys # system functions
import nipype.interfaces.io as nio # Data i/o
from nipype.interfaces.io import DataSink
import nipype.interfaces.fsl as fsl # fsl
import nipype.interfaces.ants as ants
import nipype.pipeline.engine as pe # pypeline engine
import nipype.interfaces.utility as util # utility
import nipype.algorithms.modelgen as model # model generation
import errno
fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
"""
Project info
"""
# Subject to run
subj_list = str(sys.argv[1])
project_dir = "/mnt/net/LaCie/Analysis/RuleSwitch/"
work_dir = "/Users/dmitrii/scratch/RuleSwitch/"
model_id = '_univar1a_LearnApply'
task_id = 1
TR = 2.0
# fwhm_thr=5.999541516002418 #to compare with FSL
fwhm_thr = 8.0
hpcutoff = 100
film_thr = 1000 # default in FSL
film_ms = 5 # this should be Susan mask size, not fwhm
template_brain = fsl.Info.standard_image('MNI152_T1_2mm_brain.nii.gz')
template_mask = fsl.Info.standard_image('MNI152_T1_2mm_brain_mask.nii.gz')
wf = pe.Workflow(name='wf')
wf.base_dir = os.path.join(work_dir, "wdir" + str(model_id) + "lvl12")
wf.config = {"execution": {"crashdump_dir": os.path.join(project_dir, 'crashdumps')}}
infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']), name="infosource")
infosource.iterables = ('subject_id', [subj_list])
def get_subjectinfo(subject_id, base_dir, task_id, model_id):
import os
import numpy as np
if subject_id == "Subject003" or subject_id == "Subject011" or subject_id == "Subject016" or subject_id == "Subject020":
run_id = [2, 3, 4, 5, 6, 7, 8]
itk_id = list(np.array(run_id) - 2)
evs_l2 = dict(ev001=[1, 1, 1, 0, 0, 0, 0], ev002=[0, 0, 0, 1, 1, 1, 1])
elif subject_id == "Subject019":
run_id = [1, 2, 3, 4, 5, 6]
itk_id = list(np.array(run_id) - 1)
evs_l2 = dict(ev001=[1, 1, 1, 1, 0, 0], ev002=[0, 0, 0, 0, 1, 1])
else:
run_id = [1, 2, 3, 4, 5, 6, 7, 8]
itk_id = list(np.array(run_id) - 1)
evs_l2 = dict(ev001=[1, 1, 1, 1, 0, 0, 0, 0], ev002=[0, 0, 0, 0, 1, 1, 1, 1])
# Conditions for level 1
condition_info = []
cond_file = os.path.join(base_dir, 'models', 'model%s' % model_id,
'condition_key.txt')
with open(cond_file, 'rt') as fp:
for line in fp:
info = line.strip().split()
condition_info.append([info[0], info[1], ' '.join(info[2:])])
if len(condition_info) == 0:
raise ValueError('No condition info found in %s' % cond_file)
taskinfo = np.array(condition_info)
n_tasks = len(np.unique(taskinfo[:, 0]))
conds = []
if task_id > n_tasks:
raise ValueError('Task id %d does not exist' % task_id)
for idx in range(n_tasks):
taskidx = np.where(taskinfo[:, 0] == 'task%03d' % (idx + 1))
conds.append([condition.replace(' ', '_') for condition
in taskinfo[taskidx[0], 2]])
# Conditions for level 2
condition_info_l2 = []
cond_file_l2 = os.path.join(base_dir, 'models', 'model%s' % model_id,
'condition_key_l2.txt')
with open(cond_file_l2, 'rt') as fp_l2:
for line in fp_l2:
info_l2 = line.strip().split()
condition_info_l2.append([info_l2[0], info_l2[1], ' '.join(info_l2[2:])])
if len(condition_info_l2) == 0:
raise ValueError('No condition info found in %s' % cond_file_l2)
taskinfo_l2 = np.array(condition_info_l2)
n_tasks_l2 = len(np.unique(taskinfo_l2[:, 0]))
conds_l2 = []
if task_id > n_tasks_l2:
raise ValueError('Task id %d does not exist' % task_id)
for idx in range(n_tasks_l2):
taskidx_l2 = np.where(taskinfo_l2[:, 0] == 'task%03d' % (idx + 1))
conds_l2.append([condition_l2.replace(' ', '_') for condition_l2
in taskinfo_l2[taskidx_l2[0], 2]])
return subject_id, model_id, task_id, run_id, conds[task_id - 1], itk_id, evs_l2, conds_l2[task_id - 1]
subjinfo = pe.Node(util.Function(input_names=['subject_id', 'base_dir', 'task_id', 'model_id'],
output_names=['subject_id', 'model_id', 'task_id', 'run_id', 'conds', 'itk_id',
'evs_l2', 'conds_l2'],
function=get_subjectinfo),
name='subjectinfo')
subjinfo.inputs.base_dir = project_dir
subjinfo.inputs.task_id = task_id
subjinfo.inputs.model_id = model_id
datasource = pe.Node(nio.DataGrabber(infields=['subject_id', 'model_id', 'task_id', 'run_id', 'itk_id'],
outfields=['func', 'struct', 'behave', 'contrasts', 'contrasts_l2', 'confound',
'itk_transform', 'composite_transform']), name='grabber')
datasource.inputs.base_directory = project_dir
datasource.inputs.template = '*'
datasource.inputs.field_template = dict(func='%s/bold/run%d/run*_mcf_brain.nii.gz',
struct='%s/anatomy/highres001_BrainExtractionBrain.nii.gz',
behave='%s/model/model%s/onsets/task%03d_run%d/ev*.txt',
contrasts='models/model%s/task_contrasts.txt',
contrasts_l2='models/model%s/task_contrasts_l2.txt',
confound='%s/bold/run%d/confound.txt',
itk_transform='reg/%s/bold/func2standard_mat/_subject_id_%s/_convert2itk%d/affine.txt',
composite_transform='reg/%s/anatomy/anat2standard_mat/_subject_id_%s/output_Composite.h5')
datasource.inputs.template_args = dict(func=[['subject_id', 'run_id']],
struct=[['subject_id']],
behave=[['subject_id', 'model_id', 'task_id', 'run_id']],
contrasts=[['model_id']],
contrasts_l2=[['model_id']],
confound=[['subject_id', 'run_id']],
itk_transform=[['subject_id', 'subject_id', 'itk_id']],
composite_transform=[['subject_id', 'subject_id']])
datasource.inputs.sort_filelist = True
def check_behav_list(behav, run_id):
import numpy as np
run_num = len(run_id)
if isinstance(behav, (str, bytes)):
behav = [behav]
behav_array = np.array(behav).flatten()
num_elements = behav_array.shape[0]
behav_array = behav_array.reshape(run_num, int(num_elements / run_num)).tolist()
return behav_array
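# Example (sketch): with two runs and two EV files per run, DataGrabber hands
# over a flat list which check_behav_list reshapes into one sublist per run
# for SpecifyModel (paths here are hypothetical):
#
#     check_behav_list(['r1_ev1', 'r1_ev2', 'r2_ev1', 'r2_ev2'], [1, 2])
#     # -> [['r1_ev1', 'r1_ev2'], ['r2_ev1', 'r2_ev2']]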
reshape_behav = pe.Node(util.Function(input_names=['behav', 'run_id'],
output_names=['behav'],
function=check_behav_list),
name='reshape_behav')
wf.connect([(infosource, subjinfo, [('subject_id', 'subject_id')]), ])
wf.connect(subjinfo, 'subject_id', datasource, 'subject_id')
wf.connect(subjinfo, 'model_id', datasource, 'model_id')
wf.connect(subjinfo, 'task_id', datasource, 'task_id')
wf.connect(subjinfo, 'run_id', datasource, 'run_id')
wf.connect(subjinfo, 'itk_id', datasource, 'itk_id')
wf.connect(datasource, 'behave', reshape_behav, 'behav')
wf.connect(subjinfo, 'run_id', reshape_behav, 'run_id')
"""
Setup preprocessing workflow
----------------------------
Set up a node to define all inputs required for the preprocessing workflow
"""
inputnode = pe.Node(interface=util.IdentityInterface(fields=['func',
'struct', ]),
name='inputspec')
wf.connect([(datasource, inputnode, [('struct', 'struct'), ('func', 'func'), ]), ])
"""
Convert functional images to float representation. Since there can be more than
one functional run we use a MapNode to convert each run.
"""
prefiltered_func_data = pe.MapNode(interface=fsl.ImageMaths(out_data_type='float',
op_string='',
suffix='_dtype'),
iterfield=['in_file'],
name='prefiltered_func_data')
wf.connect(inputnode, 'func', prefiltered_func_data, 'in_file')
"""
Determine the 2nd and 98th percentile intensities of each functional run
"""
getthresh = pe.MapNode(interface=fsl.ImageStats(op_string='-p 2 -p 98'),
iterfield=['in_file'],
name='getthreshold')
wf.connect(prefiltered_func_data, 'out_file', getthresh, 'in_file')
"""
Threshold each run of the functional data at 10% of its 98th percentile
"""
threshold = pe.MapNode(interface=fsl.ImageMaths(out_data_type='char',
suffix='_thresh'),
iterfield=['in_file'],
name='threshold')
"""
Define a function to get 10% of the intensity
"""
def getthreshop(thresh):
return '-thr %.10f -Tmin -bin' % (0.1 * thresh[0][1])
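# Example (sketch): for one run, getthresh returns out_stat like
# [[120.0, 9800.0]] (2nd and 98th percentiles), so getthreshop yields
# '-thr 980.0000000000 -Tmin -bin', i.e. 10% of the 98th percentile.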
wf.connect(prefiltered_func_data, 'out_file', threshold, 'in_file')
wf.connect(getthresh, ('out_stat', getthreshop), threshold, 'op_string')
"""
Determine the median value of the functional runs using the mask
"""
medianval = pe.MapNode(interface=fsl.ImageStats(op_string='-k %s -p 50'),
iterfield=['in_file', 'mask_file'],
name='medianval')
wf.connect(prefiltered_func_data, 'out_file', medianval, 'in_file')
wf.connect(threshold, 'out_file', medianval, 'mask_file')
"""
Dilate the mask
"""
dilatemask = pe.MapNode(interface=fsl.ImageMaths(suffix='_dil',
op_string='-dilF'),
iterfield=['in_file'],
name='dilatemask')
wf.connect(threshold, 'out_file', dilatemask, 'in_file')
"""
Mask the motion corrected functional runs with the dilated mask
"""
prefiltered_func_data_thresh = pe.MapNode(interface=fsl.ImageMaths(suffix='_mask',
op_string='-mas'),
iterfield=['in_file', 'in_file2'],
name='prefiltered_func_data_thresh')
wf.connect(prefiltered_func_data, 'out_file', prefiltered_func_data_thresh, 'in_file')
wf.connect(dilatemask, 'out_file', prefiltered_func_data_thresh, 'in_file2')
"""
Determine the mean image from each functional run
"""
meanfunc2 = pe.MapNode(interface=fsl.ImageMaths(op_string='-Tmean',
suffix='_mean'),
iterfield=['in_file'],
name='meanfunc2')
wf.connect(prefiltered_func_data_thresh, 'out_file', meanfunc2, 'in_file')
"""
Merge the median values with the mean functional images into a coupled list
"""
# Yes, it is Node with iterfield! Not MapNode.
mergenode = pe.Node(interface=util.Merge(2, axis='hstack'),
iterfield=['in1', 'in2'],
name='merge')
wf.connect(meanfunc2, 'out_file', mergenode, 'in1')
wf.connect(medianval, 'out_stat', mergenode, 'in2')
"""
Smooth each run using SUSAN with the brightness threshold set to 75% of the
median value for each run and a mask constituting the mean functional
"""
smooth = pe.MapNode(interface=fsl.SUSAN(),
iterfield=['in_file', 'brightness_threshold', 'usans'],
name='smooth')
smooth.inputs.fwhm = fwhm_thr
"""
Define a function to get the brightness threshold for SUSAN
"""
def getbtthresh(medianvals):
return [0.75 * val for val in medianvals]
def getusans(x):
return [[tuple([val[0], 0.75 * val[1]])] for val in x]
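# Example (sketch): getbtthresh([5000.0]) -> [3750.0]; for the usans,
# mergenode emits pairs like [mean_img, 5000.0] per run, which getusans turns
# into [[(mean_img, 3750.0)]], the (image, threshold) tuple list SUSAN expects.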
wf.connect(prefiltered_func_data_thresh, 'out_file', smooth, 'in_file')
wf.connect(medianval, ('out_stat', getbtthresh), smooth, 'brightness_threshold')
wf.connect(mergenode, ('out', getusans), smooth, 'usans')
"""
Mask the smoothed data with the dilated mask
"""
maskfunc3 = pe.MapNode(interface=fsl.ImageMaths(suffix='_mask',
op_string='-mas'),
iterfield=['in_file', 'in_file2'],
name='maskfunc3')
wf.connect(smooth, 'smoothed_file', maskfunc3, 'in_file')
wf.connect(dilatemask, 'out_file', maskfunc3, 'in_file2')
"""
Scale each volume of the run so that the median value of the run is set to 10000
"""
intnorm = pe.MapNode(interface=fsl.ImageMaths(suffix='_intnorm'),
iterfield=['in_file', 'op_string'],
name='intnorm')
"""
Define a function to get the scaling factor for intensity normalization
"""
def getinormscale(medianvals):
return ['-mul %.10f' % (10000. / val) for val in medianvals]
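# Example (sketch): a run median of 5000.0 gives getinormscale([5000.0]) ->
# ['-mul 2.0000000000'], scaling that run so its median becomes 10000.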
wf.connect(maskfunc3, 'out_file', intnorm, 'in_file')
wf.connect(medianval, ('out_stat', getinormscale), intnorm, 'op_string')
"""
Create tempMean
"""
tempMean = pe.MapNode(interface=fsl.ImageMaths(op_string='-Tmean',
suffix='_mean'),
iterfield=['in_file'],
name='tempMean')
wf.connect(intnorm, 'out_file', tempMean, 'in_file')
"""
Perform temporal highpass filtering on the data
"""
highpass = pe.MapNode(
interface=fsl.ImageMaths(op_string='-bptf %d -1 -add' % (hpcutoff / (2 * TR)), suffix='_tempfilt'),
iterfield=['in_file', 'in_file2'],
name='highpass')
wf.connect(tempMean, 'out_file', highpass, 'in_file2')
wf.connect(intnorm, 'out_file', highpass, 'in_file')
"""
Set up LEVEL 1
--------------
"""
"""
Setup a basic set of contrasts
"""
def get_contrasts(contrast_file, task_id, conds):
import numpy as np
contrast_def = np.genfromtxt(contrast_file, dtype=object)
if len(contrast_def.shape) == 1:
contrast_def = contrast_def[None, :]
contrasts = []
for row in contrast_def:
if row[0] != 'task%03d' % task_id:
continue
con = [row[1], 'T', ['ev%03d' % (i + 1) for i in range(len(conds))],
row[2:].astype(float).tolist()]
contrasts.append(con)
return contrasts
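# Example (sketch): a hypothetical row of task_contrasts.txt and the contrast
# it yields, assuming two level-1 EVs (column order: task id, contrast name,
# then one weight per EV):
#
#     task001 learn>apply 1 -1
#     # -> ['learn>apply', 'T', ['ev001', 'ev002'], [1.0, -1.0]]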
contrastgen = pe.Node(util.Function(input_names=['contrast_file',
'task_id', 'conds'],
output_names=['contrasts'],
function=get_contrasts),
name='contrastgen')
wf.connect(subjinfo, 'conds', contrastgen, 'conds')
wf.connect(datasource, 'contrasts', contrastgen, 'contrast_file')
wf.connect(subjinfo, 'task_id', contrastgen, 'task_id')
"""
Set up model fitting workflow
-----------------------------
Use :class:`nipype.algorithms.modelgen.SpecifyModel` to generate design information.
"""
modelspec = pe.MapNode(interface=model.SpecifyModel(), iterfield=['event_files', 'functional_runs'], name="modelspec")
modelspec.inputs.input_units = 'secs'
modelspec.inputs.high_pass_filter_cutoff = hpcutoff
modelspec.inputs.time_repetition = TR
wf.connect(reshape_behav, 'behav', modelspec, 'event_files')
wf.connect(highpass, 'out_file', modelspec, 'functional_runs')
"""
Use :class:`nipype.interfaces.fsl.Level1Design` to generate a run specific fsf
file for analysis
"""
level1design = pe.MapNode(interface=fsl.Level1Design(), iterfield=['session_info'], name="level1design")
level1design.inputs.interscan_interval = TR
level1design.inputs.bases = {'dgamma': {'derivs': True}}
level1design.inputs.model_serial_correlations = True
wf.connect(contrastgen, 'contrasts', level1design, 'contrasts')
wf.connect(modelspec, 'session_info', level1design, 'session_info')
"""
Use :class:`nipype.interfaces.fsl.FEATModel` to generate a run specific mat
file for use by FILMGLS
"""
modelgen = pe.MapNode(interface=fsl.FEATModel(), name='modelgen',
iterfield=['fsf_file', 'ev_files', 'args'])
wf.connect(level1design, 'ev_files', modelgen, 'ev_files')
wf.connect(level1design, 'fsf_files', modelgen, 'fsf_file')
wf.connect(datasource, 'confound', modelgen, 'args')
"""
Use :class:`nipype.interfaces.fsl.FILMGLS` to estimate a model specified by a
mat file and a functional run
"""
modelestimate = pe.MapNode(interface=fsl.FILMGLS(smooth_autocorr=True,
mask_size=film_ms,
threshold=film_thr),
name='modelestimate',
iterfield=['design_file', 'in_file', 'tcon_file'])
wf.connect([(highpass, modelestimate, [('out_file', 'in_file')]),
(modelgen, modelestimate, [('design_file', 'design_file')]),
])
wf.connect(modelgen, 'con_file', modelestimate, 'tcon_file')
"""
Level 2
-----------------------------
Apply registration
Merge the per-run transformation matrices and warp each run's copes and
varcopes into standard space; then regroup them per contrast so they can be
merged in time and passed to flameo.
"""
"""
Merge transforms
"""
merge_mat = pe.MapNode(util.Merge(2), iterfield=['in2'], name='merge_mat')
wf.connect(datasource, 'itk_transform', merge_mat, 'in2')
wf.connect(datasource, 'composite_transform', merge_mat, 'in1')
def warp_files(copes, varcopes, mat, template_brain):
import nipype.interfaces.ants as ants
out_copes = []
out_varcopes = []
warp = ants.ApplyTransforms()
warp.inputs.input_image_type = 0
warp.inputs.interpolation = 'Linear'
warp.inputs.invert_transform_flags = [False, False]
warp.inputs.terminal_output = 'file'
warp.inputs.reference_image = template_brain
warp.inputs.transforms = mat
for cope in copes:
warp.inputs.input_image = cope
res = warp.run()
out_copes.append(str(res.outputs.output_image))
for varcope in varcopes:
warp.inputs.input_image = varcope
res = warp.run()
out_varcopes.append(str(res.outputs.output_image))
return out_copes, out_varcopes
warpfunc = pe.MapNode(util.Function(input_names=['copes', 'varcopes', 'mat', 'template_brain'],
output_names=['out_copes', 'out_varcopes'],
function=warp_files),
iterfield=['copes', 'varcopes', 'mat'],
name='warpfunc')
warpfunc.inputs.template_brain = template_brain
wf.connect(modelestimate, 'copes', warpfunc, 'copes')
wf.connect(modelestimate, 'varcopes', warpfunc, 'varcopes')
wf.connect(merge_mat, 'out', warpfunc, 'mat')
"""
Setup a set of contrasts for level 2.
"""
def sort_copes(files):
    numelements = len(files[0])
    outfiles = []
    for i in range(numelements):
        outfiles.insert(i, [])
        for j, elements in enumerate(files):
            outfiles[i].append(elements[i])
    return outfiles
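# Example (sketch): sort_copes regroups per-run outputs into per-contrast
# lists, e.g. [[c1_r1, c2_r1], [c1_r2, c2_r2]] -> [[c1_r1, c1_r2], [c2_r1, c2_r2]],
# so each contrast can be merged across runs.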
def get_contrasts_l2(contrast_file, task_id, conds, evs_l2, copes):
import numpy as np
contrast_def = np.genfromtxt(contrast_file, dtype=object)
if len(contrast_def.shape) == 1:
contrast_def = contrast_def[None, :]
contrasts = []
for row in contrast_def:
if row[0] != 'task%03d' % task_id:
continue
con = [row[1], 'T', ['ev%03d' % (i + 1) for i in range(len(conds))],
row[2:].astype(float).tolist()]
contrasts.append(con)
# create EVs for the next MapNode
evs_l2 = [evs_l2] * len(copes)
return contrasts, evs_l2
contrastgen_l2 = pe.Node(util.Function(input_names=['contrast_file', 'task_id', 'conds', 'evs_l2', 'copes'],
output_names=['contrasts', 'evs_l2'],
function=get_contrasts_l2),
name='contrastgen_l2')
wf.connect(subjinfo, 'conds_l2', contrastgen_l2, 'conds')
wf.connect(datasource, 'contrasts_l2', contrastgen_l2, 'contrast_file')
wf.connect(subjinfo, 'task_id', contrastgen_l2, 'task_id')
wf.connect(subjinfo, 'evs_l2', contrastgen_l2, 'evs_l2')
wf.connect(warpfunc, ('out_copes', sort_copes), contrastgen_l2, 'copes')
"""
Work around a flameo quirk: if a level-1 cope is empty (all zeros, i.e. its
EV was 0 0 0), drop that cope and its varcope and trim the level-2 EVs to
match, so the design stays aligned with the merged files.
"""
def zero_copes_check(evs_l2, copes, varcopes):
import numpy as np
from nipype.interfaces.fsl import ImageStats
# check if copes are empty and fix them
concat_copes = []
concat_varcopes = []
evs_l2_new_dict = {}
for j in range(len(evs_l2)):
evs_l2_new_dict[list(evs_l2)[j]] = []
for i in range(len(copes)):
stats = ImageStats(in_file=copes[i], op_string='-R')
res = stats.run()
if res.outputs.out_stat != [0., 0.]:
concat_copes.append(copes[i])
concat_varcopes.append(varcopes[i])
for j in range(len(evs_l2)):
evs_l2_new_dict[list(evs_l2)[j]].append(evs_l2[list(evs_l2)[j]][i])
return evs_l2_new_dict, concat_copes, concat_varcopes
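# Example (sketch): with evs_l2 = {'ev001': [1, 1, 0, 0], 'ev002': [0, 0, 1, 1]}
# and the third cope all-zero, the node returns {'ev001': [1, 1, 0],
# 'ev002': [0, 0, 1]} plus the three surviving cope/varcope paths, keeping the
# level-2 regressors aligned with the merged files.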
empty_copes_check = pe.MapNode(util.Function(input_names=['evs_l2', 'copes', 'varcopes'],
output_names=['evs_l2', 'copes', 'varcopes'],
function=zero_copes_check),
iterfield=['evs_l2', 'copes', 'varcopes'],
name='empty_copes_check')
wf.connect(contrastgen_l2, 'evs_l2', empty_copes_check, 'evs_l2')
wf.connect(warpfunc, ('out_copes', sort_copes), empty_copes_check, 'copes')
wf.connect(warpfunc, ('out_varcopes', sort_copes), empty_copes_check, 'varcopes')
"""
Use :class:`nipype.interfaces.fsl.Merge` to merge the copes and
varcopes for each condition
"""
copemerge = pe.MapNode(interface=fsl.Merge(dimension='t'),
iterfield=['in_files'],
name="copemerge")
varcopemerge = pe.MapNode(interface=fsl.Merge(dimension='t'),
iterfield=['in_files'],
name="varcopemerge")
wf.connect(empty_copes_check, 'copes', copemerge, 'in_files')
wf.connect(empty_copes_check, 'varcopes', varcopemerge, 'in_files')
"""
Use MultipleRegressDesign to generate subject and condition
specific level 2 model design files
"""
level2model = pe.MapNode(interface=fsl.MultipleRegressDesign(),
iterfield=['regressors'],
name='l2model')
wf.connect(contrastgen_l2, 'contrasts', level2model, 'contrasts')
wf.connect(empty_copes_check, 'evs_l2', level2model, 'regressors')
"""
Use :class:`nipype.interfaces.fsl.FLAMEO` to estimate a second level model
"""
flameo = pe.MapNode(interface=fsl.FLAMEO(run_mode='fe'), name="flameo",
                    iterfield=['cope_file', 'var_cope_file', 'design_file', 't_con_file', 'cov_split_file'])
pickfirst = lambda x: x[0]
wf.connect([(copemerge, flameo, [('merged_file', 'cope_file')]),
(varcopemerge, flameo, [('merged_file', 'var_cope_file')]),
(level2model, flameo, [('design_mat', 'design_file'),
('design_con', 't_con_file'),
('design_grp', 'cov_split_file')]),
])
flameo.inputs.mask_file = template_mask
"""
Saving
"""
datasink = pe.Node(nio.DataSink(), name='sinker')
datasink.inputs.base_directory = os.path.join(project_dir, "level2s", "model" + model_id)
wf.connect(infosource, 'subject_id', datasink, 'container')
wf.connect(highpass, 'out_file', datasink, 'filtered_func_data')
wf.connect([(flameo, datasink, [('stats_dir', 'stats_dir')])])
"""
RUN
"""
outgraph = wf.run()
# outgraph = wf.run(plugin='MultiProc')
|
|
from datetime import date
from django.test import TestCase
#from unittest import TestCase
import words.dataretrieval
import words.requesthandler
import words.databaseinput
from words.emailsending import *
from words.models import Document_Data, Word_Data, Sentiment_Dict, Articles_Can
import csv
import decimal
# Create your tests here.
import gensim
class DataRetrievalTests(TestCase):
def setUp(self):
self.sentDict = []
self.artCan = []
self.docData = []
self.wordData = []
with open(r'words/sentiment_mock.csv', 'r') as csvfile: # iterate over docs in the CSV file
file = csv.DictReader(csvfile)
for line in file:
                self.sentDict.append(Sentiment_Dict(word=line['Word'], valence=line['Valence'],
                                                    arousal=line['Arousal'], dominance=0.0,
                                                    concreteness=0.0, aoa=0.0))
for item in self.sentDict:
item.save()
with open(r'words/corpus_mock.csv', 'r') as csvfile: # iterate over docs in the CSV file
file = csv.DictReader(csvfile)
for line in file:
                self.artCan.append(Articles_Can(article_id=line['articleID'], language=line['language'],
                                                province=line['province'], city=line['city'],
                                                country=line['country'],
                                                publicationDate=line['publicationDate'],
                                                wordCount=line['wordCount'],
                                                parsed_article=line['parsedArticle']))
words = line['parsedArticle'].split()
avgArousal = decimal.Decimal(0.0)
avgValence = decimal.Decimal(0.0)
wordCounts = {}
for wd in words:
sent = Sentiment_Dict.objects.get(word=wd)
avgArousal = avgArousal + sent.arousal
avgValence = avgValence + sent.valence
if wd not in wordCounts:
wordCounts[wd] = 0
wordCounts[wd] = wordCounts[wd] + 1
for k,v in wordCounts.items():
self.wordData.append(Word_Data(word=k,article_id=line['articleID'],word_count=v,term_frequency=0,tfidf=0))
avgArousal = avgArousal/len(words)
avgValence = avgValence/len(words)
                self.docData.append(Document_Data(article_id=line['articleID'], language=line['language'],
                                                  province=line['province'], city=line['city'],
                                                  country=line['country'],
                                                  publication_Date=line['publicationDate'],
                                                  word_count=len(words),
                                                  average_arousal_doc=avgArousal,
                                                  average_valence_doc=avgValence,
                                                  average_arousal_words=0,
                                                  average_valence_words=0))
for item in self.artCan:
item.save()
for item in self.docData:
item.save()
for item in self.wordData:
item.save()
def testGetArousal(self):
self.assertEqual(words.dataretrieval.getArousal('minors'), 0.57294869)
self.assertEqual(words.dataretrieval.getArousal('computer'), 0.37374821)
self.assertEqual(words.dataretrieval.getArousal('response'), 0.36475822)
self.assertEqual(words.dataretrieval.getArousal('kitten'), None)
def testGetValence(self):
self.assertEqual(words.dataretrieval.getValence('minors'), 0.42352934)
self.assertEqual(words.dataretrieval.getValence('computer'), 0.76485739)
self.assertEqual(words.dataretrieval.getValence('response'), 0.77583958)
        self.assertEqual(words.dataretrieval.getValence('kitten'), None)
def testGetDocuments(self):
startDate = date(2008, 2, 17)
endDate = date(2011, 11, 7)
#print(words.dataretrieval.getDocuments(startDate, endDate))
startDate = date(2010, 11, 7)
endDate = date(2015, 1, 31)
#print(words.dataretrieval.getDocuments(startDate, endDate))
def testGetDocumentData(self):
startDate = date(2008, 2, 17)
endDate = date(2011, 11, 7)
for doc in words.dataretrieval.getDocumentData(startDate, endDate):
#print(doc.article_id)
pass
#print()
startDate = date(2010, 11, 7)
endDate = date(2015, 1, 31)
for doc in words.dataretrieval.getDocumentData(startDate, endDate):
#print(doc.article_id)
pass
def testGetDocumentDataWithWordFilter(self):
startDate = date(2008,2,17)
endDate = date(2011,11,7)
docs = words.dataretrieval.getDocumentDataWithWordFilter(startDate, endDate, ['interface'])
ids = []
for item in docs:
ids.append(item.article_id)
self.assertTrue(10 in ids and 4 not in ids)
self.assertTrue(9 not in ids and 31 in ids)
def testGetWordData(self):
systemData = words.dataretrieval.getWordData('system')
for wd in systemData:
#print(wd.article_id, wd.word_count)
pass
def testGetWordsInDocument(self):
wordsIn1 = words.dataretrieval.getWordsInDocument(Document_Data.objects.get(article_id=1))
wordsIn4 = words.dataretrieval.getWordsInDocument(Document_Data.objects.get(article_id=4))
wordsIn145 = words.dataretrieval.getWordsInDocument(Document_Data.objects.get(article_id=145))
self.assertTrue('system' in wordsIn4)
self.assertTrue('human' in wordsIn4)
self.assertTrue('eps' in wordsIn4)
self.assertTrue('human' in wordsIn1)
self.assertTrue(wordsIn4.count('system') == 5)
self.assertTrue(wordsIn4.count('human') == 1)
self.assertTrue(wordsIn4.count('eps') == 1)
self.assertTrue(wordsIn4.count('survey') == 0)
self.assertTrue(wordsIn145.count('computer') == 1)
def testGetNumWordsInCorpus(self):
startDate = date(2008, 2, 17)
endDate = date(2011, 11, 7)
docs = words.dataretrieval.getDocumentData(startDate, endDate)
self.assertTrue(words.dataretrieval.getNumWordsInCorpus(docs) == 67)
def testGetNumWordInCorpus(self):
startDate = date(2008, 2, 17)
endDate = date(2011, 11, 7)
docs = words.dataretrieval.getDocumentData(startDate, endDate)
self.assertTrue(words.dataretrieval.getNumWordInCorpus(docs, 'system') == 26)
def testSplitDocuments(self):
startDate = date(2008, 2, 17)
endDate = date(2011, 11, 7)
docs = words.dataretrieval.getDocumentData(startDate, endDate)
splitYear = words.dataretrieval.splitDocuments(docs, 'Year')
for k,v in splitYear.items():
#print('Year: ', k)
for doc in v:
#print(doc.article_id)
pass
#print()
pass
splitMonth = words.dataretrieval.splitDocuments(docs, 'Month')
for k,v in splitMonth.items():
#print('Y/M: ', k)
for doc in v:
#print(doc.article_id)
pass
#print()
pass
class RequestHandlerTests(TestCase):
def setUp(self):
self.sentDict = []
self.artCan = []
self.docData = []
self.wordData = []
with open(r'words/sentiment_mock.csv', 'r') as csvfile: # iterate over docs in the CSV file
file = csv.DictReader(csvfile)
for line in file:
                self.sentDict.append(Sentiment_Dict(word=line['Word'], valence=line['Valence'],
                                                    arousal=line['Arousal'], dominance=0.0,
                                                    concreteness=0.0, aoa=0.0))
for item in self.sentDict:
item.save()
with open(r'words/corpus_mock.csv', 'r') as csvfile: # iterate over docs in the CSV file
file = csv.DictReader(csvfile)
for line in file:
                self.artCan.append(Articles_Can(article_id=line['articleID'], language=line['language'],
                                                province=line['province'], city=line['city'],
                                                country=line['country'],
                                                publicationDate=line['publicationDate'],
                                                wordCount=line['wordCount'],
                                                parsed_article=line['parsedArticle']))
words = line['parsedArticle'].split()
avgArousal = decimal.Decimal(0.0)
avgValence = decimal.Decimal(0.0)
wordCounts = {}
for wd in words:
sent = Sentiment_Dict.objects.get(word=wd)
avgArousal = avgArousal + sent.arousal
avgValence = avgValence + sent.valence
if wd not in wordCounts:
wordCounts[wd] = 0
wordCounts[wd] = wordCounts[wd] + 1
for k,v in wordCounts.items():
self.wordData.append(Word_Data(word=k,article_id=line['articleID'],word_count=v,term_frequency=0,tfidf=0))
avgArousal = avgArousal/len(words)
avgValence = avgValence/len(words)
self.docData.append(Document_Data(article_id=line['articleID'], language=line['language'],province=line['province'],city=line['city'],country=line['country'],publication_Date=line['publicationDate'],word_count=len(words),average_arousal_doc=avgArousal,average_valence_doc=avgValence,average_arousal_words=0,average_valence_words=0))
for item in self.artCan:
item.save()
for item in self.docData:
item.save()
for item in self.wordData:
item.save()
# working
def testCosDistanceOverTime(self):
dateRange = (date(2008, 2, 17), date(2010, 11, 11))
granularity = 'Year'
request = words.requesthandler.CosDistanceOverTimeRequest(dateRange, granularity, [('human', 'system')], False, 'test1')
result = request.execute()
print('Cos Distance over time')
print(result.xTitle, result.xValues)
for k,v in result.yValues.items():
print(k, v)
print("Errors: " + str(result.errors))
print()
# working
def testTfidfOverTime(self):
dateRange = (date(2008, 2, 17), date(2011, 11, 11))
granularity = 'Year'
request = words.requesthandler.TfidfOverTimeRequest(dateRange, granularity, ['human'], 'test2')
result = request.execute()
print('Average Tfidf over time')
print(result.xTitle, result.xValues)
for k,v in result.yValues.items():
print(k, v)
print("Errors: " + str(result.errors))
print()
# working
def testNClosestNeighboursOverTime(self):
dateRange = (date(2008, 2, 17), date(2010, 11, 11))
granularity = 'Year'
request = words.requesthandler.NClosestNeighboursOverTimeRequest(dateRange, granularity, ['human', 'system', 'interface'], 2, True, 'test3')
result = request.execute()
print('N Closest Neighbours over time')
print(result.xTitle, result.xValues)
for k,v in result.yValues.items():
print(k, v)
print("Errors: " + str(result.errors))
print()
# working
def testAverageArousalOverTime(self):
dateRange = (date(2008, 1, 1), date(2012, 12, 31))
granularity = 'Year'
request = words.requesthandler.AverageArousalOverTimeRequest(dateRange, granularity, ['human', 'system'], 'test4')
result = request.execute()
print('Average Arousal over time')
print(result.xTitle, result.xValues)
for k,v in result.yValues.items():
print(v)
print("Errors: " + str(result.errors))
print()
# working
def testAverageValenceOverTime(self):
dateRange = (date(2008, 1, 1), date(2011, 12, 31))
granularity = 'Year'
request = words.requesthandler.AverageValenceOverTimeRequest(dateRange, granularity, ['user'], 'test5')
result = request.execute()
print('Average Valence over time')
print(result.xTitle, result.xValues)
for k,v in result.yValues.items():
print(v)
print("Errors: " + str(result.errors))
print()
# working
def testAverageArousalTopFiveOverTime(self):
dateRange = (date(2013, 1, 1), date(2013, 12, 31))
granularity = 'Month'
request = words.requesthandler.AverageArousalFiveWordsOverTimeRequest(dateRange, granularity, ['trees'], 'test6')
result = request.execute()
print('Average Arousal Top Five Words over time')
print(result.xTitle, result.xValues)
for k,v in result.yValues.items():
print(v)
print("Errors: " + str(result.errors))
print()
# working
def testAverageValenceTopFiveOverTime(self):
dateRange = (date(2008, 2, 17), date(2011, 11, 11))
granularity = 'Year'
request = words.requesthandler.AverageValenceFiveWordsOverTimeRequest(dateRange, granularity, [], 'test7')
result = request.execute()
print('Average Valence Top Five Words over time')
print(result.xTitle, result.xValues)
for k,v in result.yValues.items():
print(v)
print("Errors: " + str(result.errors))
print()
# working
def testPairwiseProbabilities(self):
dateRange = (date(2008, 2, 17), date(2010, 12, 31))
granularity = 'Year'
request = words.requesthandler.PairwiseProbabilitiesOverTimeRequest(dateRange, granularity, [('human', 'user')] , 'test8')
result = request.execute()
print('Pairwise Probabilities over time')
print(result.xTitle, result.xValues)
for k,v in result.yValues.items():
print(k, v)
print("Errors: " + str(result.errors))
print()
# working
def testWordFrequency(self):
dateRange = (date(2008, 1, 1), date(2010, 12, 31))
granularity = 'Year'
request = words.requesthandler.WordFrequencyOverTimeRequest(dateRange, granularity, ['human'], 'test9')
result = request.execute()
#result.generateCSV('test9')
print('Word Frequency over time')
print(result.xTitle, result.xValues)
for k,v in result.yValues.items():
print(k, v)
print("Errors: " + str(result.errors))
print()
# working
def testRelativeWordFrequency(self):
dateRange = (date(2013, 1, 1), date(2013, 12, 31))
granularity = 'Month'
request = words.requesthandler.RelativeWordFrequencyOverTimeRequest(dateRange, granularity, ['human', 'tomato'], 'test10')
result = request.execute()
print('Relative Word Frequency over time')
print(result.xTitle, result.xValues)
for k,v in result.yValues.items():
print(k, v)
print("Errors: " + str(result.errors))
print()
    # Django's test runner does not support running this concurrently, but it does seem to work
def testRequestsExecuteThread(self):
reqs = []
dateRange = (date(2008, 2, 17), date(2010, 12, 31))
granularity = 'Year'
request = words.requesthandler.PairwiseProbabilitiesOverTimeRequest(dateRange, granularity, [('human', 'user')] , 'test11')
reqs.append(request)
dateRange = (date(2008, 2, 17), date(2010, 11, 11))
request = words.requesthandler.NClosestNeighboursOverTimeRequest(dateRange, granularity, ['human', 'system', 'interface'], 2, True, 'test12')
reqs.append(request)
ret = words.requesthandler.RequestsExecuteThread(reqs, r'tommy3@ualberta.ca')
ret.run()
# Will be implemented once there is a clearer way to test the analysis process, probably with help from the client.
class DataAnalyzerTests(TestCase):
def setUp(self):
self.sentDict = []
self.artCan = []
self.docData = []
self.wordData = []
        with open(r'words/sentiment_mock.csv', 'r') as csvfile:  # iterate over sentiment entries in the CSV file
            reader = csv.DictReader(csvfile)
            for line in reader:
                self.sentDict.append(Sentiment_Dict(
                    word=line['Word'], valence=line['Valence'],
                    arousal=line['Arousal'], dominance=0.0,
                    concreteness=0.0, aoa=0.0))
        for item in self.sentDict:
            item.save()
        with open(r'words/corpus_mock.csv', 'r') as csvfile:  # iterate over docs in the CSV file
            reader = csv.DictReader(csvfile)
            for line in reader:
                self.artCan.append(Articles_Can(
                    article_id=line['articleID'], language=line['language'],
                    province=line['province'], city=line['city'],
                    country=line['country'],
                    publicationDate=line['publicationDate'],
                    wordCount=line['wordCount'],
                    parsed_article=line['parsedArticle']))
                tokens = line['parsedArticle'].split()  # 'tokens', not 'words', to avoid shadowing the imported module
                avgArousal = decimal.Decimal(0.0)
                avgValence = decimal.Decimal(0.0)
                wordCounts = {}
                for wd in tokens:
                    sent = Sentiment_Dict.objects.get(word=wd)
                    avgArousal = avgArousal + sent.arousal
                    avgValence = avgValence + sent.valence
                    wordCounts[wd] = wordCounts.get(wd, 0) + 1
                for k, v in wordCounts.items():
                    self.wordData.append(Word_Data(
                        word=k, article_id=line['articleID'],
                        word_count=v, term_frequency=0, tfidf=0))
                avgArousal = avgArousal / len(tokens)
                avgValence = avgValence / len(tokens)
                self.docData.append(Document_Data(
                    article_id=line['articleID'], language=line['language'],
                    province=line['province'], city=line['city'],
                    country=line['country'],
                    publication_Date=line['publicationDate'],
                    word_count=len(tokens),
                    average_arousal_doc=avgArousal,
                    average_valence_doc=avgValence,
                    average_arousal_words=0, average_valence_words=0))
def testSaveMatrix(self):
pass
#class DataInputTests(TestCase):
#def testenterSentiment(self):
#words.databaseinput.enterSentiment('words/sentiment_mock.csv')
#result = sentiment_dict.objects.all()
#print('Sentiment_Mock in database:')
#for v in result:
#print(v)
#def testCorpusInput(self):
#words.databaseinput.enterArticles('words/corpus_mock.csv')
#result = articles_can.objects.all()
#print('corpus_Mock in database:')
#for v in result:
#print(v)
#def testDataInput(self):
#words.databaseinput.run('words/sentiment_mock.csv', 'words/corpus_mock.csv')
#pass
|
|
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: swift_conn
# You'll see swift_conn passed around a few places in this file. It is the
# source httplib connection of whatever object it is attached to.
# It is used when reading from the connection should be terminated early, such
# as when a range request has been satisfied but the source connection still
# has more data to send. To avoid having to read all of that remaining data,
# the source connection can be .close()'d, after which reads commence only to
# empty out any buffers.
# These shenanigans ensure all related objects can be garbage collected; we've
# seen objects hang around forever otherwise.
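#
# A minimal sketch of that early-termination pattern (the names `source` and
# `terminate_source_early` below are illustrative, not objects defined in
# this module):
#
#     def terminate_source_early(source):
#         swift_conn = getattr(source, 'swift_conn', None)
#         if swift_conn is not None:
#             swift_conn.close()   # stop the transfer at the socket level
#         source.read()            # drain buffers so everything can be GC'd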
import mimetypes
import os
from ConfigParser import ConfigParser
from gettext import gettext as _
from random import shuffle
from time import time
from eventlet import Timeout
from swift.common.ring import Ring
from swift.common.utils import cache_from_env, get_logger, \
get_remote_client, split_path, config_true_value, generate_trans_id, \
affinity_key_function, affinity_locality_predicate
from swift.common.constraints import check_utf8
from swift.proxy.controllers import AccountController, ObjectController, \
ContainerController
from swift.common.swob import HTTPBadRequest, HTTPForbidden, \
HTTPMethodNotAllowed, HTTPNotFound, HTTPPreconditionFailed, \
HTTPServerError, Request
class Application(object):
"""WSGI application for the proxy server."""
def __init__(self, conf, memcache=None, logger=None, account_ring=None,
container_ring=None, object_ring=None):
if conf is None:
conf = {}
if logger is None:
self.logger = get_logger(conf, log_route='proxy-server')
else:
self.logger = logger
swift_dir = conf.get('swift_dir', '/etc/swift')
self.node_timeout = int(conf.get('node_timeout', 10))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.client_timeout = int(conf.get('client_timeout', 60))
self.put_queue_depth = int(conf.get('put_queue_depth', 10))
self.object_chunk_size = int(conf.get('object_chunk_size', 65536))
self.client_chunk_size = int(conf.get('client_chunk_size', 65536))
self.trans_id_suffix = conf.get('trans_id_suffix', '')
self.error_suppression_interval = \
int(conf.get('error_suppression_interval', 60))
self.error_suppression_limit = \
int(conf.get('error_suppression_limit', 10))
self.recheck_container_existence = \
int(conf.get('recheck_container_existence', 60))
self.recheck_account_existence = \
int(conf.get('recheck_account_existence', 60))
self.allow_account_management = \
config_true_value(conf.get('allow_account_management', 'no'))
self.object_post_as_copy = \
config_true_value(conf.get('object_post_as_copy', 'true'))
self.resellers_conf = ConfigParser()
self.resellers_conf.read(os.path.join(swift_dir, 'resellers.conf'))
self.object_ring = object_ring or Ring(swift_dir, ring_name='object')
self.container_ring = container_ring or Ring(swift_dir,
ring_name='container')
self.account_ring = account_ring or Ring(swift_dir,
ring_name='account')
self.memcache = memcache
mimetypes.init(mimetypes.knownfiles +
[os.path.join(swift_dir, 'mime.types')])
self.account_autocreate = \
config_true_value(conf.get('account_autocreate', 'no'))
self.expiring_objects_account = \
(conf.get('auto_create_account_prefix') or '.') + \
'expiring_objects'
self.expiring_objects_container_divisor = \
int(conf.get('expiring_objects_container_divisor') or 86400)
self.max_containers_per_account = \
int(conf.get('max_containers_per_account') or 0)
self.max_containers_whitelist = [
a.strip()
for a in conf.get('max_containers_whitelist', '').split(',')
if a.strip()]
self.deny_host_headers = [
host.strip() for host in
conf.get('deny_host_headers', '').split(',') if host.strip()]
self.rate_limit_after_segment = \
int(conf.get('rate_limit_after_segment', 10))
self.rate_limit_segments_per_sec = \
int(conf.get('rate_limit_segments_per_sec', 1))
self.log_handoffs = config_true_value(conf.get('log_handoffs', 'true'))
self.cors_allow_origin = [
a.strip()
for a in conf.get('cors_allow_origin', '').split(',')
if a.strip()]
self.node_timings = {}
self.timing_expiry = int(conf.get('timing_expiry', 300))
self.sorting_method = conf.get('sorting_method', 'shuffle').lower()
self.allow_static_large_object = config_true_value(
conf.get('allow_static_large_object', 'true'))
self.max_large_object_get_time = float(
conf.get('max_large_object_get_time', '86400'))
value = conf.get('request_node_count', '2 * replicas').lower().split()
if len(value) == 1:
value = int(value[0])
self.request_node_count = lambda r: value
elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
value = int(value[0])
self.request_node_count = lambda r: value * r.replica_count
else:
raise ValueError(
'Invalid request_node_count value: %r' % ''.join(value))
try:
read_affinity = conf.get('read_affinity', '')
self.read_affinity_sort_key = affinity_key_function(read_affinity)
except ValueError as err:
# make the message a little more useful
raise ValueError("Invalid read_affinity value: %r (%s)" %
(read_affinity, err.message))
try:
write_affinity = conf.get('write_affinity', '')
self.write_affinity_is_local_fn \
= affinity_locality_predicate(write_affinity)
except ValueError as err:
# make the message a little more useful
raise ValueError("Invalid write_affinity value: %r (%s)" %
(write_affinity, err.message))
value = conf.get('write_affinity_node_count',
'2 * replicas').lower().split()
if len(value) == 1:
value = int(value[0])
self.write_affinity_node_count = lambda r: value
elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
value = int(value[0])
self.write_affinity_node_count = lambda r: value * r.replica_count
else:
raise ValueError(
'Invalid write_affinity_node_count value: %r' % ''.join(value))
swift_owner_headers = conf.get(
'swift_owner_headers',
'x-container-read, x-container-write, '
'x-container-sync-key, x-container-sync-to, '
'x-account-meta-temp-url-key, x-account-meta-temp-url-key-2')
self.swift_owner_headers = [
name.strip()
for name in swift_owner_headers.split(',') if name.strip()]
def get_controller(self, path):
"""
Get the controller to handle a request.
:param path: path from request
:returns: tuple of (controller class, path dictionary)
:raises: ValueError (thrown by split_path) if given invalid path
"""
version, account, container, obj = split_path(path, 1, 4, True)
d = dict(version=version,
account_name=account,
container_name=container,
object_name=obj)
if obj and container and account:
return ObjectController, d
elif container and account:
return ContainerController, d
elif account and not container and not obj:
return AccountController, d
return None, d
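    # For illustration (the account/container/object names here are made up),
    # a standard Swift path resolves as:
    #   /v1/AUTH_test                 -> AccountController
    #   /v1/AUTH_test/photos          -> ContainerController
    #   /v1/AUTH_test/photos/cat.jpg  -> ObjectController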
def __call__(self, env, start_response):
"""
WSGI entry point.
Wraps env in swob.Request object and passes it down.
:param env: WSGI environment dictionary
:param start_response: WSGI callable
"""
try:
if self.memcache is None:
self.memcache = cache_from_env(env)
req = self.update_request(Request(env))
return self.handle_request(req)(env, start_response)
except UnicodeError:
err = HTTPPreconditionFailed(
request=req, body='Invalid UTF8 or contains NULL')
return err(env, start_response)
except (Exception, Timeout):
start_response('500 Server Error',
[('Content-Type', 'text/plain')])
return ['Internal server error.\n']
def update_request(self, req):
if 'x-storage-token' in req.headers and \
'x-auth-token' not in req.headers:
req.headers['x-auth-token'] = req.headers['x-storage-token']
return req
def handle_request(self, req):
"""
Entry point for proxy server.
Should return a WSGI-style callable (such as swob.Response).
:param req: swob.Request object
"""
try:
self.logger.set_statsd_prefix('proxy-server')
if req.content_length and req.content_length < 0:
self.logger.increment('errors')
return HTTPBadRequest(request=req,
body='Invalid Content-Length')
try:
if not check_utf8(req.path_info):
self.logger.increment('errors')
return HTTPPreconditionFailed(
request=req, body='Invalid UTF8 or contains NULL')
except UnicodeError:
self.logger.increment('errors')
return HTTPPreconditionFailed(
request=req, body='Invalid UTF8 or contains NULL')
try:
controller, path_parts = self.get_controller(req.path)
p = req.path_info
if isinstance(p, unicode):
p = p.encode('utf-8')
except ValueError:
self.logger.increment('errors')
return HTTPNotFound(request=req)
if not controller:
self.logger.increment('errors')
return HTTPPreconditionFailed(request=req, body='Bad URL')
if self.deny_host_headers and \
req.host.split(':')[0] in self.deny_host_headers:
return HTTPForbidden(request=req, body='Invalid host header')
self.logger.set_statsd_prefix('proxy-server.' +
controller.server_type.lower())
controller = controller(self, **path_parts)
if 'swift.trans_id' not in req.environ:
# if this wasn't set by an earlier middleware, set it now
trans_id = generate_trans_id(self.trans_id_suffix)
req.environ['swift.trans_id'] = trans_id
self.logger.txn_id = trans_id
req.headers['x-trans-id'] = req.environ['swift.trans_id']
controller.trans_id = req.environ['swift.trans_id']
self.logger.client_ip = get_remote_client(req)
try:
handler = getattr(controller, req.method)
getattr(handler, 'publicly_accessible')
except AttributeError:
allowed_methods = getattr(controller, 'allowed_methods', set())
return HTTPMethodNotAllowed(
request=req, headers={'Allow': ', '.join(allowed_methods)})
if path_parts['version']:
req.path_info_pop()
if 'swift.authorize' in req.environ:
# We call authorize before the handler, always. If authorized,
                # we remove the swift.authorize hook so it isn't ever called
# again. If not authorized, we return the denial unless the
# controller's method indicates it'd like to gather more
# information and try again later.
resp = req.environ['swift.authorize'](req)
if not resp:
# No resp means authorized, no delayed recheck required.
del req.environ['swift.authorize']
else:
# Response indicates denial, but we might delay the denial
# and recheck later. If not delayed, return the error now.
if not getattr(handler, 'delay_denial', None):
return resp
# Save off original request method (GET, POST, etc.) in case it
# gets mutated during handling. This way logging can display the
# method the client actually sent.
req.environ['swift.orig_req_method'] = req.method
return handler(req)
except (Exception, Timeout):
self.logger.exception(_('ERROR Unhandled exception in request'))
return HTTPServerError(request=req)
def sort_nodes(self, nodes):
'''
Sorts nodes in-place (and returns the sorted list) according to
the configured strategy. The default "sorting" is to randomly
shuffle the nodes. If the "timing" strategy is chosen, the nodes
are sorted according to the stored timing data.
'''
# In the case of timing sorting, shuffling ensures that close timings
        # (i.e., within the rounding resolution) won't prefer one over another.
# Python's sort is stable (http://wiki.python.org/moin/HowTo/Sorting/)
shuffle(nodes)
if self.sorting_method == 'timing':
now = time()
def key_func(node):
timing, expires = self.node_timings.get(node['ip'], (-1.0, 0))
return timing if expires > now else -1.0
nodes.sort(key=key_func)
elif self.sorting_method == 'affinity':
nodes.sort(key=self.read_affinity_sort_key)
return nodes
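    # A minimal sketch of the "timing" strategy (values are made up): with
    # node_timings = {'10.0.0.1': (0.002, now + 300),
    #                 '10.0.0.2': (0.010, now + 300)}
    # key_func sorts the 10.0.0.1 node first; expired or unknown entries
    # evaluate to -1.0 and therefore sort ahead of all timed nodes.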
def set_node_timing(self, node, timing):
if self.sorting_method != 'timing':
return
now = time()
timing = round(timing, 3) # sort timings to the millisecond
self.node_timings[node['ip']] = (timing, now + self.timing_expiry)
def app_factory(global_conf, **local_conf):
"""paste.deploy app factory for creating WSGI proxy apps."""
conf = global_conf.copy()
conf.update(local_conf)
return Application(conf)
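# A minimal paste.deploy configuration sketch for wiring up this factory. The
# section name and egg entry point follow the usual Swift conventions; treat
# the exact values as an assumption rather than a verified config:
#
#     [app:proxy-server]
#     use = egg:swift#proxy
#     allow_account_management = true
#
# paste.deploy resolves the entry point to app_factory above and passes the
# section's options in as local_conf.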
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The VectorDiffeomixture distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops.bijectors.affine_linear_operator import AffineLinearOperator
from tensorflow.contrib.distributions.python.ops.bijectors.softmax_centered import SoftmaxCentered
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import categorical as categorical_lib
from tensorflow.python.ops.distributions import distribution as distribution_lib
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.ops.linalg import linear_operator_addition as linop_add_lib
from tensorflow.python.ops.linalg import linear_operator_diag as linop_diag_lib
from tensorflow.python.ops.linalg import linear_operator_full_matrix as linop_full_lib
from tensorflow.python.ops.linalg import linear_operator_identity as linop_identity_lib
from tensorflow.python.ops.linalg import linear_operator_lower_triangular as linop_tril_lib
from tensorflow.python.util import deprecation
__all__ = [
"VectorDiffeomixture",
"quadrature_scheme_softmaxnormal_gauss_hermite",
"quadrature_scheme_softmaxnormal_quantiles",
]
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def quadrature_scheme_softmaxnormal_gauss_hermite(
normal_loc, normal_scale, quadrature_size,
validate_args=False, name=None):
"""Use Gauss-Hermite quadrature to form quadrature on `K - 1` simplex.
A `SoftmaxNormal` random variable `Y` may be generated via
```
Y = SoftmaxCentered(X),
X = Normal(normal_loc, normal_scale)
```
Note: for a given `quadrature_size`, this method is generally less accurate
than `quadrature_scheme_softmaxnormal_quantiles`.
Args:
normal_loc: `float`-like `Tensor` with shape `[b1, ..., bB, K-1]`, B>=0.
The location parameter of the Normal used to construct the SoftmaxNormal.
normal_scale: `float`-like `Tensor`. Broadcastable with `normal_loc`.
The scale parameter of the Normal used to construct the SoftmaxNormal.
quadrature_size: Python `int` scalar representing the number of quadrature
points.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
grid: Shape `[b1, ..., bB, K, quadrature_size]` `Tensor` representing the
convex combination of affine parameters for `K` components.
`grid[..., :, n]` is the `n`-th grid point, living in the `K - 1` simplex.
    probs: Shape `[b1, ..., bB, K, quadrature_size]` `Tensor` representing the
      probabilities associated with each grid point.
"""
with ops.name_scope(name, "quadrature_scheme_softmaxnormal_gauss_hermite",
[normal_loc, normal_scale]):
normal_loc = ops.convert_to_tensor(normal_loc, name="normal_loc")
dt = normal_loc.dtype.base_dtype
normal_scale = ops.convert_to_tensor(
normal_scale, dtype=dt, name="normal_scale")
normal_scale = maybe_check_quadrature_param(
normal_scale, "normal_scale", validate_args)
grid, probs = np.polynomial.hermite.hermgauss(deg=quadrature_size)
grid = grid.astype(dt.dtype.as_numpy_dtype)
probs = probs.astype(dt.dtype.as_numpy_dtype)
probs /= np.linalg.norm(probs, ord=1, keepdims=True)
probs = ops.convert_to_tensor(probs, name="probs", dtype=dt)
grid = softmax(
-distribution_util.pad(
(normal_loc[..., array_ops.newaxis] +
np.sqrt(2.) * normal_scale[..., array_ops.newaxis] * grid),
axis=-2,
front=True),
axis=-2) # shape: [B, components, deg]
return grid, probs
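# A minimal usage sketch for the scheme above (shapes and values are
# illustrative): with a single mixing dimension (`K - 1 = 1`, so `K = 2`),
#
#     grid, probs = quadrature_scheme_softmaxnormal_gauss_hermite(
#         normal_loc=[0.], normal_scale=[1.], quadrature_size=8)
#
# `grid` has shape [2, 8] (one simplex coordinate per component per quadrature
# node) and `probs` holds the 8 Gauss-Hermite weights, normalized to sum to 1.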
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def quadrature_scheme_softmaxnormal_quantiles(
normal_loc, normal_scale, quadrature_size,
validate_args=False, name=None):
"""Use SoftmaxNormal quantiles to form quadrature on `K - 1` simplex.
A `SoftmaxNormal` random variable `Y` may be generated via
```
Y = SoftmaxCentered(X),
X = Normal(normal_loc, normal_scale)
```
Args:
normal_loc: `float`-like `Tensor` with shape `[b1, ..., bB, K-1]`, B>=0.
The location parameter of the Normal used to construct the SoftmaxNormal.
normal_scale: `float`-like `Tensor`. Broadcastable with `normal_loc`.
The scale parameter of the Normal used to construct the SoftmaxNormal.
quadrature_size: Python `int` scalar representing the number of quadrature
points.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
grid: Shape `[b1, ..., bB, K, quadrature_size]` `Tensor` representing the
convex combination of affine parameters for `K` components.
`grid[..., :, n]` is the `n`-th grid point, living in the `K - 1` simplex.
    probs: Shape `[b1, ..., bB, K, quadrature_size]` `Tensor` representing the
      probabilities associated with each grid point.
"""
with ops.name_scope(name, "softmax_normal_grid_and_probs",
[normal_loc, normal_scale]):
normal_loc = ops.convert_to_tensor(normal_loc, name="normal_loc")
dt = normal_loc.dtype.base_dtype
normal_scale = ops.convert_to_tensor(
normal_scale, dtype=dt, name="normal_scale")
normal_scale = maybe_check_quadrature_param(
normal_scale, "normal_scale", validate_args)
dist = normal_lib.Normal(loc=normal_loc, scale=normal_scale)
def _get_batch_ndims():
"""Helper to get dist.batch_shape.ndims, statically if possible."""
ndims = dist.batch_shape.ndims
if ndims is None:
ndims = array_ops.shape(dist.batch_shape_tensor())[0]
return ndims
batch_ndims = _get_batch_ndims()
def _get_final_shape(qs):
"""Helper to build `TensorShape`."""
bs = dist.batch_shape.with_rank_at_least(1)
num_components = bs[-1].value
if num_components is not None:
num_components += 1
tail = tensor_shape.TensorShape([num_components, qs])
return bs[:-1].concatenate(tail)
def _compute_quantiles():
"""Helper to build quantiles."""
# Omit {0, 1} since they might lead to Inf/NaN.
zero = array_ops.zeros([], dtype=dist.dtype)
edges = math_ops.linspace(zero, 1., quadrature_size + 3)[1:-1]
      # Expand edges so it broadcasts across batch dims.
edges = array_ops.reshape(edges, shape=array_ops.concat([
[-1], array_ops.ones([batch_ndims], dtype=dtypes.int32)], axis=0))
quantiles = dist.quantile(edges)
quantiles = SoftmaxCentered().forward(quantiles)
# Cyclically permute left by one.
perm = array_ops.concat([
math_ops.range(1, 1 + batch_ndims), [0]], axis=0)
quantiles = array_ops.transpose(quantiles, perm)
quantiles.set_shape(_get_final_shape(quadrature_size + 1))
return quantiles
quantiles = _compute_quantiles()
# Compute grid as quantile midpoints.
grid = (quantiles[..., :-1] + quantiles[..., 1:]) / 2.
# Set shape hints.
grid.set_shape(_get_final_shape(quadrature_size))
# By construction probs is constant, i.e., `1 / quadrature_size`. This is
# important, because non-constant probs leads to non-reparameterizable
# samples.
probs = array_ops.fill(
dims=[quadrature_size],
value=1. / math_ops.cast(quadrature_size, dist.dtype))
return grid, probs
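# Sketch of the quantile-midpoint construction above (illustrative numbers):
# for quadrature_size=3, `edges = linspace(0, 1, 6)[1:-1]` yields 4 interior
# probability levels; the SoftmaxCentered quantiles at those levels are then
# averaged pairwise to give the 3 grid midpoints, each carrying weight 1/3.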
class VectorDiffeomixture(distribution_lib.Distribution):
"""VectorDiffeomixture distribution.
A vector diffeomixture (VDM) is a distribution parameterized by a convex
combination of `K` component `loc` vectors, `loc[k], k = 0,...,K-1`, and `K`
`scale` matrices `scale[k], k = 0,..., K-1`. It approximates the following
[compound distribution]
(https://en.wikipedia.org/wiki/Compound_probability_distribution)
```none
p(x) = int p(x | z) p(z) dz,
where z is in the K-simplex, and
p(x | z) := p(x | loc=sum_k z[k] loc[k], scale=sum_k z[k] scale[k])
```
The integral `int p(x | z) p(z) dz` is approximated with a quadrature scheme
adapted to the mixture density `p(z)`. The `N` quadrature points `z_{N, n}`
and weights `w_{N, n}` (which are non-negative and sum to 1) are chosen
such that
```q_N(x) := sum_{n=1}^N w_{n, N} p(x | z_{N, n}) --> p(x)```
as `N --> infinity`.
Since `q_N(x)` is in fact a mixture (of `N` points), we may sample from
`q_N` exactly. It is important to note that the VDM is *defined* as `q_N`
above, and *not* `p(x)`. Therefore, sampling and pdf may be implemented as
exact (up to floating point error) methods.
A common choice for the conditional `p(x | z)` is a multivariate Normal.
The implemented marginal `p(z)` is the `SoftmaxNormal`, which is a
`K-1` dimensional Normal transformed by a `SoftmaxCentered` bijector, making
it a density on the `K`-simplex. That is,
```
Z = SoftmaxCentered(X),
X = Normal(mix_loc / temperature, 1 / temperature)
```
The default quadrature scheme chooses `z_{N, n}` as `N` midpoints of
the quantiles of `p(z)` (generalized quantiles if `K > 2`).
See [Dillon and Langmore (2018)][1] for more details.
#### About `Vector` distributions in TensorFlow.
The `VectorDiffeomixture` is a non-standard distribution that has properties
particularly useful in [variational Bayesian
methods](https://en.wikipedia.org/wiki/Variational_Bayesian_methods).
Conditioned on a draw from the SoftmaxNormal, `X|z` is a vector whose
components are linear combinations of affine transformations, thus is itself
an affine transformation.
  Note: The marginals `X_1|v, ..., X_d|v` are *not* generally identical to some
  parameterization of `distribution`. This is because a sum of draws from
  `distribution` does not generally follow the same `distribution`.
#### About `Diffeomixture`s and reparameterization.
The `VectorDiffeomixture` is designed to be reparameterized, i.e., its
parameters are only used to transform samples from a distribution which has no
trainable parameters. This property is important because backprop stops at
sources of stochasticity. That is, as long as the parameters are used *after*
the underlying source of stochasticity, the computed gradient is accurate.
  Reparameterization means that we can use gradient-descent (via backprop) to
optimize Monte-Carlo objectives. Such objectives are a finite-sample
approximation of an expectation and arise throughout scientific computing.
WARNING: If you backprop through a VectorDiffeomixture sample and the "base"
distribution is both: not `FULLY_REPARAMETERIZED` and a function of trainable
variables, then the gradient is not guaranteed correct!
#### Examples
```python
tfd = tf.contrib.distributions
# Create two batches of VectorDiffeomixtures, one with mix_loc=[0.],
# another with mix_loc=[1]. In both cases, `K=2` and the affine
# transformations involve:
# k=0: loc=zeros(dims) scale=LinearOperatorScaledIdentity
# k=1: loc=[2.]*dims scale=LinOpDiag
dims = 5
vdm = tfd.VectorDiffeomixture(
mix_loc=[[0.], [1]],
temperature=[1.],
distribution=tfd.Normal(loc=0., scale=1.),
loc=[
None, # Equivalent to `np.zeros(dims, dtype=np.float32)`.
np.float32([2.]*dims),
],
scale=[
tf.linalg.LinearOperatorScaledIdentity(
num_rows=dims,
multiplier=np.float32(1.1),
is_positive_definite=True),
tf.linalg.LinearOperatorDiag(
diag=np.linspace(2.5, 3.5, dims, dtype=np.float32),
is_positive_definite=True),
],
validate_args=True)
```
#### References
[1]: Joshua Dillon and Ian Langmore. Quadrature Compound: An approximating
family of distributions. _arXiv preprint arXiv:1801.03080_, 2018.
https://arxiv.org/abs/1801.03080
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
mix_loc,
temperature,
distribution,
loc=None,
scale=None,
quadrature_size=8,
quadrature_fn=quadrature_scheme_softmaxnormal_quantiles,
validate_args=False,
allow_nan_stats=True,
name="VectorDiffeomixture"):
"""Constructs the VectorDiffeomixture on `R^d`.
The vector diffeomixture (VDM) approximates the compound distribution
```none
p(x) = int p(x | z) p(z) dz,
where z is in the K-simplex, and
p(x | z) := p(x | loc=sum_k z[k] loc[k], scale=sum_k z[k] scale[k])
```
Args:
mix_loc: `float`-like `Tensor` with shape `[b1, ..., bB, K-1]`.
In terms of samples, larger `mix_loc[..., k]` ==>
`Z` is more likely to put more weight on its `kth` component.
temperature: `float`-like `Tensor`. Broadcastable with `mix_loc`.
In terms of samples, smaller `temperature` means one component is more
likely to dominate. I.e., smaller `temperature` makes the VDM look more
like a standard mixture of `K` components.
distribution: `tf.Distribution`-like instance. Distribution from which `d`
iid samples are used as input to the selected affine transformation.
Must be a scalar-batch, scalar-event distribution. Typically
`distribution.reparameterization_type = FULLY_REPARAMETERIZED` or it is
a function of non-trainable parameters. WARNING: If you backprop through
a VectorDiffeomixture sample and the `distribution` is not
`FULLY_REPARAMETERIZED` yet is a function of trainable variables, then
the gradient will be incorrect!
loc: Length-`K` list of `float`-type `Tensor`s. The `k`-th element
represents the `shift` used for the `k`-th affine transformation. If
the `k`-th item is `None`, `loc` is implicitly `0`. When specified,
must have shape `[B1, ..., Bb, d]` where `b >= 0` and `d` is the event
size.
scale: Length-`K` list of `LinearOperator`s. Each should be
positive-definite and operate on a `d`-dimensional vector space. The
`k`-th element represents the `scale` used for the `k`-th affine
transformation. `LinearOperator`s must have shape `[B1, ..., Bb, d, d]`,
        `b >= 0`, i.e., characterizes `b`-batches of `d x d` matrices.
quadrature_size: Python `int` scalar representing number of
quadrature points. Larger `quadrature_size` means `q_N(x)` better
approximates `p(x)`.
      quadrature_fn: Python callable taking `normal_loc`, `normal_scale`,
        `quadrature_size`, `validate_args` and returning `tuple(grid, probs)`
        representing the SoftmaxNormal grid and corresponding normalized
        weights.
        Default value: `quadrature_scheme_softmaxnormal_quantiles`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: if `not scale or len(scale) < 2`.
ValueError: if `len(loc) != len(scale)`
ValueError: if `quadrature_grid_and_probs is not None` and
`len(quadrature_grid_and_probs[0]) != len(quadrature_grid_and_probs[1])`
ValueError: if `validate_args` and any not scale.is_positive_definite.
TypeError: if any scale.dtype != scale[0].dtype.
TypeError: if any loc.dtype != scale[0].dtype.
NotImplementedError: if `len(scale) != 2`.
ValueError: if `not distribution.is_scalar_batch`.
ValueError: if `not distribution.is_scalar_event`.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[mix_loc, temperature]) as name:
if not scale or len(scale) < 2:
raise ValueError("Must specify list (or list-like object) of scale "
"LinearOperators, one for each component with "
"num_component >= 2.")
if loc is None:
loc = [None]*len(scale)
if len(loc) != len(scale):
raise ValueError("loc/scale must be same-length lists "
"(or same-length list-like objects).")
dtype = scale[0].dtype.base_dtype
loc = [ops.convert_to_tensor(loc_, dtype=dtype, name="loc{}".format(k))
if loc_ is not None else None
for k, loc_ in enumerate(loc)]
for k, scale_ in enumerate(scale):
if validate_args and not scale_.is_positive_definite:
raise ValueError("scale[{}].is_positive_definite = {} != True".format(
k, scale_.is_positive_definite))
if scale_.dtype.base_dtype != dtype:
raise TypeError(
"dtype mismatch; scale[{}].base_dtype=\"{}\" != \"{}\"".format(
k, scale_.dtype.base_dtype.name, dtype.name))
self._endpoint_affine = [
AffineLinearOperator(shift=loc_,
scale=scale_,
validate_args=validate_args,
name="endpoint_affine_{}".format(k))
for k, (loc_, scale_) in enumerate(zip(loc, scale))]
# TODO(jvdillon): Remove once we support k-mixtures.
# We make this assertion here because otherwise `grid` would need to be a
# vector not a scalar.
if len(scale) != 2:
raise NotImplementedError("Currently only bimixtures are supported; "
"len(scale)={} is not 2.".format(len(scale)))
mix_loc = ops.convert_to_tensor(
mix_loc, dtype=dtype, name="mix_loc")
temperature = ops.convert_to_tensor(
temperature, dtype=dtype, name="temperature")
self._grid, probs = tuple(quadrature_fn(
mix_loc / temperature,
1. / temperature,
quadrature_size,
validate_args))
# Note: by creating the logits as `log(prob)` we ensure that
# `self.mixture_distribution.logits` is equivalent to
# `math_ops.log(self.mixture_distribution.probs)`.
self._mixture_distribution = categorical_lib.Categorical(
logits=math_ops.log(probs),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats)
asserts = distribution_util.maybe_check_scalar_distribution(
distribution, dtype, validate_args)
if asserts:
self._grid = control_flow_ops.with_dependencies(
asserts, self._grid)
self._distribution = distribution
self._interpolated_affine = [
AffineLinearOperator(shift=loc_,
scale=scale_,
validate_args=validate_args,
name="interpolated_affine_{}".format(k))
for k, (loc_, scale_) in enumerate(zip(
interpolate_loc(self._grid, loc),
interpolate_scale(self._grid, scale)))]
[
self._batch_shape_,
self._batch_shape_tensor_,
self._event_shape_,
self._event_shape_tensor_,
] = determine_batch_event_shapes(self._grid,
self._endpoint_affine)
super(VectorDiffeomixture, self).__init__(
dtype=dtype,
# We hard-code `FULLY_REPARAMETERIZED` because when
# `validate_args=True` we verify that indeed
# `distribution.reparameterization_type == FULLY_REPARAMETERIZED`. A
# distribution which is a function of only non-trainable parameters
# also implies we can use `FULLY_REPARAMETERIZED`. However, we cannot
# easily test for that possibility thus we use `validate_args=False`
# as a "back-door" to allow users a way to use non
# `FULLY_REPARAMETERIZED` distribution. In such cases IT IS THE USERS
# RESPONSIBILITY to verify that the base distribution is a function of
# non-trainable parameters.
reparameterization_type=distribution_lib.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=(
distribution._graph_parents # pylint: disable=protected-access
+ [loc_ for loc_ in loc if loc_ is not None]
+ [p for scale_ in scale for p in scale_.graph_parents]),
name=name)
@property
def mixture_distribution(self):
"""Distribution used to select a convex combination of affine transforms."""
return self._mixture_distribution
@property
def distribution(self):
"""Base scalar-event, scalar-batch distribution."""
return self._distribution
@property
def grid(self):
"""Grid of mixing probabilities, one for each grid point."""
return self._grid
@property
def endpoint_affine(self):
"""Affine transformation for each of `K` components."""
return self._endpoint_affine
@property
def interpolated_affine(self):
"""Affine transformation for each convex combination of `K` components."""
return self._interpolated_affine
def _batch_shape_tensor(self):
return self._batch_shape_tensor_
def _batch_shape(self):
return self._batch_shape_
def _event_shape_tensor(self):
return self._event_shape_tensor_
def _event_shape(self):
return self._event_shape_
def _sample_n(self, n, seed=None):
x = self.distribution.sample(
sample_shape=concat_vectors(
[n],
self.batch_shape_tensor(),
self.event_shape_tensor()),
seed=seed) # shape: [n, B, e]
x = [aff.forward(x) for aff in self.endpoint_affine]
    # Get ids as a [n, batch_size]-shaped matrix, unless batch_shape=[], in
    # which case get ids as a [n]-shaped vector.
batch_size = self.batch_shape.num_elements()
if batch_size is None:
batch_size = array_ops.reduce_prod(self.batch_shape_tensor())
mix_batch_size = self.mixture_distribution.batch_shape.num_elements()
if mix_batch_size is None:
mix_batch_size = math_ops.reduce_prod(
self.mixture_distribution.batch_shape_tensor())
ids = self.mixture_distribution.sample(
sample_shape=concat_vectors(
[n],
distribution_util.pick_vector(
self.is_scalar_batch(),
np.int32([]),
[batch_size // mix_batch_size])),
seed=distribution_util.gen_new_seed(
seed, "vector_diffeomixture"))
# We need to flatten batch dims in case mixture_distribution has its own
# batch dims.
ids = array_ops.reshape(ids, shape=concat_vectors(
[n],
distribution_util.pick_vector(
self.is_scalar_batch(),
np.int32([]),
np.int32([-1]))))
# Stride `components * quadrature_size` for `batch_size` number of times.
stride = self.grid.shape.with_rank_at_least(
2)[-2:].num_elements()
if stride is None:
stride = array_ops.reduce_prod(
array_ops.shape(self.grid)[-2:])
offset = math_ops.range(start=0,
limit=batch_size * stride,
delta=stride,
dtype=ids.dtype)
weight = array_ops.gather(
array_ops.reshape(self.grid, shape=[-1]),
ids + offset)
# At this point, weight flattened all batch dims into one.
# We also need to append a singleton to broadcast with event dims.
if self.batch_shape.is_fully_defined():
new_shape = [-1] + self.batch_shape.as_list() + [1]
else:
new_shape = array_ops.concat(
([-1], self.batch_shape_tensor(), [1]), axis=0)
weight = array_ops.reshape(weight, shape=new_shape)
if len(x) != 2:
# We actually should have already triggered this exception. However as a
# policy we're putting this exception wherever we exploit the bimixture
# assumption.
raise NotImplementedError("Currently only bimixtures are supported; "
"len(scale)={} is not 2.".format(len(x)))
# Alternatively:
# x = weight * x[0] + (1. - weight) * x[1]
x = weight * (x[0] - x[1]) + x[1]
return x
def _log_prob(self, x):
# By convention, we always put the grid points right-most.
y = array_ops.stack(
[aff.inverse(x) for aff in self.interpolated_affine],
axis=-1)
log_prob = math_ops.reduce_sum(self.distribution.log_prob(y), axis=-2)
# Because the affine transformation has a constant Jacobian, it is the case
# that `affine.fldj(x) = -affine.ildj(x)`. This is not true in general.
fldj = array_ops.stack([
aff.forward_log_det_jacobian(
x,
event_ndims=array_ops.rank(self.event_shape_tensor())
) for aff in self.interpolated_affine], axis=-1)
return math_ops.reduce_logsumexp(
self.mixture_distribution.logits - fldj + log_prob, axis=-1)
def _mean(self):
p = self._expand_mix_distribution_probs()
m = self._expand_base_distribution_mean()
mean = None
for k, aff in enumerate(self.interpolated_affine):
# aff.forward is going to do this:
# y = array_ops.squeeze(aff.scale.matmul(m), axis=[-1])
# if aff.shift is not None:
# y += aff.shift
mean = add(mean, p[..., k] * aff.forward(m))
return mean
def _covariance(self):
# Law of total variance:
#
# Cov[Z] = E[Cov[Z | V]] + Cov[E[Z | V]]
#
# where,
#
# E[Cov[Z | V]] = sum_i mix_prob[i] Scale[i]
# Cov[E[Z | V]] = sum_i mix_prob[i] osquare(loc[i])
# - osquare(sum_i mix_prob[i] loc[i])
#
# osquare(x) = x.transpose @ x
return add(
self._mean_of_covariance_given_quadrature_component(diag_only=False),
self._covariance_of_mean_given_quadrature_component(diag_only=False))
def _variance(self):
# Equivalent to: tf.diag_part(self._covariance()),
return add(
self._mean_of_covariance_given_quadrature_component(diag_only=True),
self._covariance_of_mean_given_quadrature_component(diag_only=True))
def _mean_of_covariance_given_quadrature_component(self, diag_only):
p = self.mixture_distribution.probs
# To compute E[Cov(Z|V)], we'll add matrices within three categories:
# scaled-identity, diagonal, and full. Then we'll combine these at the end.
scale_identity_multiplier = None
diag = None
full = None
for k, aff in enumerate(self.interpolated_affine):
s = aff.scale # Just in case aff.scale has side-effects, we'll call once.
if (s is None
or isinstance(s, linop_identity_lib.LinearOperatorIdentity)):
scale_identity_multiplier = add(scale_identity_multiplier,
p[..., k, array_ops.newaxis])
elif isinstance(s, linop_identity_lib.LinearOperatorScaledIdentity):
scale_identity_multiplier = add(
scale_identity_multiplier,
(p[..., k, array_ops.newaxis] * math_ops.square(s.multiplier)))
elif isinstance(s, linop_diag_lib.LinearOperatorDiag):
diag = add(diag, (p[..., k, array_ops.newaxis] *
math_ops.square(s.diag_part())))
else:
x = (p[..., k, array_ops.newaxis, array_ops.newaxis] *
s.matmul(s.to_dense(), adjoint_arg=True))
if diag_only:
x = array_ops.matrix_diag_part(x)
full = add(full, x)
# We must now account for the fact that the base distribution might have a
# non-unity variance. Recall that, since X ~ iid Law(X_0),
# `Cov(SX+m) = S Cov(X) S.T = S S.T Diag(Var(X_0))`.
# We can scale by `Var(X)` (vs `Cov(X)`) since X corresponds to `d` iid
# samples from a scalar-event distribution.
v = self.distribution.variance()
if scale_identity_multiplier is not None:
scale_identity_multiplier *= v
if diag is not None:
diag *= v[..., array_ops.newaxis]
if full is not None:
full *= v[..., array_ops.newaxis]
if diag_only:
# Apparently we don't need the full matrix, just the diagonal.
r = add(diag, full)
if r is None and scale_identity_multiplier is not None:
ones = array_ops.ones(self.event_shape_tensor(), dtype=self.dtype)
return scale_identity_multiplier[..., array_ops.newaxis] * ones
return add(r, scale_identity_multiplier)
# `None` indicates we don't know if the result is positive-definite.
is_positive_definite = (True if all(aff.scale.is_positive_definite
for aff in self.endpoint_affine)
else None)
to_add = []
if diag is not None:
to_add.append(linop_diag_lib.LinearOperatorDiag(
diag=diag,
is_positive_definite=is_positive_definite))
if full is not None:
to_add.append(linop_full_lib.LinearOperatorFullMatrix(
matrix=full,
is_positive_definite=is_positive_definite))
if scale_identity_multiplier is not None:
to_add.append(linop_identity_lib.LinearOperatorScaledIdentity(
num_rows=self.event_shape_tensor()[0],
multiplier=scale_identity_multiplier,
is_positive_definite=is_positive_definite))
return (linop_add_lib.add_operators(to_add)[0].to_dense()
if to_add else None)
def _covariance_of_mean_given_quadrature_component(self, diag_only):
square = math_ops.square if diag_only else vec_osquare
p = self._expand_mix_distribution_probs()
if not diag_only:
p = p[..., array_ops.newaxis, :] # Assuming event.ndims=1.
m = self._expand_base_distribution_mean()
cov_e_z_given_v = None
e_z_given_v = self._mean()
for k, aff in enumerate(self.interpolated_affine):
y = aff.forward(m)
cov_e_z_given_v = add(cov_e_z_given_v,
p[..., k] * square(y - e_z_given_v))
return cov_e_z_given_v
def _expand_base_distribution_mean(self):
"""Ensures `self.distribution.mean()` has `[batch, event]` shape."""
single_draw_shape = concat_vectors(self.batch_shape_tensor(),
self.event_shape_tensor())
m = array_ops.reshape(
self.distribution.mean(), # A scalar.
shape=array_ops.ones_like(single_draw_shape,
dtype=dtypes.int32))
m = array_ops.tile(m, multiples=single_draw_shape)
m.set_shape(self.batch_shape.concatenate(self.event_shape))
return m
def _expand_mix_distribution_probs(self):
p = self.mixture_distribution.probs # [B, deg]
deg = p.shape.with_rank_at_least(1)[-1].value
if deg is None:
deg = array_ops.shape(p)[-1]
event_ndims = self.event_shape.ndims
if event_ndims is None:
event_ndims = array_ops.shape(self.event_shape_tensor())[0]
expand_shape = array_ops.concat([
self.mixture_distribution.batch_shape_tensor(),
array_ops.ones([event_ndims], dtype=dtypes.int32),
[deg],
], axis=0)
return array_ops.reshape(p, shape=expand_shape)
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def maybe_check_quadrature_param(param, name, validate_args):
"""Helper which checks validity of `loc` and `scale` init args."""
with ops.name_scope(name="check_" + name, values=[param]):
assertions = []
if param.shape.ndims is not None:
if param.shape.ndims == 0:
raise ValueError("Mixing params must be a (batch of) vector; "
"{}.rank={} is not at least one.".format(
name, param.shape.ndims))
elif validate_args:
assertions.append(check_ops.assert_rank_at_least(
param, 1,
message=("Mixing params must be a (batch of) vector; "
"{}.rank is not at least one.".format(
name))))
# TODO(jvdillon): Remove once we support k-mixtures.
if param.shape.with_rank_at_least(1)[-1] is not None:
if param.shape[-1].value != 1:
raise NotImplementedError("Currently only bimixtures are supported; "
"{}.shape[-1]={} is not 1.".format(
name, param.shape[-1].value))
elif validate_args:
assertions.append(check_ops.assert_equal(
array_ops.shape(param)[-1], 1,
message=("Currently only bimixtures are supported; "
"{}.shape[-1] is not 1.".format(name))))
if assertions:
return control_flow_ops.with_dependencies(assertions, param)
return param
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def determine_batch_event_shapes(grid, endpoint_affine):
"""Helper to infer batch_shape and event_shape."""
with ops.name_scope(name="determine_batch_event_shapes"):
# grid # shape: [B, k, q]
# endpoint_affine # len=k, shape: [B, d, d]
batch_shape = grid.shape[:-2]
batch_shape_tensor = array_ops.shape(grid)[:-2]
event_shape = None
event_shape_tensor = None
def _set_event_shape(shape, shape_tensor):
if event_shape is None:
return shape, shape_tensor
return (array_ops.broadcast_static_shape(event_shape, shape),
array_ops.broadcast_dynamic_shape(
event_shape_tensor, shape_tensor))
for aff in endpoint_affine:
if aff.shift is not None:
batch_shape = array_ops.broadcast_static_shape(
batch_shape, aff.shift.shape[:-1])
batch_shape_tensor = array_ops.broadcast_dynamic_shape(
batch_shape_tensor, array_ops.shape(aff.shift)[:-1])
event_shape, event_shape_tensor = _set_event_shape(
aff.shift.shape[-1:], array_ops.shape(aff.shift)[-1:])
if aff.scale is not None:
batch_shape = array_ops.broadcast_static_shape(
batch_shape, aff.scale.batch_shape)
batch_shape_tensor = array_ops.broadcast_dynamic_shape(
batch_shape_tensor, aff.scale.batch_shape_tensor())
event_shape, event_shape_tensor = _set_event_shape(
tensor_shape.TensorShape([aff.scale.range_dimension]),
aff.scale.range_dimension_tensor()[array_ops.newaxis])
return batch_shape, batch_shape_tensor, event_shape, event_shape_tensor
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def interpolate_loc(grid, loc):
"""Helper which interpolates between two locs."""
if len(loc) != 2:
raise NotImplementedError("Currently only bimixtures are supported; "
"len(scale)={} is not 2.".format(len(loc)))
deg = grid.shape.with_rank_at_least(1)[-1].value
if deg is None:
raise ValueError("Num quadrature grid points must be known prior "
"to graph execution.")
with ops.name_scope("interpolate_loc", values=[grid, loc]):
if loc is None or loc[0] is None and loc[1] is None:
return [None]*deg
# shape: [B, 1, k, deg]
w = grid[..., array_ops.newaxis, :, :]
loc = [x[..., array_ops.newaxis] # shape: [B, e, 1]
if x is not None else None for x in loc]
if loc[0] is None:
x = w[..., 1, :] * loc[1] # shape: [B, e, deg]
elif loc[1] is None:
x = w[..., 0, :] * loc[0] # shape: [B, e, deg]
else:
delta = loc[0] - loc[1]
x = w[..., 0, :] * delta + loc[1] # shape: [B, e, deg]
return [x[..., k] for k in range(deg)] # list(shape:[B, e])
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def interpolate_scale(grid, scale):
"""Helper which interpolates between two scales."""
if len(scale) != 2:
raise NotImplementedError("Currently only bimixtures are supported; "
"len(scale)={} is not 2.".format(len(scale)))
deg = grid.shape.with_rank_at_least(1)[-1].value
if deg is None:
raise ValueError("Num quadrature grid points must be known prior "
"to graph execution.")
with ops.name_scope("interpolate_scale", values=[grid]):
return [linop_add_lib.add_operators([
linop_scale(grid[..., k, q], s)
for k, s in enumerate(scale)
])[0] for q in range(deg)]
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def linop_scale(w, op):
# We assume w > 0. (This assumption only relates to the is_* attributes.)
with ops.name_scope("linop_scale", values=[w]):
# TODO(b/35301104): LinearOperatorComposition doesn't combine operators, so
# special case combinations here. Once it does, this function can be
# replaced by:
# return linop_composition_lib.LinearOperatorComposition([
# scaled_identity(w), op])
def scaled_identity(w):
return linop_identity_lib.LinearOperatorScaledIdentity(
num_rows=op.range_dimension_tensor(),
multiplier=w,
is_non_singular=op.is_non_singular,
is_self_adjoint=op.is_self_adjoint,
is_positive_definite=op.is_positive_definite)
if isinstance(op, linop_identity_lib.LinearOperatorIdentity):
return scaled_identity(w)
if isinstance(op, linop_identity_lib.LinearOperatorScaledIdentity):
return scaled_identity(w * op.multiplier)
if isinstance(op, linop_diag_lib.LinearOperatorDiag):
return linop_diag_lib.LinearOperatorDiag(
diag=w[..., array_ops.newaxis] * op.diag_part(),
is_non_singular=op.is_non_singular,
is_self_adjoint=op.is_self_adjoint,
is_positive_definite=op.is_positive_definite)
if isinstance(op, linop_tril_lib.LinearOperatorLowerTriangular):
return linop_tril_lib.LinearOperatorLowerTriangular(
tril=w[..., array_ops.newaxis, array_ops.newaxis] * op.to_dense(),
is_non_singular=op.is_non_singular,
is_self_adjoint=op.is_self_adjoint,
is_positive_definite=op.is_positive_definite)
raise NotImplementedError(
"Unsupported Linop type ({})".format(type(op).__name__))
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def concat_vectors(*args):
"""Concatenates input vectors, statically if possible."""
args_ = [distribution_util.static_value(x) for x in args]
if any(vec is None for vec in args_):
return array_ops.concat(args, axis=0)
return [val for vec in args_ for val in vec]
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def add(x, y):
"""Adds inputs; interprets `None` as zero."""
if x is None:
return y
if y is None:
return x
return x + y
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def vec_osquare(x):
"""Computes the outer-product of a (batch of) vector, i.e., x.T x."""
return x[..., :, array_ops.newaxis] * x[..., array_ops.newaxis, :]
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def softmax(x, axis, name=None):
"""Equivalent to tf.nn.softmax but works around b/70297725."""
with ops.name_scope(name, "softmax", [x, axis]):
x = ops.convert_to_tensor(x, name="x")
ndims = (x.shape.ndims if x.shape.ndims is not None
else array_ops.rank(x, name="ndims"))
axis = ops.convert_to_tensor(axis, dtype=dtypes.int32, name="axis")
axis_ = tensor_util.constant_value(axis)
if axis_ is not None:
axis = np.int(ndims + axis_ if axis_ < 0 else axis_)
else:
axis = array_ops.where(axis < 0, ndims + axis, axis)
return nn_ops.softmax(x, axis=axis)
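# Hedged usage sketch for the helpers above (illustrative only; evaluating the
# resulting ops would require a TF1-style session):
#   probs = softmax(logits, axis=-1)          # negative axis resolved statically when possible
#   shape = concat_vectors([2], batch_shape)  # stays a Python list when all parts are static
#   total = add(None, x)                      # `None` is treated as zero, so this returns x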
|
|
import cProfile
import pstats
import math
import string
import sys
import struct
import numpy as np
import cPickle
import asciitable
import scipy.ndimage
import scipy.stats as ss
import scipy.signal
import scipy as sp
import scipy.odr as odr
import glob
import os
import make_color_image
import make_fake_wht
import gzip
import tarfile
import shutil
import cosmocalc
import congrid
import astropy.io.ascii as ascii
import warnings
import subprocess
import photutils
import astropy
import astropy.cosmology
import astropy.io.fits as pyfits
import astropy.units as u
from astropy.cosmology import WMAP7,z_at_value
from astropy.coordinates import SkyCoord
import copy
import medianstats_bootstrap as msbs
import illustris_python as ilpy
import h5py
import requests
from multiprocessing import Process, Queue, current_process
import time
import illustris_lightcone_catalogs as ilc
import translate_coordinates as tc
ilh = 0.704
illcos = astropy.cosmology.FlatLambdaCDM(H0=70.4,Om0=0.2726,Ob0=0.0456)
start_time = time.time()
defaultparams={'stars':'Coordinates,Velocities,GFM_StellarFormationTime,GFM_Metallicity,GFM_InitialMass,Masses','gas':'Coordinates,Density,ElectronAbundance,Masses,Velocities,Volume,SubfindDensity,Potential,InternalEnergy,StarFormationRate,GFM_Metallicity,GFM_AGNRadiation,GFM_WindDMVelDisp,GFM_CoolingRate,NeutralHydrogenAbundance,SmoothingLength,SubfindHsml,SubfindVelDisp,NumTracers,ParticleIDs','dm':'Coordinates,Velocities,Potential'}
baseUrl = 'http://www.illustris-project.org/api/'
headers = {"api-key":"117782db3bf216d7ce7a04d0c9034601"}
class subhalo_observation:
def __init__(self,sim,sn,sfid,i,label):
self.sim=sim
self.sn=sn
self.sfid=sfid
self.i=i
self.label=label
def get(path, params=None,savepath=None):
# make HTTP GET request to path
r = requests.get(path, params=params, headers=headers)
# raise exception if response code is not HTTP SUCCESS (200)
r.raise_for_status()
if r.headers['content-type'] == 'application/json':
return r.json() # parse json responses automatically
if 'content-disposition' in r.headers:
file_basename = r.headers['content-disposition'].split("filename=")[1]
if savepath is not None:
filename = os.path.join(savepath,file_basename)
else:
filename = file_basename
with open(filename, 'wb') as f:
f.write(r.content)
return filename # return the filename string
return r
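# Hedged usage sketch (endpoint paths follow the Illustris API layout used
# below; a valid api-key header is required):
#   sims = get(baseUrl)  # JSON response -> parsed dict
#   fname = get(baseUrl + 'Illustris-2/snapshots/135/subhalos/0/cutout.hdf5',
#               params=defaultparams, savepath='/tmp')  # binary -> saved file, returns filename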
def get_subhalo(sim,snap,sfid,params=defaultparams,savepath=None,verbose=True,clobber=False):
relative_path = sim+"/snapshots/" + str(snap) + "/subhalos/" + str(sfid)
url = "http://www.illustris-project.org/api/"+relative_path
#sub = get(url)
if savepath is not None:
savepath = os.path.join(savepath,relative_path)
if not os.path.lexists(savepath):
os.makedirs(savepath)
checkf = os.path.join(savepath,'cutout_'+str(sfid)+'.hdf5')
npyf = os.path.join(savepath,'sub_'+str(sfid)+'.npy')
else:
checkf = 'cutout_'+str(sfid)+'.hdf5'
npyf = 'sub_'+str(sfid)+'.npy'
if os.path.lexists(checkf) and os.path.lexists(npyf) and clobber is False:
if verbose:
print "Subhalo cutout exists, skipping: ", checkf
download = False
subobj = np.load(npyf)
        sub = subobj.item()  # np.save stored the metadata dict inside a 0-d object array
return checkf, sub, download
else:
if verbose:
print "Getting subhalo cutout: ", checkf
#subhalo metadata
try:
sub = get(url)
np.save(npyf,sub)
file = get(url+"/cutout.hdf5",params,savepath)
download = True
    except requests.exceptions.HTTPError as h:
file = None
sub = None
download = False
return file, sub, download
def process_subhalo(sim,snap,sfid,i,label,camera_obj,params=defaultparams,savepath=None,verbose=True,clobber=False,getlabel='StellarMass',resample=False):
#defaultparams={'stars':'Coordinates,Velocities,GFM_StellarFormationTime,GFM_Metallicity,GFM_InitialMass,Masses'}
file, sub, downloaded = get_subhalo(sim,snap,sfid,params=defaultparams,savepath=savepath,verbose=verbose,clobber=clobber)
substuff = None
if i % 100 ==0:
print "Finished: ", i, snap, sfid, downloaded, (time.time() - start_time)/60.0, file
sys.stdout.flush()
if file is None:
print "Subhalo Failed: ", i, snap, sfid, downloaded, file
sys.stdout.flush()
return (file, substuff)
#check if resampled cutout exists, if so don't bother loading
dirname = os.path.dirname(file)
resampfile = os.path.join(dirname,'resampled_'+str(sfid)+'.hdf5')
resamp_exists = os.path.lexists(resampfile)
#if not resamp_exists:
#load HDF5 file relevant fields
# with h5py.File(file) as h5f:
# masses = np.asarray(h5f['PartType4']['Masses'])*(1.0e10)/ilh #convert to solar masses
# formfactors = np.asarray(h5f['PartType4']['GFM_StellarFormationTime']) #in scale factor
# xpos = np.asarray(h5f['PartType4']['Coordinates'][:,0])-pos[0] #in ckpc/h
# ypos = np.asarray(h5f['PartType4']['Coordinates'][:,1])-pos[1]
# zpos = np.asarray(h5f['PartType4']['Coordinates'][:,2])-pos[2]
#print masses.shape, ypos.shape, np.mean(ypos)
#resample star particles
#project into an image to return... or output Sunrise input files as needed
#return sunrise directory... relative to savepath?
sundir = os.path.join(dirname,label)
if not os.path.lexists(sundir):
os.mkdir(sundir)
#sfrhist -- translate to center of subhalo
#mcrx -- move cameras to negative subhalo position, set FOV smartly
#camera position = -camdir*distance; distance from lightcone catalog Z and tz ? or camdir dot XYZ ?
#broadband -- always redshift?
#ideas for options: F435W, F606W, F850LP, F105W, F125W, F160W, NIRCAM, Mstar, Mgas, SFR, Simonsized H-alpha lines?
return (file, substuff)
def worker(input,output,**kwargs):
for func, args in iter(input.get,'STOP'):
f = calculate(func,args,**kwargs)
output.put(f)
def calculate(func,args,**kwargs):
result = func(*args,**kwargs)
#return '%s was given %s and got %s' % \
# (current_process().name, args, result)
return result
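# Minimal sketch of the Queue worker pattern used below. `_demo_add` must live
# at module level so it can be pickled through the Queue:
def _demo_add(a, b):
    return a + b
def _worker_pattern_example():
    task_queue, done_queue = Queue(), Queue()
    task_queue.put((_demo_add, (1, 2)))
    p = Process(target=worker, args=(task_queue, done_queue))
    p.start()
    result = done_queue.get()  # -> 3
    task_queue.put('STOP')     # sentinel terminates iter(input.get, 'STOP') in worker
    p.join()
    return result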
def get_lightcone_images_threaded(lcfile,geofile,sim='Illustris-2',clobber=False,savepath='/astro/snyder_lab2/Illustris',Np=2,maxq=10000,lim=None,label='FIELD'):
data = ascii.read(lcfile)
snapnums = np.asarray(data['col1'],dtype='str')
sfids = np.asarray(data['col2'],dtype='str')
x_cmpc = np.asarray(data['col13'])
y_cmpc = np.asarray(data['col14'])
z_cmpc = np.asarray(data['col15'])
coords_ra = np.asarray(data['col3']) #degrees
coords_dec = np.asarray(data['col4']) #degrees
catalog = ilc.process_lightcone_catalog(lightcone=geofile,basedir='.')
print catalog.delb_arcmin
NUMBER_OF_PROCESSES=Np
task_queue = Queue()
done_queue = Queue()
TASKS = []
TASKS_DONE = []
TASKS_LEFT = []
N_objects = snapnums.shape[0]
#figure out how to drain the queue before it fills the pipe
if lim is None:
lim=np.int64(N_objects)
print "Items to process: ", lim
for i,sn in enumerate(snapnums[0:lim]):
this_sfid = sfids[i]
this_x = x_cmpc[i]
this_y = y_cmpc[i]
this_z = z_cmpc[i]
this_ra = coords_ra[i]
this_dec = coords_dec[i]
task = (process_subhalo,(sim,sn,this_sfid,i,label,None))
if i <= maxq:
task_queue.put(task)
TASKS.append(task)
else:
TASKS_LEFT.append(task)
for p in range(NUMBER_OF_PROCESSES):
Process(target=worker,args=(task_queue,done_queue),kwargs={'savepath':savepath,'verbose':False,'clobber':clobber}).start()
cutout_files = []
while len(TASKS_LEFT) > 0:
cutout_files.append(done_queue.get())
newtask = TASKS_LEFT.pop()
task_queue.put(newtask)
    for i in range(min(maxq + 1, lim)):  # up to maxq+1 tasks were queued initially (i <= maxq)
cutout_files.append(done_queue.get())
#build up images/analysis here!
print cutout_files[0:5]
print cutout_files[-5:]
print len(cutout_files)
for p in range(NUMBER_OF_PROCESSES):
task_queue.put('STOP')
return None
def get_lightcone_images(lcfile,geofile,sim='Illustris-2',clobber=False,savepath='/astro/snyder_lab2/Illustris'):
data = ascii.read(lcfile)
snapnums = np.asarray(data['col1'],dtype='str')
sfids = np.asarray(data['col2'],dtype='str')
for i,sn in enumerate(snapnums):
this_sfid = sfids[i]
#get file. will skip download if it already exists
f = get_subhalo(sim,sn,this_sfid,savepath=savepath,verbose=True,clobber=clobber)
#obtain fields, place at desired position, project, compute densities and luminosities
#for projection, how? use lightcone direction? if so, must save somewhere!
return
def do_lightcone_images(savepath=None):
if savepath is None:
print "Requires savepath specification!"
        sys.exit(1)
lcfile = os.path.expandvars('$HOME/oasis_project/Lightcones/Illustris-2_RADEC_hudfwide_75Mpc_7_6_xyz_corners.txt')
geofile = os.path.expandvars('$HOME/oasis_project/Lightcones/hudfwide_75Mpc_7_6_fixedh_xyz_NEW.txt')
label = 'FIELDA'
#note, for Gordon, stage intermediate lightcone data on local scratch? Do this outside python: FASTER
#if not os.path.lexists(final_savepath):
# os.mkdir(final_savepath)
#print "Staging files... ", final_savepath, intermediate_savepath
#staget = time.time()
#shutil.move(final_savepath,intermediate_savepath)
#staget = time.time() - staget
#print "Staging took: ", staget, ' seconds'
#st1 = time.time()
#get_lightcone_images(lcfile,geofile,sim='Illustris-2',clobber=False,savepath=savepath)
#et1 = time.time()
st = time.time()
result = get_lightcone_images_threaded(lcfile,geofile,sim='Illustris-2',clobber=False,savepath=savepath,Np=8,lim=None,maxq=10000,label=label)
et = time.time()
#return files to temp project space
#print "UnStaging files... ", intermediate_savepath, final_savepath
#staget = time.time()
#shutil.move(intermediate_savepath,final_savepath)
#staget = time.time() - staget
#print "Un-Staging took: ", staget, ' seconds'
print 'Threaded calculation took: ', et-st, ' seconds'
#print 'Serial calculation took: ', et1-st1, ' seconds'
if __name__=="__main__":
if len(sys.argv) != 2:
print "Usage: python illustris_api_utils_gordon.py SAVEPATH"
        sys.exit(1)
else:
savepath=sys.argv[1]
print "Saving intermediate files at: ", savepath
do_lightcone_images(savepath=savepath)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class TransformsOperations(object):
"""TransformsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.media.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
account_name, # type: str
filter=None, # type: Optional[str]
orderby=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.TransformCollection"]
"""List Transforms.
Lists the Transforms in the account.
:param resource_group_name: The name of the resource group within the Azure subscription.
:type resource_group_name: str
:param account_name: The Media Services account name.
:type account_name: str
:param filter: Restricts the set of items returned.
:type filter: str
:param orderby: Specifies the key by which the result collection should be ordered.
:type orderby: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either TransformCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.media.models.TransformCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TransformCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('TransformCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.odata_next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/transforms'} # type: ignore
def get(
self,
resource_group_name, # type: str
account_name, # type: str
transform_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.Transform"
"""Get Transform.
Gets a Transform.
:param resource_group_name: The name of the resource group within the Azure subscription.
:type resource_group_name: str
:param account_name: The Media Services account name.
:type account_name: str
:param transform_name: The Transform name.
:type transform_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Transform, or the result of cls(response)
:rtype: ~azure.mgmt.media.models.Transform
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Transform"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'transformName': self._serialize.url("transform_name", transform_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Transform', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/transforms/{transformName}'} # type: ignore
def create_or_update(
self,
resource_group_name, # type: str
account_name, # type: str
transform_name, # type: str
parameters, # type: "_models.Transform"
**kwargs # type: Any
):
# type: (...) -> "_models.Transform"
"""Create or Update Transform.
Creates or updates a new Transform.
:param resource_group_name: The name of the resource group within the Azure subscription.
:type resource_group_name: str
:param account_name: The Media Services account name.
:type account_name: str
:param transform_name: The Transform name.
:type transform_name: str
:param parameters: The request parameters.
:type parameters: ~azure.mgmt.media.models.Transform
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Transform, or the result of cls(response)
:rtype: ~azure.mgmt.media.models.Transform
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Transform"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'transformName': self._serialize.url("transform_name", transform_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'Transform')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Transform', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Transform', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/transforms/{transformName}'} # type: ignore
def delete(
self,
resource_group_name, # type: str
account_name, # type: str
transform_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Delete Transform.
Deletes a Transform.
:param resource_group_name: The name of the resource group within the Azure subscription.
:type resource_group_name: str
:param account_name: The Media Services account name.
:type account_name: str
:param transform_name: The Transform name.
:type transform_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'transformName': self._serialize.url("transform_name", transform_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/transforms/{transformName}'} # type: ignore
def update(
self,
resource_group_name, # type: str
account_name, # type: str
transform_name, # type: str
parameters, # type: "_models.Transform"
**kwargs # type: Any
):
# type: (...) -> "_models.Transform"
"""Update Transform.
Updates a Transform.
:param resource_group_name: The name of the resource group within the Azure subscription.
:type resource_group_name: str
:param account_name: The Media Services account name.
:type account_name: str
:param transform_name: The Transform name.
:type transform_name: str
:param parameters: The request parameters.
:type parameters: ~azure.mgmt.media.models.Transform
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Transform, or the result of cls(response)
:rtype: ~azure.mgmt.media.models.Transform
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Transform"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'transformName': self._serialize.url("transform_name", transform_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'Transform')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Transform', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/transforms/{transformName}'} # type: ignore
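# Hedged usage sketch (assumes the track-2 azure-mgmt-media client and
# azure-identity; resource names are placeholders):
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.media import AzureMediaServices
#   client = AzureMediaServices(DefaultAzureCredential(), subscription_id="<sub-id>")
#   for transform in client.transforms.list("<resource-group>", "<account>"):
#       print(transform.name)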
|
|
# -*- coding: utf-8 -*-
# pylint: disable=C0103
"""
k-nearest neighbors classification and regression.
"""
# Author: bertrand-l
# License: BSD
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from .classification import confusion
from .metrics import lp_norm
from .mixin import LeastSquaresErrorMixin
from .predictor import BaseClassificationPredictor, BasePredictor
from .util import assert_positive, assert_in, assert_Xy, strfmt, strtab
__all__ = 'KNearestClass', 'KNearestRegress'
#==============================================================================
#
# k-nearest neighbors: brute force
#
#==============================================================================
def argksmallest(array, k):
"""
    Returns the indices of the k smallest elements of `array` (the order of
    the returned indices is not guaranteed).
"""
if k <= 0:
raise ValueError('`k` must be positive.')
elif k == 1:
indices = np.array([np.argmin(array)])
else:
try:
# Fast and O(n), but requires numpy >= 1.8.0
indices = np.argpartition(array, k)[:k]
except AttributeError:
            if len(array) > 10 ** 7:
                # O(n) too, but only faster than sorting for large arrays
                import heapq
                from itertools import count
                result = heapq.nsmallest(k, zip(array, count()))
                indices = np.array([r[1] for r in result])
else:
# for small arrays sorting with numpy is just faster
indices = np.argsort(array)[:k]
return indices
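# Hedged doctest-style check (pure numpy; not part of the original module):
def _argksmallest_example():
    a = np.array([5.0, 1.0, 4.0, 2.0])
    return sorted(argksmallest(a, 2).tolist())  # -> [1, 3]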
def argknn_brute(X, x, k, p=2):
"""
    Find k-nearest neighbors using brute force.
Parameters
----------
X : array_like
Set of points/feature vectors from which neighbors will be chosen.
x : array_like
Point or feature vector whose neighbors will be returned.
k : int > 0
Number of neighbors.
p : int >= 1
Order of the Lp norm.
Returns
-------
indices : array
Indices of `x`'s k nearest neighbors.
"""
if not k > 0:
raise ValueError("'k' is not positive.")
X, x = np.asarray(X), np.asarray(x)
norm = lp_norm(p=p, root=False, axis=0)
return argksmallest([norm(x - x_) for x_ in X], k)
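# Hedged sketch: the two rows closest to the origin in a toy 2-D set (relies
# on this module's `lp_norm`, exactly as `argknn_brute` itself does):
def _argknn_brute_example():
    X = np.array([[0.0, 1.0], [2.0, 2.0], [0.1, 0.0]])
    return sorted(argknn_brute(X, [0.0, 0.0], k=2).tolist())  # -> [0, 2]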
#==============================================================================
#
# k-nearest neighbors: kd-tree
# warning: very inefficient implementation...
#
#==============================================================================
class KDTreeNode(object):
"""
Node of a kd-tree.
"""
__slots__ = ("depth", "child_less", "child_more", "coordinate", "index",
"threshold")
def __init__(self, depth=None):
self.depth = depth
self.child_less = None
self.child_more = None
self.coordinate = None
self.index = None
self.threshold = None
def __str__(self):
pad = " " + " " * self.depth
txt = ""
if self.depth == 0:
txt = "KDTree object\n"
txt += "{0}{1}".format(pad, self.info)
if not self.is_leaf:
if self.depth > 9:
txt += "\n{0} ...".format(pad)
else:
txt += "\n{0}\n{1}".format(self.child_less, self.child_more)
return txt
def child(self, x):
"""Returns the child node where `x` belongs."""
node = None
if (self.threshold is not None and self.child_less is not None and
x[self.coordinate] < self.threshold):
node = self.child_less
elif (self.threshold is not None and self.child_more is not None and
x[self.coordinate] >= self.threshold):
node = self.child_more
return node
def children(self, x, depthmax=None):
"""Generator over children nodes where `x` belongs."""
node = self
while node is not None:
yield node
if depthmax is not None and node.depth == depthmax:
node = None
else:
node = node.child(x)
@property
def is_leaf(self):
return self.child_less is None and self.child_more is None
@property
def info(self):
"""String containing basic information about this node."""
if isinstance(self.threshold, (float, np.float_)):
thresh = "{0:.4g}".format(self.threshold)
else:
thresh = "{0}".format(self.threshold)
return ("{0}: coordinate = {1}, threshold = {2}, index = {3}"
.format(self.depth, self.coordinate, thresh, self.index))
def leaf(self, x):
"""Returns the leaf node where `x` belongs."""
for node in self.children(x):
pass
return node
def kdtree(X):
"""
Returns a balanced kd-tree without support for insertions or deletions.
This tree only stores the indices of each record/sample, not the values
themselves. Also, note that the build process could probably be made faster
in cases where n_samples >> n_features by pre-sorting each component of X,
but that is not implemented (yet).
"""
def build_node(indices, depth):
"""Recursively add nodes one by one."""
indices = np.asarray(indices)
if len(indices) == 0:
return None
node = KDTreeNode(depth)
coordinate = depth % n_features
median = np.median(X[indices, coordinate])
node.coordinate = coordinate
node.threshold = median
node.index = indices[np.argmin(np.abs(X[indices, coordinate] -
median))]
#node.point = X[node.index]
if len(indices) > 1:
node.child_less = build_node([i for i in indices
if X[i, coordinate] < median],
depth + 1)
node.child_more = build_node([i for i in indices
if X[i, coordinate] >= median],
depth + 1)
return node
X = np.asarray(X)
n_samples, n_features = X.shape
return build_node(list(range(n_samples)), 0)
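# Hedged sketch: the tree stores row indices only; for this toy set the root
# splits on coordinate 0 at the median of that column:
def _kdtree_example():
    X = np.array([[2.0, 3.0], [5.0, 4.0], [9.0, 6.0], [4.0, 7.0]])
    root = kdtree(X)
    return root.coordinate, root.threshold  # -> (0, 4.5)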
def argknn_kdtree(X, x, k, tree=None, p=2):
"""
    Find k-nearest neighbors using a kd-tree.
Parameters
----------
X : array_like
Set of points/feature vectors from which neighbors will be chosen.
x : array_like
Point or feature vector whose neighbors will be returned.
k : int > 0
Number of neighbors.
p : int >= 1
Order of the Lp norm.
tree : KDTreeNode, optional
Root node of a kd-tree built from `X`. `tree` is built each time this
function is called with tree=None, slowing down performance a lot for
multiple searches on the same data.
Returns
-------
indices : array
Indices of `x`'s k nearest neighbors in `X`.
Raises
------
TypeError, ValueError
"""
def knearest(node, x, distances, indices):
"""Recursively find the k-nearest neighbors."""
if node is None:
return
# check if this node is one of the knn
dist = norm(x - X[node.index])
imax = np.argmax(distances)
if dist < distances[imax] and node.index not in indices:
distances[imax] = dist
indices[imax] = node.index
# check relevant children
dist_perp = np.absolute(x[node.coordinate] -
X[node.index][node.coordinate])
if dist_perp < max(distances):
# some of the knns could be on either side
knearest(node.child_less, x, distances, indices)
knearest(node.child_more, x, distances, indices)
else:
# knns must be on this side
knearest(node.child(x), x, distances, indices)
if not k > 0:
raise ValueError("'k' is not positive.")
if k > len(X):
raise ValueError("'k' is too large.")
elif k == len(X):
return np.arange(len(X))
if tree is None:
tree = kdtree(X)
if not isinstance(tree, KDTreeNode):
raise TypeError("'tree' is not a kd-tree.")
X = np.asarray(X)
x = np.asarray(x)
# initialize
norm = lp_norm(p=p, root=False, axis=0)
indices = - np.ones(k, dtype=np.int_)
distances = np.inf * np.ones(k)
# get distance to leaf node
leaf = tree.leaf(x)
distances[0] = norm(x - X[leaf.index])
indices[0] = leaf.index
# check the other relevant nodes starting from the root
knearest(tree, x, distances, indices)
return indices
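# Hedged sketch mirroring the docstring's advice: build the tree once, then
# reuse it across queries instead of passing tree=None every call:
def _argknn_kdtree_example():
    X = np.array([[0.0, 1.0], [2.0, 2.0], [0.1, 0.0]])
    tree = kdtree(X)
    idx = argknn_kdtree(X, np.array([0.0, 0.0]), k=2, tree=tree)
    return sorted(idx.tolist())  # -> [0, 2]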
#==============================================================================
#
# k-nearest neighbors: mixin for all common code for classification and
# regression
#
#==============================================================================
class KNearestBase(object):
"""
Base class for k-nearest neighbor predictors, since a lot of the code is shared
by kNN regression and classification
Parameters
----------
dexponent : non-negative float
        neighbors are weighted by ``distance ^ {- dexponent}``.
k : positive int
number of nearest neighbors.
method : {'kdtree', 'brute'}
p : positive integer or "inf", optional
Order of the Lp norm (p=2 is the Euclidean norm).
Raises
------
ValueError
"""
def __init__(self, dexponent=0., k=2, method='brute', p=2):
self.settings(dexponent=dexponent, k=k, method=method, p=p)
self._X = None
self._y = None
self._tree = None
def learn(self, X, y):
"""
Learn by storing the training data.
Parameters
----------
X : array_like
Input features, shape=(n_samples, n_features).
y : array_like
targets, shape=(n_samples,). Targets must take at least two
distinct values or classes, and can have any type.
Returns
-------
self : KNearestClass or KNearestRegress object
Raises
------
TypeError, ValueError
"""
X, y = assert_Xy(X, y)
n_samples, n_features = X.shape
classes = np.unique(y)
self._X = X
self._y = y
self._tree = kdtree(X)
self._classes = classes
stats = {}
stats['mean'] = {c: X[y == c].mean(axis=0) for c in classes}
stats['stddev'] = {c: X[y == c].std(axis=0) for c in classes}
self._training_stats.update(stats)
return self
def predict(self, X, regression=False):
"""
        Predict the class (or regression target) of the input features.
Parameters
----------
X : array_like
Input features, shape=(n_samples, n_features).
Returns
-------
        y_hat : array_like
"""
X = BasePredictor.predict(self, X)
dexponent = self._settings['dexponent']
k = self._settings['k']
method = self._settings['method']
p = self._settings['p']
wexponent = - dexponent / p
norm = lp_norm(p=p, root=False, axis=1)
y_hat = np.zeros(len(X), dtype=self._y.dtype)
for i, x in enumerate(X):
if method == 'brute':
iksmallest = argknn_brute(self._X, x, k, p=p)
else:
iksmallest = argknn_kdtree(self._X, x, k, p=p, tree=self._tree)
weights = np.zeros(k)
if dexponent == 0.:
weights = np.ones(k) / float(k)
else:
distances = norm(x - self._X[iksmallest])
if min(distances) == 0.:
weights = np.zeros(k)
weights[distances == 0.] = 1.
else:
weights = norm(x - self._X[iksmallest]) ** wexponent
weights /= weights.sum()
if regression:
y_hat[i] = (weights * self._y[iksmallest]).sum()
else:
class_hist = {c: 0. for c in np.unique(self._y[iksmallest])}
for j, iks in enumerate(iksmallest):
class_hist[self._y[iks]] += weights[j]
                y_hat[i] = max(class_hist, key=class_hist.get)
return y_hat
def settings(self, dexponent=None, k=None, method=None, p=None):
"""
Optionally sets one or several parameters and returns current settings.
Parameters
----------
dexponent : non-negative float
            neighbors are weighted by distance ** (- dexponent).
k : positive int
number of nearest neighbors.
method : {'kdtree', 'brute'}
p : positive integer or "inf", optional
Order of the Lp norm (p=2 is the Euclidean norm).
Raises
------
ValueError
"""
if dexponent is not None:
assert_positive(dexponent, 'dexponent', nonnegative=True)
self._settings['dexponent'] = dexponent
if k is not None:
assert_positive(k, 'k')
self._settings['k'] = k
if method is not None:
method = str(method).replace('-', '').replace(' ', '').lower()
assert_in(method, 'method', ('kdtree', 'brute'))
self._settings['method'] = method
if p is not None:
assert_positive(p, 'p')
self._settings['p'] = p
return self._settings
#==============================================================================
#
# k-nearest neighbors: classification
#
#==============================================================================
class KNearestClass(KNearestBase, BaseClassificationPredictor):
"""
k-nearest neighbor classification.
    The k-nearest neighbor search uses either brute force or a k-d tree. Beware
that the training data is stored by the classifier.
Parameters
----------
dexponent : non-negative float
        neighbors are weighted by ``distance ^ {- dexponent}``.
k : positive int
number of nearest neighbors.
method : {'kdtree', 'brute'}
p : positive integer or "inf", optional
Order of the Lp norm (p=2 is the Euclidean norm) used to compute
distances.
Raises
------
ValueError
"""
def __init__(self, dexponent=0., k=2, method='kdtree', p=2):
BaseClassificationPredictor.__init__(self)
KNearestBase.__init__(self, dexponent=dexponent, k=k,
method=method, p=p)
def learn(self, X, y):
"""
Learn by storing the training data.
Parameters
----------
X : array_like
Input features, shape=(n_samples, n_features).
y : array_like
targets, shape=(n_samples,). Targets must take at least two
distinct values or classes, and can have any type.
Returns
-------
self : KNearestClass object
Raises
------
TypeError, ValueError
"""
BaseClassificationPredictor.learn(self, X, y)
out = KNearestBase.learn(self, X, y)
if len(self.classes) < 2:
raise ValueError("The targets must belong to at least 2 classes.")
return out
def predict(self, X):
"""
Predict the class of the input features.
Parameters
----------
X : array_like
Input features, shape=(n_samples, n_features).
Returns
-------
classes : array_like
"""
X = BasePredictor.predict(self, X)
return KNearestBase.predict(self, X, regression=False)
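# Hedged end-to-end sketch on a toy two-class problem (assumes the package's
# base-class bookkeeping behaves as it is used above):
def _knearest_class_example():
    X = np.array([[0.0], [0.1], [1.0], [1.1]])
    y = np.array([0, 0, 1, 1])
    model = KNearestClass(k=2, method='brute').learn(X, y)
    return model.predict(np.array([[0.05], [1.05]]))  # -> array([0, 1])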
#==============================================================================
#
# k-nearest neighbors: regression
#
#==============================================================================
class KNearestRegress(KNearestBase, LeastSquaresErrorMixin, BasePredictor):
"""
k-nearest neighbor regression.
    The k-nearest neighbor search uses either brute force or a k-d tree. Beware
    that the training data is stored by the predictor.
Parameters
----------
dexponent : non-negative float
        neighbors are weighted by ``distance ^ {- dexponent}``.
k : positive int
number of nearest neighbors.
method : {'kdtree', 'brute'}
p : positive integer or "inf", optional
Order of the Lp norm (p=2 is the Euclidean norm) used to compute
distances.
Raises
------
ValueError
"""
def __init__(self, dexponent=0., k=2, method='kdtree', p=2):
BasePredictor.__init__(self)
KNearestBase.__init__(self, dexponent=dexponent, k=k,
method=method, p=p)
def learn(self, X, y):
"""
Learn by storing the training data.
Parameters
----------
X : array_like
Input features, shape=(n_samples, n_features).
y : array_like
            targets, shape=(n_samples,). For regression the targets are
            numeric values.
Returns
-------
        self : KNearestRegress object
Raises
------
TypeError, ValueError
"""
BasePredictor.learn(self, X)
KNearestBase.learn(self, X, y)
return self
def predict(self, X):
"""
        Predict the regression target for the input features.
Parameters
----------
X : array_like
Input features, shape=(n_samples, n_features).
Returns
-------
        y_hat : array_like
"""
X = BasePredictor.predict(self, X)
return KNearestBase.predict(self, X, regression=True)
|
|
import datetime
import time
import uuid
from functools import wraps
from flask import render_template, abort, request, jsonify, redirect, url_for, flash
from flask_login import login_user, logout_user, current_user
from requests import HTTPError
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm import load_only
import process_data
import request_data
from flask_app import app, db, login_manager
from models import Player, Team, Guild, Match, TeamChallenge, GuildChallenge
import commons
@login_manager.user_loader
def load_user(id):
return Player.query.get(id)
def login_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if not current_user.is_authenticated:
return redirect(url_for('login'))
return f(*args, **kwargs)
return decorated_function
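# Usage note: stack @login_required below @app.route so the check wraps the
# final view function, e.g.
#   @app.route('/profile/')
#   @login_required
#   def profile(): ...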
@app.route('/login/', methods=['GET', 'POST'])
def login():
if request.method == "POST":
name = request.form['login-form-username']
region = request.form['login-form-region']
remember_me = 'remember' in request.form
registered_user = Player.query.filter_by(name=name).first()
if registered_user is None:
try:
player = request_data.query_player(name, region)
except (request_data.PlayerNotFound, HTTPError) as e:
                flash(str(e))  # str() works for exceptions on both Python 2 and 3
return redirect(url_for('login'))
process_data.process_player(player, region=region)
registered_user = Player.query.filter_by(name=name).first()
if registered_user is None:
return redirect(url_for('login'))
login_user(registered_user, remember=remember_me)
return redirect(url_for('index'))
return render_template('login.html')
@app.route('/logout/')
def logout():
logout_user()
return render_template('login.html')
@app.route('/')
@app.route('/index/')
def index():
if current_user.is_authenticated:
return render_template('dashboard.html')
else:
return render_template('login.html')
@app.route('/signup/', methods=['GET'])
def subscribe():
return render_template('signup.html')
@app.route('/ajax_signup/', methods=['POST'])
def ajax_subscribe():
error = []
type = request.form['subscribe-form-type']
name = request.form['subscribe-form-name']
tag = request.form['subscribe-form-tag']
description = request.form['subscribe-form-description']
region = request.form['subscribe-form-region']
print(type, name, tag, region)
if len(name) < 2 or len(name) > 16:
error.append('Invalid name')
if len(tag) < 2 or len(tag) > 4:
error.append('Invalid tag')
if not error:
if type == 'team':
team = Team(id=uuid.uuid4(), name=name, tag=tag, shardId=region, description=description, captain=current_user.id)
else:
team = Guild(id=uuid.uuid4(), name=name, tag=tag, shardId=region, description=description, captain=current_user.id)
try:
db.session.add(team)
db.session.commit()
except SQLAlchemyError as e:
db.session.rollback()
app.logger.error('ERROR: Session rollback - reason "%s"' % str(e))
return jsonify({'error': ', '.join(error)})
@app.route('/ajax_join_group/', methods=['POST'])
def ajax_join_group():
error = []
type = request.form['subscribe-form-type']
name = request.form['subscribe-form-name']
user = db.session.query(Player).get(current_user.id)
if type == 'team':
team = db.session.query(Team).filter_by(name=name).one()
user.team_id = team.id
else:
guild = db.session.query(Guild).filter_by(name=name).one()
user.guild_id = guild.id
try:
db.session.commit()
except SQLAlchemyError as e:
db.session.rollback()
app.logger.error('ERROR: Session rollback - reason "%s"' % str(e))
return jsonify({'error': ', '.join(error)})
@app.route('/ajax_leave/<string:group>/', methods=['GET'])
def ajax_leave_group(group):
user = db.session.query(Player).get(current_user.id)
if group == 'team':
user.team_id = None
else:
user.guild_id = None
try:
db.session.commit()
except SQLAlchemyError as e:
db.session.rollback()
app.logger.error('ERROR: Session rollback - reason "%s"' % str(e))
return redirect(url_for('profile'))
@app.route('/profile/', methods=['GET'])
@login_required
def profile():
guilds = db.session.query(Guild).options(load_only("name")).filter_by(shardId=current_user.shardId).all()
teams = db.session.query(Team).options(load_only("name")).filter_by(shardId=current_user.shardId).all()
return render_template('profile.html', teams=teams, guilds=guilds)
@app.route('/profile/<string:player_id>/', methods=['GET'])
def public_profile(player_id):
player = db.session.query(Player).get(player_id)
team = None
guild = None
if player.team_id:
team = db.session.query(Team).get(player.team_id)
if player.guild_id:
guild = db.session.query(Guild).get(player.guild_id)
return render_template('player.html', player=player, guild=guild, team=team)
@app.route('/ajax_update_player/', methods=['POST'])
@login_required
def ajax_update_player():
player_id = request.form['player_id']
player = db.session.query(Player).get(player_id)
if not player:
abort(404)
# print("-- Start casual")
# result = request_data.process_id(player_id, "casual")
# process_data.process_batch_query(result)
# print("-- Finished casual")
print("-- Start ranked")
result = request_data.process_id(player_id, "ranked")
process_data.process_batch_query(result)
print("-- Finished ranked")
return jsonify({'status': 200})
@app.route('/teams/')
def teams():
teams = db.session.query(Team).all()
return render_template('teams.html', teams=teams, title="Teams", active="team")
@app.route('/team/<string:team_id>/')
def team(team_id):
teams = db.session.query(Team).filter(Team.id != team_id).all()
team = db.session.query(Team).get(team_id)
if not team:
abort(404)
return render_template('team.html', team=team, teams=teams, active="team")
@app.route('/guild/<string:guild_id>/')
def guild(guild_id):
guilds = db.session.query(Guild).filter(Guild.id != guild_id).all()
guild = db.session.query(Guild).get(guild_id)
if not guild:
abort(404)
return render_template('team.html', team=guild, teams=guilds, active="guild")
@app.route('/guilds/')
def guilds():
guilds = db.session.query(Guild).all()
return render_template('teams.html', teams=guilds, title="Guilds", active="guild")
@app.route('/ajax_challenge/', methods=['POST'])
def ajax_challenge():
error = []
type = request.form['challenge-form-type']
challenger = uuid.UUID(request.form['challenge-form-current'])
challenged = request.form['challenge-form-name']
mode = request.form['challenge-form-mode']
start_date = datetime.datetime.now()
end_date = start_date + datetime.timedelta(days=7)
if type == 'team':
team1 = db.session.query(Team).filter_by(id=challenger).one()
team2 = db.session.query(Team).filter_by(name=challenged).one()
challenge = TeamChallenge(id=uuid.uuid4(), team1_id=team1.id, team2_id=team2.id, start=start_date, end=end_date, mode=mode)
else:
guild1 = db.session.query(Guild).filter_by(id=challenger).one()
guild2 = db.session.query(Guild).filter_by(name=challenged).one()
challenge = GuildChallenge(id=uuid.uuid4(), guild1_id=guild1.id, guild2_id=guild2.id, start=start_date, end=end_date, mode=mode)
try:
db.session.add(challenge)
db.session.commit()
except SQLAlchemyError as e:
db.session.rollback()
app.logger.error('ERROR: Session rollback - reason "%s"' % str(e))
return jsonify({'error': ', '.join(error)})
@app.route('/match/<string:match_id>/')
def match(match_id):
match = db.session.query(Match).get(match_id)
if not match:
abort(404)
return render_template('match.html', match=match)
@app.route('/debug/')
def test():
teams = db.session.query(Team).all()
player_ids = [member.id for team in teams for member in team._members]
chunks = [player_ids[x:x + 10] for x in range(0, len(player_ids), 10)]
for chunk in chunks:
request_data.threaded_process_range(2, chunk, "ranked")
print("time.sleep(60)")
time.sleep(60)
return 'ok'
@app.errorhandler(400)
def bad_request(e):
return render_template('404.html'), 400
@app.errorhandler(401)
def not_authorized(e):
return render_template('404.html'), 401
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@app.errorhandler(405)
def method_not_allowed(e):
return render_template('404.html'), 405
@app.errorhandler(500)
def internal_error(e):
return render_template('500.html'), 500
|
|
#
# Copyright 2014-2017 University of Southern California
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
#
from collections import namedtuple
import os
import numpy as np
import tifffile
from tifffile import lazyattr
from xml.dom import minidom
from functools import reduce
ImageMetadata = namedtuple('ImageMetadata', ['x_microns', 'y_microns', 'z_microns', 'axes'])
def plane_distance(p, plane):
"""Return signed distance to plane of point."""
x, y, z = p
A, B, C, D = plane
return A*x + B*y + C*z + D
def clamp(x, x_min, x_max):
return max(x_min, min(x, x_max))
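# Quick checks: plane_distance((0, 0, 0), (0, 0, 1, -5)) -> -5 (the origin is
# below the z = 5 plane); clamp(7, 0, 5) -> 5.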
def bin_reduce(data, axes_s):
"""Reduce ndarray data via bin-averaging for specified per-axis bin sizes.
For a 3-D input data with shape (D, H, W) and axes_s of [s1, s2,
s3], the output value result[0, 0, 0] will contain the average
of all input values in slice data[0:s1, 0:s2, 0:s3] and the result
will have shape (D/s1, H/s2, W/s3).
The implementation is optimized for large ndarray data with
relatively small bin sizes. It makes a number of Python
operations proportional to the product of per-axis bin sizes,
but minimizes the elementwise data access and arithmetic.
This function converts input data to float32 for averaging and
it is the caller's responsibility to convert back to a desired
type.
"""
d1 = data
# sort axes by stride distance to optimize for locality
# doesn't seem to make much difference on modern systems...
axes = [ (axis, d1.strides[axis]) for axis in range(d1.ndim) ]
axes.sort(key=lambda p: p[1])
assert len(axes_s) == data.ndim
# reduce one axis at a time to shrink work for subsequent axes
for axis in [p[0] for p in axes]:
s = axes_s[axis]
if s == 1:
# skip useless copying for non-reducing axis
continue
# accumulate s-strided subsets that belong to each bin
a = d1[
tuple(
[ slice(None) for i in range(axis) ]
+ [ slice(0, 1-s, s) ]
+ [ slice(None) for i in range(d1.ndim - axis - 1) ]
)
].astype(np.float32, copy=True)
for step in range(1, s):
a += d1[
tuple(
[ slice(None) for i in range(axis) ]
                    + [ slice(step, (1 - s + step) or None, s) ]  # negative stop offset, or None when it would be 0
+ [ slice(None) for i in range(d1.ndim - axis - 1) ]
)
]
# compute single-axis bin averages from accumulation
d1 = a * (1./s)
return d1
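# Hedged check of the shape/averaging contract described in the docstring:
def _bin_reduce_example():
    data = np.arange(8, dtype=np.float32).reshape(2, 4)
    return bin_reduce(data, [2, 2])  # -> array([[2.5, 4.5]], dtype=float32)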
class TiffLazyNDArray (object):
"""Lazy wrapper for large TIFF image stacks.
Supports some basic ND-array compatibility for data access,
with an intended use case of sub-block decomposition of large
TIFF stacks, where it is not desirable to hold the entire image
array in RAM at one time.
Slicing via the usual __getitem__ interface will perform
memmapped file I/O to build and return an actual numpy
ND-array.
Basic min/max methods will stream through the whole image file
while only buffering one page of image data at a time.
"""
def __init__(self, src, _output_plan=None):
"""Wrap an image source given by filename or an existing tifffile.TiffFile instance."""
        if isinstance(src, str):
            self.tf = tifffile.TiffFile(src)
        elif isinstance(src, tifffile.TiffFile):
            self.tf = src
        elif isinstance(src, TiffLazyNDArray):
            self.tf = src.tf
        else:
            raise TypeError("src must be a filename, tifffile.TiffFile, or TiffLazyNDArray")
tfimg = self.tf.series[0]
page0 = tfimg.pages[0]
self.dtype = tfimg.dtype
self.tf_shape = tfimg.shape
self.tf_axes = tfimg.axes
self.stack_ndim = len(tfimg.shape) - len(page0.shape)
self.stack_shape = tfimg.shape[0:self.stack_ndim]
print("TIFF %s %s %s, page0 %s, stack %s, axes %s?" % (tfimg.shape, tfimg.axes, tfimg.dtype, page0.shape, self.stack_shape, tfimg.axes))
assert reduce(lambda a,b: a*b, self.stack_shape, 1) == len(tfimg.pages), "TIFF page count %s inconsistent with expected stack shape %s" % (len(tfimg.pages), self.stack_shape)
assert tfimg.shape[self.stack_ndim:] == page0.shape, "TIFF page packing structure not understood"
if _output_plan:
self.output_plan = _output_plan
else:
self.output_plan = [
(a, slice(0, self.tf_shape[a], 1), slice(0, self.tf_shape[a], 1))
for a in range(len(tfimg.shape))
]
if isinstance(src, TiffLazyNDArray):
# preserve existing metadata
self.micron_spacing = src.micron_spacing
elif self.tf.is_ome:
# get OME-TIFF XML metadata
p = self.tf.pages[0]
try:
s = p.tags['ImageDescription'].value
except KeyError:
# older behavior of tifffile
s = p.tags['image_description']
d = minidom.parseString(s)
a = dict(list(d.getElementsByTagName('Pixels')[0].attributes.items()))
p = None
d = None
assert len(self.tf.series) == 1
self.micron_spacing = (
float(a['PhysicalSizeZ']),
float(a['PhysicalSizeY']),
float(a['PhysicalSizeX'])
)
elif self.tf.is_lsm:
# get LSM metadata
lsmi = None
for page in self.tf.pages:
if page.is_lsm:
lsmi = page.cz_lsm_info
assert lsmi is not None
self.micron_spacing = (
lsmi.voxel_size_z * 10**6,
lsmi.voxel_size_y * 10**6,
lsmi.voxel_size_x * 10**6
)
def _plan_slicing(self, key):
assert isinstance(key, tuple)
tfimg = self.tf.series[0]
output_plan = [
(tf_axis, in_slice, out_slice)
for tf_axis, in_slice, out_slice in self.output_plan
if out_slice is None and in_slice is not None
]
current_plan = [ # FIFO of dimensions projected by key
(tf_axis, in_slice, out_slice)
for tf_axis, in_slice, out_slice in self.output_plan
if out_slice is not None
]
for elem in key:
if elem is None:
# inject fake output dimension
tf_axis = None
out_slice = slice(0,1,1)
in_slice = None
else:
tf_axis, in_slice, out_slice = current_plan.pop(0)
if isinstance(elem, int):
# collapse projected dimension
if elem < 0:
elem += out_slice.stop
if elem >= out_slice.stop or elem < 0:
raise IndexError('index %d out of range [0,%d)' % (elem, out_slice.stop))
if isinstance(in_slice, slice):
in_slice = elem + in_slice.start
else:
continue
out_slice = None
elif isinstance(elem, slice):
# modify sliced dimension
if elem.step is None:
step = 1
else:
step = elem.step
assert step > 0, "only positive stepping is supported"
if elem.start is None:
start = 0
elif elem.start < 0:
start = elem.start + out_slice.stop
else:
start = elem.start
if elem.stop is None:
stop = out_slice.stop
elif elem.stop < 0:
stop = elem.stop + out_slice.stop
else:
stop = elem.stop
start = max(min(start, out_slice.stop), 0)
stop = max(min(stop, out_slice.stop), 0)
assert start < stop, "empty slicing not supported"
if isinstance(in_slice, slice):
in_slice = slice(in_slice.start + start, in_slice.start + stop, in_slice.step * step)
w = in_slice.stop - in_slice.start
w = w//in_slice.step + (w%in_slice.step and 1 or 0)
out_slice = slice(0,w,1)
else:
in_slice = None
out_slice = slice(0,1,1)
output_plan.append((tf_axis, in_slice, out_slice))
assert not current_plan, "slicing key must project all image dimensions"
return output_plan
def __getitem__(self, key):
tfimg = self.tf.series[0]
output_plan = self._plan_slicing(key)
# skip fake dimensions for intermediate buffer
buffer_plan = [
(tf_axis, in_slice, out_slice)
for tf_axis, in_slice, out_slice in output_plan
if in_slice is not None
]
# input will be untransposed with dimension in TIFF order
input_plan = list(buffer_plan)
input_plan.sort(key=lambda p: p[0])
assert len(input_plan) == len(tfimg.shape)
# buffer may have fewer dimensions than input slicing due to integer keys
buffer_shape = tuple([
out_slice.stop
for tf_axis, in_slice, out_slice in input_plan
if isinstance(in_slice, slice)
])
buffer_axes = [
tf_axis
for tf_axis, in_slice, out_slice in input_plan
if isinstance(in_slice, slice)
]
buffer = np.empty(buffer_shape, self.dtype)
# generate page-by-page slicing
stack_plan = input_plan[0:self.stack_ndim]
page_plan = input_plan[self.stack_ndim:]
def generate_io_slices(stack_plan, page_plan):
if stack_plan:
tf_axis, in_slice, out_slice = stack_plan[0]
if isinstance(in_slice, slice):
for x in range(in_slice.start, in_slice.stop):
for outslc, inslc in generate_io_slices(stack_plan[1:], page_plan):
yield ((x - in_slice.start,) + outslc, (x,) + inslc)
elif isinstance(in_slice, int):
for outslc, inslc in generate_io_slices(stack_plan[1:], page_plan):
yield (outslc, (in_slice,) + inslc)
else:
assert False
else:
yield tuple(p[2] for p in page_plan if p[2] is not None), tuple(p[1] for p in page_plan)
stack_spans = [
reduce(lambda a,b: a*b, self.stack_shape[i+1:], 1)
for i in range(self.stack_ndim)
]
# perform actual pixel I/O
for out_slicing, in_slicing in generate_io_slices(stack_plan, page_plan):
page = sum(map(lambda c, s: c*s, in_slicing[0:self.stack_ndim], stack_spans))
page_slice = in_slicing[self.stack_ndim:]
try:
buffer[out_slicing] = tfimg.pages[page].asarray(out='memmap')[page_slice]
except TypeError as e:
# try older tifffile memmap interface
buffer[out_slicing] = tfimg.pages[page].asarray(memmap=True)[page_slice]
# apply current transposition to buffered dimensions
buffer_axis = dict([(buffer_axes[d], d) for d in range(len(buffer_axes))])
transposition = [
buffer_axis[tf_axis]
for tf_axis, in_slice, out_slice in output_plan
if isinstance(in_slice, slice)
]
buffer = buffer.transpose(tuple(transposition))
out_slicing = [
            out_slice if in_slice is not None else None
for tf_axis, in_slice, out_slice in output_plan
if isinstance(in_slice, slice) or in_slice is None
]
return buffer[tuple(out_slicing)]
def transpose(self, *transposition):
output_plan = [
(tf_axis, in_slice, out_slice)
for tf_axis, in_slice, out_slice in self.output_plan
if out_slice is None
]
current_plan = [ # FIFO of dimensions projected by key
(tf_axis, in_slice, out_slice)
for tf_axis, in_slice, out_slice in self.output_plan
if out_slice is not None
]
        for d in transposition:
            assert current_plan[d] is not None, "transpose cannot repeat the same dimension"
            p = current_plan[d]
            current_plan[d] = None
            output_plan.append(p)
        assert len([p for p in current_plan if p is not None]) == 0, "transpose must include all dimensions"
return TiffLazyNDArray(self, output_plan)
def lazyget(self, key):
output_plan = self._plan_slicing(key)
return TiffLazyNDArray(self, output_plan)
def force(self):
return self[tuple(slice(None) for d in self.shape)]
@property
def ndim(self):
return len([p for p in self.output_plan if p[2] is not None])
@property
def shape(self):
return tuple(p[2].stop for p in self.output_plan if p[2] is not None)
@property
def axes(self):
        return ''.join(self.tf_axes[p[0]] if p[0] is not None else 'Q' for p in self.output_plan if p[2] is not None)
@property
def strides(self):
plan = [(p[0], p[2].stop) for p in self.output_plan if p[2] is not None]
plan = [(i,) + plan[i] for i in range(len(plan))]
plan.sort(key=lambda p: p[1])
strides = []
for i in range(len(plan)):
strides.append((plan[i][0], reduce(lambda a, b: a*b, [p[2] for p in plan[i+1:]], 1)))
strides.sort(key=lambda p: p[0])
strides = [p[1] for p in strides]
return strides
@lazyattr
def min_max(self):
amin = None
amax = None
tfimg = self.tf.series[0]
for tfpage in tfimg.pages:
try:
p = tfpage.asarray(out='memmap')
except TypeError as e:
# try older tifffile api
p = tfpage.asarray(memmap=True)
pmin = float(p.min())
pmax = float(p.max())
if amin is not None:
amin = min(amin, pmin)
else:
amin = pmin
if amax is not None:
amax = max(amax, pmax)
else:
amax = pmax
return (amin, amax)
def max(self):
return self.min_max[1]
def min(self):
return self.min_max[0]
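# Minimal usage sketch (assumes a hypothetical 4-D file "example.ome.tif";
# pixel I/O happens page by page, touching only the pages a slice needs):
#
#     lazy = TiffLazyNDArray('example.ome.tif')
#     print(lazy.shape, lazy.axes)
#     roi = lazy.lazyget((0, slice(None), slice(0, 64), slice(0, 64)))
#     pixels = roi.force()   # materializes the selected region as ndarray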
def canonicalize(data):
"""Restructure to preferred TCZYX or CZYX form..."""
data = data.transpose(*[d for d in map(data.axes.find, 'TCIZYX') if d >= 0])
projection = []
if 'T' in data.axes and data.shape[0] == 1:
projection.append(0) # remove trivial T dimension
if 'C' not in data.axes:
projection.append(None) # add trivial C dimension
elif projection:
projection.append(slice(None))
if projection:
projection += [slice(None) for d in 'ZYX']
data = data.lazyget(tuple(projection))
return data
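# For example, a 'CZYX' file passes through canonicalize() with only an
# axis reordering, while a 'TZYX' stack with a single timepoint loses its
# trivial T axis and gains an injected singleton C axis (reported as 'Q').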
def load_tiff(fname):
"""Load named file using TIFF reader, returning (data, metadata).
Keep temporarily for backward-compatibility...
"""
data = TiffLazyNDArray(fname)
try:
data = canonicalize(data)
except Exception as e:
print(e)
# special case for raw TIFF (not LSM, not OME)
if data.ndim == 3:
data = data[(None,slice(None),slice(None),slice(None))] # add fake color dimension
elif data.ndim == 4 and data.shape[3] < 4:
data = data.transpose(3,0,1,2) # transpose color
try:
z_microns, y_microns, x_microns = data.micron_spacing
md = ImageMetadata(x_microns, y_microns, z_microns, data.axes)
except AttributeError as e:
print('got error %s fetching metadata during load_tiff' % e)
md = None
return data, md
def load_image(fname):
"""Load named file, returning (data, metadata).
Keep temporarily for backward-compatibility...
"""
return load_tiff(fname)
class wrapper (np.ndarray):
"""Subtype to allow extra attributes"""
pass
def load_and_mangle_image(fname):
"""Load and mangle TIFF image file.
Arguments:
fname: LSM or OME-TIFF input file name
Environment parameters:
ZYX_SLICE: selects ROI within full image
ZYX_IMAGE_GRID: overrides image grid step metadata
ZNOISE_PERCENTILE: see source
ZNOISE_ZERO_LEVEL: see source
Results tuple fields:
image
meta
slice_origin
"""
I, meta = load_image(fname)
try:
voxel_size = tuple(map(float, os.getenv('ZYX_IMAGE_GRID').split(",")))
print("ZYX_IMAGE_GRID environment forces image grid of %s micron." % (voxel_size,))
assert len(voxel_size) == 3
    except (AttributeError, ValueError, AssertionError):
        # ZYX_IMAGE_GRID unset or malformed; fall back to detected metadata
try:
voxel_size = I.micron_spacing
print("Using detected %s micron image grid." % (voxel_size,))
except AttributeError:
print("ERROR: could not determine image grid spacing. Use ZYX_IMAGE_GRID=Z,Y,X to override.")
raise
meta = ImageMetadata(voxel_size[2], voxel_size[1], voxel_size[0], I.axes)
setattr(I, 'micron_spacing', voxel_size)
# temporary pre-processing hacks to investigate XY-correlated sensor artifacts...
try:
ntile = int(os.getenv('ZNOISE_PERCENTILE'))
I = I.force().astype(np.float32)
zerofield = np.percentile(I, ntile, axis=1)
print('Image %d percentile value over Z-axis ranges [%f,%f]' % (ntile, zerofield.min(), zerofield.max()))
I -= zerofield
print('Image offset by %d percentile XY value to new range [%f,%f]' % (ntile, I.min(), I.max()))
        zero = float(os.getenv('ZNOISE_ZERO_LEVEL', 0))
        I = I * (I >= zero)  # clamp values below the configured zero level
        print('Image clamped to range [%f,%f]' % (I.min(), I.max()))
    except (TypeError, ValueError):
        # ZNOISE_PERCENTILE unset or malformed; skip this pre-processing
        pass
I = I.transpose(1,2,3,0)
# allow user to select a bounding box region of interest
bbox = os.getenv('ZYX_SLICE')
slice_origin = (0, 0, 0)
if bbox:
bbox = bbox.split(",")
assert len(bbox) == 3, "ZYX_SLICE must have comma-separated slices for 3 axes Z,Y,X"
def parse_axis(slc_s, axis_len):
bounds = slc_s.split(":")
assert len(bounds) == 2, "ZYX_SLICE must have colon-separated START:STOP pairs for each axis"
if bounds[0] != '':
assert int(bounds[0]) >= 0, "ZYX_SLICE START values must be 0 or greater or empty string"
assert int(bounds[0]) < (axis_len-2), "ZYX_SLICE START values must be less than axis length - 2"
bounds[0] = int(bounds[0])
else:
bounds[0] = 0
if bounds[1] != '':
assert int(bounds[1]) >= bounds[0], "ZYX_SLICE STOP values must be greater than START or empty string"
bounds[1] = int(bounds[1])
else:
bounds[1] = axis_len
return slice(bounds[0], bounds[1])
bbox = tuple([
parse_axis(bbox[d], I.shape[d])
for d in range(3)
]) + (slice(None),)
I = I.lazyget(bbox)
slice_origin = tuple([
slc.start or 0
for slc in bbox[0:3]
])
if I.shape[2] % 16:
# trim for 16-pixel row alignment
slc = tuple([
slice(None),
slice(None),
slice(0,I.shape[2]//16*16),
slice(None)
])
if hasattr(I, 'lazyget'):
I = I.lazyget(slc)
else:
I = I[slc]
if isinstance(I, np.ndarray):
# temporarily maintain micron_spacing after munging above...
I2 = wrapper(shape=I.shape, dtype=I.dtype)
I2[:,:,:,:] = I[:,:,:,:]
I = I2
setattr(I, 'micron_spacing', voxel_size)
return I, meta, slice_origin
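# Shell-level usage sketch (script and file names are hypothetical):
#
#     ZYX_IMAGE_GRID="0.5,0.25,0.25" ZYX_SLICE="0:100,:,0:512" \
#         python analyze.py input.ome.tif
#
# ZYX_IMAGE_GRID forces the Z,Y,X grid spacing in microns; ZYX_SLICE takes
# one START:STOP pair per axis, where an empty bound means the full extent.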
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import config
from . import state
class timers(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/peer-groups/peer-group/timers. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Timers related to a BGP peer-group
"""
__slots__ = ("_path_helper", "_extmethods", "__config", "__state")
_yang_name = "timers"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"peer-groups",
"peer-group",
"timers",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/timers/config (container)
YANG Description: Configuration parameters relating to timers used for the
BGP neighbor or peer group
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/timers/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configuration parameters relating to timers used for the
BGP neighbor or peer group
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/timers/state (container)
YANG Description: State information relating to the timers used for the BGP
group
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/timers/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State information relating to the timers used for the BGP
group
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict([("config", config), ("state", state)])
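# Usage sketch (illustrative; these containers are normally reached through
# the generated top-level openconfig-network-instance binding):
#
#     t = timers()
#     print(t._path())   # ['network-instances', ..., 'peer-group', 'timers']
#     cfg = t.config     # auto-instantiated 'config' container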
from . import config
from . import state
class timers(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/peer-groups/peer-group/timers. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Timers related to a BGP peer-group
"""
__slots__ = ("_path_helper", "_extmethods", "__config", "__state")
_yang_name = "timers"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"peer-groups",
"peer-group",
"timers",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/timers/config (container)
YANG Description: Configuration parameters relating to timers used for the
BGP neighbor or peer group
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/timers/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configuration parameters relating to timers used for the
BGP neighbor or peer group
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/timers/state (container)
YANG Description: State information relating to the timers used for the BGP
group
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/timers/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State information relating to the timers used for the BGP
group
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict([("config", config), ("state", state)])
|
|
#!/usr/bin/env python
# Copyright (c) 2010-2011 Stanford University
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Runs a RAMCloud.
Used to exercise a RAMCloud cluster (e.g., for performance measurements)
by running a collection of servers and clients.
"""
from __future__ import division
from common import *
import itertools
import log
import os
import pprint
import re
import subprocess
import sys
import time
from optparse import OptionParser
#------------------------------------------------------------------
# End of site-specific configuration.
#------------------------------------------------------------------
# Locations of various RAMCloud executables.
coordinator_binary = '%s/coordinator' % obj_path
server_binary = '%s/server' % obj_path
ensure_servers_bin = '%s/ensureServers' % obj_path
# Info used to construct service locators for each of the transports
# supported by RAMCloud. In some cases the locator for the coordinator
# needs to be different from that for the servers.
server_locator_templates = {
'tcp': 'tcp:host=%(host)s,port=%(port)d',
'tcp-1g': 'tcp:host=%(host1g)s,port=%(port)d',
'fast+udp': 'fast+udp:host=%(host)s,port=%(port)d',
'fast+udp-1g': 'fast+udp:host=%(host1g)s,port=%(port)d',
'unreliable+udp': 'unreliable+udp:host=%(host)s,port=%(port)d',
'infrc': 'infrc:host=%(host)s,port=%(port)d',
'fast+infud': 'fast+infud:',
'unreliable+infud': 'unreliable+infud:',
'fast+infeth': 'fast+infeth:mac=00:11:22:33:44:%(id)02x',
'unreliable+infeth': 'unreliable+infeth:mac=00:11:22:33:44:%(id)02x',
}
coord_locator_templates = {
'tcp': 'tcp:host=%(host)s,port=%(port)d',
'tcp-1g': 'tcp:host=%(host1g)s,port=%(port)d',
'fast+udp': 'fast+udp:host=%(host)s,port=%(port)d',
'fast+udp-1g': 'fast+udp:host=%(host1g)s,port=%(port)d',
'unreliable+udp': 'unreliable+udp:host=%(host)s,port=%(port)d',
'infrc': 'infrc:host=%(host)s,port=%(port)d',
# Coordinator uses udp even when rest of cluster uses infud
# or infeth.
'fast+infud': 'fast+udp:host=%(host)s,port=%(port)d',
'unreliable+infud': 'fast+udp:host=%(host)s,port=%(port)d',
'fast+infeth': 'fast+udp:host=%(host)s,port=%(port)d',
'unreliable+infeth': 'fast+udp:host=%(host)s,port=%(port)d',
}
def server_locator(transport, host, port=server_port):
"""Generate a service locator for a master/backup process.
@param transport: A transport name (e.g. infrc, fast+udp, tcp, ...)
@type transport: C{str}
@param host: A 3-tuple of (hostname, ip, id).
@type host: C{(str, str, int)}
@param port: Port which should be part of the locator (if any).
Allows multiple services to be started on the same host.
@type port: C{int}
@return: A service locator.
@rtype: C{str}
"""
locator = (server_locator_templates[transport] %
{'host': host[1],
'host1g': host[0],
'port': port,
'id': host[2]})
return locator
def coord_locator(transport, host):
"""Generate a service locator for a coordinator process.
@param transport: A transport name (e.g. infrc, fast+udp, tcp, ...)
@type transport: C{str}
@param host: A 3-tuple of (hostname, ip, id).
@type host: C{(str, str, int)}
@return: A service locator.
@rtype: C{str}
"""
locator = (coord_locator_templates[transport] %
{'host': host[1],
'host1g': host[0],
'port': coordinator_port,
'id': host[2]})
return locator
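# Locator construction sketch (host tuples come from the site-specific
# 'hosts' list; addresses here are illustrative):
#
#     server_locator('tcp', ('rc01', '192.168.1.101', 1))
#         -> 'tcp:host=192.168.1.101,port=<server_port>'
#     coord_locator('fast+infud', ('rc01', '192.168.1.101', 1))
#         -> 'fast+udp:host=192.168.1.101,port=<coordinator_port>'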
def run(
num_servers=4, # Number of hosts on which to start
# servers (not including coordinator).
backups_per_server=1, # Number of backups to run on each
# server host (0, 1, or 2).
replicas=3, # Replication factor to use for each
# log segment.
disk1=default_disk1, # Server arguments specifying the
# backing device for the first backup
# on each server.
        disk2=default_disk2,         # Server arguments specifying the
                                     # backing device for the second backup
                                     # on each server (if backups_per_server == 2).
timeout=20, # How many seconds to wait for the
# clients to complete.
coordinator_args='', # Additional arguments for the
# coordinator.
master_args='', # Additional arguments for each server
# that runs a master
backup_args='', # Additional arguments for each server
# that runs a backup.
log_level='NOTICE', # Log level to use for all servers.
log_dir='logs', # Top-level directory in which to write
# log files. A separate subdirectory
# will be created in this directory
# for the log files from this run.
        client='echo',               # Command line to invoke for each client;
                                     # configuration arguments such as -C will
                                     # be prepended to its argument list.
num_clients=1, # Number of client processes to run.
# They will all run on separate
# machines, if possible, but if there
# aren't enough available machines then
# multiple clients will run on some
# machines.
share_hosts=False, # True means clients can be run on
# machines running servers, if needed.
transport='infrc', # Name of transport to use for servers.
verbose=False, # Print information about progress in
# starting clients and servers
debug=False, # If True, pause after starting all
# to allow for debugging setup such as
# attaching gdb.
old_master_host=None, # Pass a (hostname, ip, id) tuple to
# construct a large master on that host
# before the others are started. Useful
# for creating the old master for
# recoveries.
old_master_args="" # Additional arguments to run on the
# old master (e.g. total RAM).
):
"""
Start a coordinator and servers, as indicated by the arguments.
Then start one or more client processes and wait for them to complete.
@return: string indicating the path to the log files for this run.
"""
if num_servers > len(hosts):
raise Exception("num_servers (%d) exceeds the available hosts (%d)"
% (num_servers, len(hosts)))
# Create a subdirectory of the log directory for this run
log_subdir = log.createDir(log_dir)
coordinator = None
servers = []
clients = []
with Sandbox() as sandbox:
def ensure_servers(numMasters, numBackups):
sandbox.checkFailures()
try:
sandbox.rsh(hosts[0][0], '%s -C %s -m %d -b %d -l 1 --wait 5 '
'--logFile %s/ensureServers.log' %
(ensure_servers_bin, coordinator_locator,
numMasters, numBackups, log_subdir))
except:
# prefer exceptions from dead processes to timeout error
sandbox.checkFailures()
raise
# Start coordinator
if num_servers > 0:
coordinator_host = hosts[0]
coordinator_locator = coord_locator(transport, coordinator_host)
coordinator = sandbox.rsh(coordinator_host[0],
('%s -C %s -l %s --logFile %s/coordinator.%s.log %s' %
(coordinator_binary, coordinator_locator, log_level,
log_subdir, coordinator_host[0], coordinator_args)),
bg=True, stderr=subprocess.STDOUT)
ensure_servers(0, 0)
if verbose:
print "Coordinator started on %s at %s" % (coordinator_host[0],
coordinator_locator)
# Track how many services are registered with the coordinator
# for ensure_servers
masters_started = 0
backups_started = 0
# Start old master - a specialized master for recovery with lots of data
if old_master_host:
host = old_master_host
command = ('%s -C %s -L %s -M -r %d -l %s '
'--logFile %s/oldMaster.%s.log %s' %
(server_binary, coordinator_locator,
server_locator(transport, host),
replicas, log_level, log_subdir, host[0],
old_master_args))
servers.append(sandbox.rsh(host[0], command, ignoreFailures=True,
bg=True, stderr=subprocess.STDOUT))
masters_started += 1
ensure_servers(masters_started, 0)
# Start servers
for i in range(num_servers):
# First start the main server on this host, which runs a master
# and possibly a backup. The first server shares the same machine
# as the coordinator.
            host = hosts[i]
command = ('%s -C %s -L %s -r %d -l %s '
'--logFile %s/server.%s.log %s' %
(server_binary, coordinator_locator,
server_locator(transport, host),
replicas, log_level, log_subdir, host[0],
master_args))
if backups_per_server > 0:
command += ' %s %s' % (disk1, backup_args)
masters_started += 1
backups_started += 1
else:
command += ' -M'
masters_started += 1
servers.append(sandbox.rsh(host[0], command, bg=True,
stderr=subprocess.STDOUT))
if verbose:
print "Server started on %s at %s" % (host[0],
server_locator(transport,
host))
# Start an extra backup server in this host, if needed.
if backups_per_server == 2:
command = ('%s -C %s -L %s -B %s -l %s '
'--logFile %s/backup.%s.log %s' %
(server_binary, coordinator_locator,
server_locator(transport, host, second_backup_port),
disk2, log_level, log_subdir, host[0],
backup_args))
servers.append(sandbox.rsh(host[0], command, bg=True,
stderr=subprocess.STDOUT))
backups_started += 1
if verbose:
print "Extra backup started on %s at %s" % (host[0],
server_locator(transport, host, second_backup_port))
if debug:
print "Servers started; pausing for debug setup."
raw_input("Type <Enter> to continue: ")
if masters_started > 0 or backups_started > 0:
ensure_servers(masters_started, backups_started)
if verbose:
print "All servers running"
# Start clients
args = client.split(" ")
client_bin = args[0]
client_args = " ".join(args[1:])
host_index = num_servers
for i in range(num_clients):
if host_index >= len(hosts):
if share_hosts or num_servers >= len(hosts):
host_index = 0
else:
host_index = num_servers
client_host = hosts[host_index]
command = ('%s -C %s --numClients %d --clientIndex %d '
'--logFile %s/client%d.%s.log %s' %
(client_bin, coordinator_locator, num_clients,
i, log_subdir, i, client_host[0], client_args))
clients.append(sandbox.rsh(client_host[0], command, bg=True))
if verbose:
print "Client %d started on %s: %s" % (i, client_host[0],
command)
host_index += 1
# Wait for all of the clients to complete
start = time.time()
for i in range(num_clients):
while clients[i].returncode is None:
sandbox.checkFailures()
time.sleep(.1)
if time.time() - start > timeout:
raise Exception('timeout exceeded')
if verbose:
print "Client %d finished" % i
return log_subdir
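# Programmatic usage sketch (the client command line is hypothetical):
#
#     log_path = run(num_servers=4, replicas=3, transport='tcp',
#                    client='%s/someClient --count 1000' % obj_path)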
if __name__ == '__main__':
parser = OptionParser(description=
'Start RAMCloud servers and run a client application.',
conflict_handler='resolve')
parser.add_option('--backupArgs', metavar='ARGS', default='',
dest='backup_args',
help='Additional command-line arguments to pass to '
'each backup')
parser.add_option('-b', '--backups', type=int, default=1,
metavar='N', dest='backups_per_server',
help='Number of backups to run on each server host '
'(0, 1, or 2)')
parser.add_option('--client', metavar='ARGS', default='echo',
help='Command line to invoke the client application '
'(additional arguments will be inserted at the beginning '
'of the argument list)')
parser.add_option('-n', '--clients', type=int, default=1,
metavar='N', dest='num_clients',
help='Number of instances of the client application '
'to run')
parser.add_option('--coordinatorArgs', metavar='ARGS', default='',
dest='coordinator_args',
help='Additional command-line arguments to pass to the '
'cluster coordinator')
parser.add_option('--debug', action='store_true', default=False,
help='Pause after starting servers but before running '
'clients to enable debugging setup')
parser.add_option('--disk1', default=default_disk1,
help='Server arguments to specify disk for first backup')
parser.add_option('--disk2', default=default_disk2,
help='Server arguments to specify disk for second backup')
parser.add_option('-l', '--logLevel', default='NOTICE',
choices=['DEBUG', 'NOTICE', 'WARNING', 'ERROR', 'SILENT'],
metavar='L', dest='log_level',
help='Controls degree of logging in servers')
parser.add_option('-d', '--logDir', default='logs', metavar='DIR',
dest='log_dir',
help='Top level directory for log files; the files for '
'each invocation will go in a subdirectory.')
parser.add_option('--masterArgs', metavar='ARGS', default='',
dest='master_args',
help='Additional command-line arguments to pass to '
'each master')
parser.add_option('-r', '--replicas', type=int, default=3,
metavar='N',
help='Number of disk backup copies for each segment')
parser.add_option('-s', '--servers', type=int, default=4,
metavar='N', dest='num_servers',
help='Number of hosts on which to run servers')
parser.add_option('--shareHosts', action='store_true', default=False,
dest='share_hosts',
help='Allow clients to run on machines running servers '
'(by default clients run on different machines than '
'the servers, though multiple clients may run on a '
'single machine)')
parser.add_option('-t', '--timeout', type=int, default=20,
metavar='SECS',
help="Abort if the client application doesn't finish within "
'SECS seconds')
parser.add_option('-T', '--transport', default='infrc',
help='Transport to use for communication with servers')
parser.add_option('-v', '--verbose', action='store_true', default=False,
help='Print progress messages')
(options, args) = parser.parse_args()
status = 0
try:
run(**vars(options))
finally:
logInfo = log.scan("logs/latest", ["WARNING", "ERROR"])
if len(logInfo) > 0:
print >>sys.stderr, logInfo
status = 1
quit(status)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2017-2021 Richard Hull and contributors
# See LICENSE.rst for details.
"""
Tests for the :py:mod:`luma.core.cmdline` module.
"""
import pytest
import errno
from unittest.mock import patch, Mock
from luma.core import cmdline, error
from luma.core.interface.serial import __all__ as serial_iface_types
from luma.core.interface.parallel import __all__ as parallel_iface_types
from helpers import (get_reference_file, i2c_error, rpi_gpio_missing,
spidev_missing, skip_unsupported_platform)
test_config_file = get_reference_file('config-test.txt')
class test_spi_opts(object):
spi_port = 0
spi_device = 0
spi_bus_speed = 8000000
spi_transfer_size = 4096
gpio_data_command = 24
gpio_reset = 25
gpio_backlight = 18
gpio_reset_hold_time = 0
gpio_reset_release_time = 0
interface = 'spi'
framebuffer = 'diff_to_previous'
num_segments = 25
debug = False
framebuffer_device = None
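# test_spi_opts mimics the argparse namespace handed to luma.core.cmdline:
# make_interface() and create_device() read attributes straight off the
# options object, so individual tests subclass it to override settings.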
def test_get_interface_types():
"""
Enumerate interface types.
"""
assert cmdline.get_interface_types() == serial_iface_types + parallel_iface_types
def test_ensure_cmdline_opt_contains_all_interfaces():
"""
Checks that the cmdline make_interface factory contains initializers for all interface classes
"""
class opts:
pass
factory = cmdline.make_interface(opts)
for interface in cmdline.get_interface_types():
assert hasattr(factory, interface)
def test_get_display_types():
"""
Enumerate display types.
"""
assert list(cmdline.get_display_types().keys()) == \
cmdline.get_supported_libraries()
def test_get_choices_unknown_module():
"""
:py:func:`luma.core.cmdline.get_choices` returns an empty list when
trying to inspect an unknown module.
"""
result = cmdline.get_choices('foo')
assert result == []
def test_get_library_version():
"""
:py:func:`luma.core.cmdline.get_library_version` returns the version number
for the specified library name.
"""
lib_name = 'hotscreenz'
lib_version = '0.1.2'
# set version nr for fake luma library
luma_fake_lib = Mock()
luma_fake_lib.__version__ = lib_version
# version is found
with patch.dict('sys.modules', {'luma.' + lib_name: luma_fake_lib}):
assert cmdline.get_library_version(lib_name) == lib_version
# no version for module without __version__ attribute
lib_name = 'no_version'
luma_without_version_lib = Mock()
with patch.dict('sys.modules', {'luma.' + lib_name: luma_without_version_lib}):
assert cmdline.get_library_version(lib_name) is None
# no version for non-existing module
assert cmdline.get_library_version('foo') is None
def test_get_library_for_display_type():
"""
:py:func:`luma.core.cmdline.get_library_for_display_type` returns the
    library name for a particular display.
"""
display_type = 'coolscreen'
lib_name = 'screens'
with patch('luma.core.cmdline.get_display_types') as mocka:
mocka.return_value = {
lib_name: [display_type, 'bar'],
'emulator': ['x', 'y']
}
assert cmdline.get_library_for_display_type(display_type) == lib_name
def test_load_config_file_parse():
"""
:py:func:`luma.core.cmdline.load_config` parses a text file and returns a
list of arguments.
"""
result = cmdline.load_config(test_config_file)
assert result == [
'--display=capture',
'--width=800',
'--height=8600',
'--spi-bus-speed=16000000'
]
def test_create_parser():
"""
:py:func:`luma.core.cmdline.create_parser` returns an argument parser
instance.
"""
with patch.dict('sys.modules', **{
'luma.emulator': Mock(),
'luma.emulator.render': Mock(),
}):
with patch('luma.core.cmdline.get_display_types') as mocka:
mocka.return_value = {
'foo': ['a', 'b'],
'bar': ['c', 'd'],
'emulator': ['e', 'f']
}
parser = cmdline.create_parser(description='test')
args = parser.parse_args(['-f', test_config_file])
assert args.config == test_config_file
def test_make_interface_noop():
"""
    :py:func:`luma.core.cmdline.make_interface.noop` returns a ``noop`` instance.
"""
class opts:
interface = 'noop'
factory = cmdline.make_interface(opts)
assert 'luma.core.interface.serial.noop' in repr(factory.noop())
def test_make_interface_i2c():
"""
:py:func:`luma.core.cmdline.make_interface.i2c` returns an I2C instance.
"""
class opts:
i2c_port = 200
i2c_address = 0x710
path_name = f'/dev/i2c-{opts.i2c_port}'
fake_open = i2c_error(path_name, errno.ENOENT)
factory = cmdline.make_interface(opts)
with patch('os.open', fake_open):
with pytest.raises(error.DeviceNotFoundError):
factory.i2c()
def test_make_interface_spi():
"""
:py:func:`luma.core.cmdline.make_interface.spi` returns an SPI instance.
"""
try:
factory = cmdline.make_interface(test_spi_opts)
assert 'luma.core.interface.serial.spi' in repr(factory.spi())
except ImportError:
# non-rpi platform, e.g. macos
pytest.skip(rpi_gpio_missing)
except error.UnsupportedPlatform as e:
# non-rpi platform, e.g. ubuntu 64-bit
skip_unsupported_platform(e)
def test_make_interface_gpio_cs_spi():
"""
    :py:func:`luma.core.cmdline.make_interface.gpio_cs_spi` returns a gpio_cs_spi instance.
"""
class opts(test_spi_opts):
interface = 'gpio_cs_spi'
spi_cs_high = True
gpio_chip_select = 4
try:
factory = cmdline.make_interface(opts)
assert 'luma.core.interface.serial.spi' in repr(factory.gpio_cs_spi())
except ImportError:
# non-rpi platform, e.g. macos
pytest.skip(rpi_gpio_missing)
except error.UnsupportedPlatform as e:
# non-rpi platform, e.g. ubuntu 64-bit
skip_unsupported_platform(e)
def test_make_interface_spi_alt_gpio():
"""
:py:func:`luma.core.cmdline.make_interface.spi` returns an SPI instance
when using an alternative GPIO implementation.
"""
class opts(test_spi_opts):
gpio = 'fake_gpio'
with patch.dict('sys.modules', **{
'fake_gpio': Mock(unsafe=True)
}):
try:
factory = cmdline.make_interface(opts)
assert 'luma.core.interface.serial.spi' in repr(factory.spi())
except ImportError:
pytest.skip(spidev_missing)
except error.DeviceNotFoundError as e:
# non-rpi platform, e.g. ubuntu 64-bit
skip_unsupported_platform(e)
def test_make_interface_bitbang():
"""
    :py:func:`luma.core.cmdline.make_interface.bitbang` returns a BitBang instance.
"""
try:
factory = cmdline.make_interface(test_spi_opts)
assert 'luma.core.interface.serial.bitbang' in repr(factory.bitbang())
except ImportError:
# non-rpi platform, e.g. macos
pytest.skip(rpi_gpio_missing)
except error.UnsupportedPlatform as e:
# non-rpi platform, e.g. ubuntu 64-bit
skip_unsupported_platform(e)
def test_make_interface_pcf8574():
"""
    :py:func:`luma.core.cmdline.make_interface.pcf8574` returns a pcf8574 instance.
"""
class opts:
i2c_port = 200
i2c_address = 0x710
path_name = f'/dev/i2c-{opts.i2c_port}'
fake_open = i2c_error(path_name, errno.ENOENT)
factory = cmdline.make_interface(opts)
with patch('os.open', fake_open):
with pytest.raises(error.DeviceNotFoundError):
factory.pcf8574()
def test_make_interface_bitbang_6800():
"""
:py:func:`luma.core.cmdline.make_interface.bitbang_6800` returns a Bitbang-6800 instance.
"""
class opts:
pass
try:
factory = cmdline.make_interface(opts)
assert 'luma.core.interface.parallel.bitbang_6800' in repr(factory.bitbang_6800())
except ImportError:
# non-rpi platform, e.g. macos
pytest.skip(rpi_gpio_missing)
except error.UnsupportedPlatform as e:
# non-rpi platform, e.g. ubuntu 64-bit
skip_unsupported_platform(e)
def test_make_interface_bitbang_6800_alt_gpio():
"""
    :py:func:`luma.core.cmdline.make_interface.bitbang_6800` returns a Bitbang-6800 instance
when using an alternative GPIO implementation.
"""
    class opts:
gpio = 'fake_gpio'
with patch.dict('sys.modules', **{
'fake_gpio': Mock(unsafe=True)
}):
try:
factory = cmdline.make_interface(opts)
assert 'luma.core.interface.parallel.bitbang_6800' in repr(factory.bitbang_6800())
except ImportError:
pytest.skip(spidev_missing)
except error.DeviceNotFoundError as e:
# non-rpi platform, e.g. ubuntu 64-bit
skip_unsupported_platform(e)
def test_create_device():
"""
:py:func:`luma.core.cmdline.create_device` returns ``None`` for unknown
displays.
"""
class args:
display = 'foo'
assert cmdline.create_device(args) is None
def test_create_device_oled():
"""
:py:func:`luma.core.cmdline.create_device` supports OLED displays.
"""
display_name = 'oled1234'
display_types = {'oled': [display_name]}
class args(test_spi_opts):
display = display_name
module_mock = Mock()
module_mock.oled.device.oled1234.return_value = display_name
with patch.dict('sys.modules', **{
# mock luma.oled package
'luma': module_mock,
'luma.oled': module_mock,
'luma.oled.device': module_mock
}):
try:
device = cmdline.create_device(args, display_types=display_types)
assert device == display_name
except ImportError:
pytest.skip(rpi_gpio_missing)
except error.UnsupportedPlatform as e:
# non-rpi platform
skip_unsupported_platform(e)
def test_create_device_lcd():
"""
:py:func:`luma.core.cmdline.create_device` supports LCD displays.
"""
display_name = 'lcd1234'
display_types = {'lcd': [display_name]}
class args(test_spi_opts):
display = display_name
gpio = 'fake_gpio'
backlight_active = 'low'
module_mock = Mock()
module_mock.lcd.device.lcd1234.return_value = display_name
with patch.dict('sys.modules', **{
# mock spidev and luma.lcd packages
'fake_gpio': module_mock,
'spidev': module_mock,
'luma': module_mock,
'luma.lcd': module_mock,
'luma.lcd.aux': module_mock,
'luma.lcd.device': module_mock
}):
device = cmdline.create_device(args, display_types=display_types)
assert device == display_name
def test_create_device_led_matrix():
"""
:py:func:`luma.core.cmdline.create_device` supports LED matrix displays.
"""
display_name = 'matrix1234'
display_types = {'led_matrix': [display_name]}
class args(test_spi_opts):
display = display_name
module_mock = Mock()
module_mock.led_matrix.device.matrix1234.return_value = display_name
with patch.dict('sys.modules', **{
# mock spidev and luma.led_matrix packages
'spidev': module_mock,
'luma': module_mock,
'luma.led_matrix': module_mock,
'luma.led_matrix.device': module_mock
}):
device = cmdline.create_device(args, display_types=display_types)
assert device == display_name
def test_create_device_emulator():
"""
:py:func:`luma.core.cmdline.create_device` supports emulators.
"""
display_name = 'emulator1234'
display_types = {'emulator': [display_name]}
class args(test_spi_opts):
display = display_name
module_mock = Mock()
module_mock.emulator.device.emulator1234.return_value = display_name
with patch.dict('sys.modules', **{
# mock spidev and luma.emulator packages
'spidev': module_mock,
'luma': module_mock,
'luma.emulator': module_mock,
'luma.emulator.device': module_mock
}):
device = cmdline.create_device(args, display_types=display_types)
assert device == display_name
def test_create_device_core():
"""
    :py:func:`luma.core.cmdline.create_device` supports core devices.
"""
display_name = 'coredevice1234'
display_types = {'core': [display_name]}
class args(test_spi_opts):
display = display_name
module_mock = Mock()
module_mock.core.device.coredevice1234.return_value = display_name
with patch.dict('sys.modules', **{
# mock luma.core package
'luma': module_mock,
'luma.core': module_mock,
'luma.core.device': module_mock
}):
device = cmdline.create_device(args, display_types=display_types)
assert device == display_name
@patch('pyftdi.spi.SpiController')
def test_make_interface_ftdi_spi(mock_controller):
"""
:py:func:`luma.core.cmdline.make_interface.ftdi_spi` returns an SPI instance.
"""
class opts(test_spi_opts):
ftdi_device = 'ftdi://dummy'
gpio_data_command = 5
gpio_reset = 6
gpio_backlight = 7
factory = cmdline.make_interface(opts)
assert 'luma.core.interface.serial.spi' in repr(factory.ftdi_spi())
@patch('pyftdi.i2c.I2cController')
def test_make_interface_ftdi_i2c(mock_controller):
"""
:py:func:`luma.core.cmdline.make_interface.ftdi_i2c` returns an I2C instance.
"""
class opts:
ftdi_device = 'ftdi://dummy'
i2c_port = 200
i2c_address = 0x710
factory = cmdline.make_interface(opts)
assert 'luma.core.interface.serial.i2c' in repr(factory.ftdi_i2c())
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import platform
import re
import unittest
import warnings
import random
import sys
from pymatgen import SETTINGS, __version__ as pmg_version
from pymatgen.ext.matproj import MPRester, MPRestError, TaskType
from pymatgen.core.periodic_table import Element
from pymatgen.core.structure import Structure, Composition
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.electronic_structure.dos import CompleteDos
from pymatgen.electronic_structure.bandstructure import (
BandStructureSymmLine, BandStructure)
from pymatgen.entries.compatibility import MaterialsProjectCompatibility
from pymatgen.analysis.phase_diagram import PhaseDiagram
from pymatgen.analysis.pourbaix_diagram import PourbaixEntry, PourbaixDiagram
from pymatgen.analysis.wulff import WulffShape
from pymatgen.analysis.reaction_calculator import Reaction
from pymatgen.io.cif import CifParser
from pymatgen.phonon.bandstructure import PhononBandStructureSymmLine
from pymatgen.phonon.dos import CompletePhononDos
from pymatgen.util.testing import PymatgenTest
@unittest.skipIf(not SETTINGS.get("PMG_MAPI_KEY"),
"PMG_MAPI_KEY environment variable not set.")
class MPResterTest(PymatgenTest):
_multiprocess_shared_ = True
def setUp(self):
self.rester = MPRester()
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
self.rester.session.close()
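    # The tests below talk to the live Materials Project REST API, so the
    # expected values track the production database and may need updating
    # as upstream data changes.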
def test_get_all_materials_ids_doc(self):
mids = self.rester.get_materials_ids("Al2O3")
random.shuffle(mids)
doc = self.rester.get_doc(mids.pop(0))
self.assertEqual(doc["pretty_formula"], "Al2O3")
def test_get_xas_data(self):
# Test getting XAS data
data = self.rester.get_xas_data("mp-19017", "Li")
self.assertEqual("mp-19017,Li", data['mid_and_el'])
self.assertAlmostEqual(data['spectrum']['x'][0], 55.178, places=2)
self.assertAlmostEqual(data['spectrum']['y'][0], 0.0164634, places=2)
def test_get_data(self):
props = ["energy", "energy_per_atom", "formation_energy_per_atom",
"nsites", "unit_cell_formula", "pretty_formula", "is_hubbard",
"elements", "nelements", "e_above_hull", "hubbards",
"is_compatible", "task_ids",
"density", "icsd_ids", "total_magnetization"]
expected_vals = [-191.3359011, -6.833425039285714, -2.5515769497278913,
28, {'P': 4, 'Fe': 4, 'O': 16, 'Li': 4},
"LiFePO4", True, ['Li', 'O', 'P', 'Fe'], 4, 0.0,
{'Fe': 5.3, 'Li': 0.0, 'O': 0.0, 'P': 0.0}, True,
{'mp-19017', 'mp-540081', 'mp-601412'},
3.464840709092822,
[159107, 154117, 160776, 99860, 181272, 166815,
260571, 92198, 165000, 155580, 38209, 161479, 153699,
260569, 260570, 200155, 260572, 181341, 181342,
72545, 56291, 97764, 162282, 155635],
3.999999999]
for (i, prop) in enumerate(props):
if prop not in ['hubbards', 'unit_cell_formula', 'elements',
'icsd_ids', 'task_ids']:
val = self.rester.get_data("mp-19017", prop=prop)[0][prop]
self.assertAlmostEqual(expected_vals[i], val, 2, "Failed with property %s" % prop)
elif prop in ["elements", "icsd_ids", "task_ids"]:
upstream_vals = set(
self.rester.get_data("mp-19017", prop=prop)[0][prop])
self.assertLessEqual(set(expected_vals[i]), upstream_vals)
else:
self.assertEqual(expected_vals[i],
self.rester.get_data("mp-19017",
prop=prop)[0][prop])
props = ['structure', 'initial_structure', 'final_structure', 'entry']
for prop in props:
obj = self.rester.get_data("mp-19017", prop=prop)[0][prop]
if prop.endswith("structure"):
self.assertIsInstance(obj, Structure)
elif prop == "entry":
obj = self.rester.get_data("mp-19017", prop=prop)[0][prop]
self.assertIsInstance(obj, ComputedEntry)
# Test chemsys search
data = self.rester.get_data('Fe-Li-O', prop='unit_cell_formula')
self.assertTrue(len(data) > 1)
elements = {Element("Li"), Element("Fe"), Element("O")}
for d in data:
self.assertTrue(
set(Composition(d['unit_cell_formula']).elements).issubset(
elements))
self.assertRaises(MPRestError, self.rester.get_data, "Fe2O3",
"badmethod")
# Test getting supported properties
self.assertNotEqual(self.rester.get_task_data("mp-30"), [])
# Test aliasing
data = self.rester.get_task_data("mp-30", "energy")
self.assertAlmostEqual(data[0]["energy"], -4.09929227, places=2)
def test_get_materials_id_from_task_id(self):
self.assertEqual(self.rester.get_materials_id_from_task_id(
"mp-540081"), "mp-19017")
def test_get_materials_id_references(self):
# nosetests pymatgen/matproj/tests/test_matproj.py:MPResterTest.test_get_materials_id_references
m = MPRester()
data = m.get_materials_id_references('mp-123')
self.assertTrue(len(data) > 1000)
def test_find_structure(self):
# nosetests pymatgen/matproj/tests/test_matproj.py:MPResterTest.test_find_structure
m = MPRester()
ciffile = self.TEST_FILES_DIR / 'Fe3O4.cif'
data = m.find_structure(str(ciffile))
self.assertTrue(len(data) > 1)
s = CifParser(ciffile).get_structures()[0]
data = m.find_structure(s)
self.assertTrue(len(data) > 1)
def test_get_entries_in_chemsys(self):
syms = ["Li", "Fe", "O"]
syms2 = "Li-Fe-O"
entries = self.rester.get_entries_in_chemsys(syms)
entries2 = self.rester.get_entries_in_chemsys(syms2)
elements = set([Element(sym) for sym in syms])
for e in entries:
self.assertIsInstance(e, ComputedEntry)
self.assertTrue(set(e.composition.elements).issubset(elements))
e1 = set([i.entry_id for i in entries])
e2 = set([i.entry_id for i in entries2])
self.assertTrue(e1 == e2)
def test_get_structure_by_material_id(self):
s1 = self.rester.get_structure_by_material_id("mp-1")
self.assertEqual(s1.formula, "Cs1")
def test_get_entry_by_material_id(self):
e = self.rester.get_entry_by_material_id("mp-19017")
self.assertIsInstance(e, ComputedEntry)
        self.assertEqual(e.composition.reduced_formula, "LiFePO4")
def test_query(self):
criteria = {'elements': {'$in': ['Li', 'Na', 'K'], '$all': ['O']}}
props = ['pretty_formula', 'energy']
data = self.rester.query(
criteria=criteria, properties=props, chunk_size=0)
self.assertTrue(len(data) > 6)
data = self.rester.query(
criteria="*2O", properties=props, chunk_size=0)
self.assertGreaterEqual(len(data), 52)
self.assertIn("Li2O", (d["pretty_formula"] for d in data))
def test_query_chunk_size(self):
criteria = {"nelements": 2, "elements": "O"}
props = ['pretty_formula']
data1 = self.rester.query(
criteria=criteria, properties=props, chunk_size=0)
data2 = self.rester.query(
criteria=criteria, properties=props, chunk_size=500)
self.assertEqual({d['pretty_formula'] for d in data1},
{d['pretty_formula'] for d in data2})
self.assertIn("Al2O3", {d['pretty_formula'] for d in data1})
def test_get_exp_thermo_data(self):
data = self.rester.get_exp_thermo_data("Fe2O3")
self.assertTrue(len(data) > 0)
for d in data:
self.assertEqual(d.formula, "Fe2O3")
def test_get_dos_by_id(self):
dos = self.rester.get_dos_by_material_id("mp-2254")
self.assertIsInstance(dos, CompleteDos)
def test_get_bandstructure_by_material_id(self):
bs = self.rester.get_bandstructure_by_material_id("mp-2254")
self.assertIsInstance(bs, BandStructureSymmLine)
bs_unif = self.rester.get_bandstructure_by_material_id(
"mp-2254", line_mode=False)
self.assertIsInstance(bs_unif, BandStructure)
self.assertNotIsInstance(bs_unif, BandStructureSymmLine)
def test_get_phonon_data_by_material_id(self):
bs = self.rester.get_phonon_bandstructure_by_material_id("mp-661")
self.assertIsInstance(bs, PhononBandStructureSymmLine)
dos = self.rester.get_phonon_dos_by_material_id("mp-661")
self.assertIsInstance(dos, CompletePhononDos)
ddb_str = self.rester.get_phonon_ddb_by_material_id("mp-661")
self.assertIsInstance(ddb_str, str)
def test_get_structures(self):
structs = self.rester.get_structures("Mn3O4")
self.assertTrue(len(structs) > 0)
def test_get_entries(self):
entries = self.rester.get_entries("TiO2")
self.assertTrue(len(entries) > 1)
for e in entries:
self.assertEqual(e.composition.reduced_formula, "TiO2")
entries = self.rester.get_entries("TiO2", inc_structure=True)
self.assertTrue(len(entries) > 1)
for e in entries:
self.assertEqual(e.structure.composition.reduced_formula, "TiO2")
# all_entries = self.rester.get_entries("Fe", compatible_only=False)
# entries = self.rester.get_entries("Fe", compatible_only=True)
# self.assertTrue(len(entries) < len(all_entries))
entries = self.rester.get_entries("Fe", compatible_only=True,
property_data=["cif"])
self.assertIn("cif", entries[0].data)
for e in self.rester.get_entries("CdO2", inc_structure=False):
self.assertIsNotNone(e.data["oxide_type"])
# test if it will retrieve the conventional unit cell of Ni
entry = self.rester.get_entry_by_material_id(
"mp-23", inc_structure=True, conventional_unit_cell=True)
Ni = entry.structure
self.assertEqual(Ni.lattice.a, Ni.lattice.b)
self.assertEqual(Ni.lattice.a, Ni.lattice.c)
self.assertEqual(Ni.lattice.alpha, 90)
self.assertEqual(Ni.lattice.beta, 90)
self.assertEqual(Ni.lattice.gamma, 90)
# Ensure energy per atom is same
primNi = self.rester.get_entry_by_material_id(
"mp-23", inc_structure=True, conventional_unit_cell=False)
self.assertEqual(primNi.energy_per_atom, entry.energy_per_atom)
Ni = self.rester.get_structure_by_material_id(
"mp-23", conventional_unit_cell=True)
self.assertEqual(Ni.lattice.a, Ni.lattice.b)
self.assertEqual(Ni.lattice.a, Ni.lattice.c)
self.assertEqual(Ni.lattice.alpha, 90)
self.assertEqual(Ni.lattice.beta, 90)
self.assertEqual(Ni.lattice.gamma, 90)
# Test case where convs are different from initial and final
# th = self.rester.get_structure_by_material_id(
# "mp-37", conventional_unit_cell=True)
# th_entry = self.rester.get_entry_by_material_id(
# "mp-37", inc_structure=True, conventional_unit_cell=True)
# th_entry_initial = self.rester.get_entry_by_material_id(
# "mp-37", inc_structure="initial", conventional_unit_cell=True)
# self.assertEqual(th, th_entry.structure)
# self.assertEqual(len(th_entry.structure), 4)
# self.assertEqual(len(th_entry_initial.structure), 2)
# Test if the polymorphs of Fe are properly sorted
# by e_above_hull when sort_by_e_above_hull=True
Fe_entries = self.rester.get_entries("Fe", sort_by_e_above_hull=True)
self.assertEqual(Fe_entries[0].data["e_above_hull"], 0)
def test_get_pourbaix_entries(self):
pbx_entries = self.rester.get_pourbaix_entries(["Fe", "Cr"])
for pbx_entry in pbx_entries:
self.assertTrue(isinstance(pbx_entry, PourbaixEntry))
# Ensure entries are pourbaix compatible
pbx = PourbaixDiagram(pbx_entries)
# Try binary system
# pbx_entries = self.rester.get_pourbaix_entries(["Fe", "Cr"])
# pbx = PourbaixDiagram(pbx_entries)
# TODO: Shyue Ping: I do not understand this test. You seem to
# be grabbing Zn-S system, but I don't see proper test for anything,
# including Na ref. This test also takes a long time.
# Test Zn-S, which has Na in reference solids
# pbx_entries = self.rester.get_pourbaix_entries(["Zn", "S"])
def test_get_exp_entry(self):
entry = self.rester.get_exp_entry("Fe2O3")
self.assertEqual(entry.energy, -825.5)
def test_submit_query_delete_snl(self):
s = Structure([[5, 0, 0], [0, 5, 0], [0, 0, 5]], ["Fe"], [[0, 0, 0]])
# d = self.rester.submit_snl(
# [s, s], remarks=["unittest"],
# authors="Test User <test@materialsproject.com>")
# self.assertEqual(len(d), 2)
# data = self.rester.query_snl({"about.remarks": "unittest"})
# self.assertEqual(len(data), 2)
# snlids = [d["_id"] for d in data]
# self.rester.delete_snl(snlids)
# data = self.rester.query_snl({"about.remarks": "unittest"})
# self.assertEqual(len(data), 0)
def test_get_stability(self):
entries = self.rester.get_entries_in_chemsys(["Fe", "O"])
modified_entries = []
for entry in entries:
# Create modified entries with energies that are 0.01eV higher
# than the corresponding entries.
if entry.composition.reduced_formula == "Fe2O3":
modified_entries.append(
ComputedEntry(entry.composition,
entry.uncorrected_energy + 0.01,
parameters=entry.parameters,
entry_id="mod_{}".format(entry.entry_id)))
rest_ehulls = self.rester.get_stability(modified_entries)
all_entries = entries + modified_entries
compat = MaterialsProjectCompatibility()
all_entries = compat.process_entries(all_entries)
pd = PhaseDiagram(all_entries)
for e in all_entries:
if str(e.entry_id).startswith("mod"):
for d in rest_ehulls:
if d["entry_id"] == e.entry_id:
data = d
break
self.assertAlmostEqual(pd.get_e_above_hull(e),
data["e_above_hull"])
def test_get_reaction(self):
rxn = self.rester.get_reaction(["Li", "O"], ["Li2O"])
self.assertIn("Li2O", rxn["Experimental_references"])
def test_get_substrates(self):
substrate_data = self.rester.get_substrates('mp-123', 5, [1, 0, 0])
substrates = [sub_dict['sub_id'] for sub_dict in substrate_data]
self.assertIn("mp-2534", substrates)
def test_get_surface_data(self):
data = self.rester.get_surface_data("mp-126") # Pt
one_surf = self.rester.get_surface_data('mp-129', miller_index=[-2, -3, 1])
self.assertAlmostEqual(one_surf['surface_energy'], 2.99156963, places=2)
self.assertArrayAlmostEqual(one_surf['miller_index'], [3, 2, 1])
self.assertIn("surfaces", data)
surfaces = data["surfaces"]
self.assertTrue(len(surfaces) > 0)
surface = surfaces.pop()
self.assertIn("miller_index", surface)
self.assertIn("surface_energy", surface)
self.assertIn("is_reconstructed", surface)
data_inc = self.rester.get_surface_data("mp-126", inc_structures=True)
self.assertIn("structure", data_inc["surfaces"][0])
def test_get_wulff_shape(self):
ws = self.rester.get_wulff_shape("mp-126")
self.assertTrue(isinstance(ws, WulffShape))
def test_get_cohesive_energy(self):
ecoh = self.rester.get_cohesive_energy("mp-13")
self.assertAlmostEqual(ecoh, 5.04543279, places=4)
def test_get_gb_data(self):
mo_gbs = self.rester.get_gb_data(chemsys='Mo')
self.assertEqual(len(mo_gbs), 10)
mo_gbs_s5 = self.rester.get_gb_data(pretty_formula='Mo', sigma=5)
self.assertEqual(len(mo_gbs_s5), 3)
mo_s3_112 = self.rester.get_gb_data(material_id='mp-129', sigma=3,
gb_plane=[1, -1, -2],
include_work_of_separation=True)
self.assertEqual(len(mo_s3_112), 1)
gb_f = mo_s3_112[0]['final_structure']
self.assertArrayAlmostEqual(gb_f.rotation_axis, [1, 1, 0])
self.assertAlmostEqual(gb_f.rotation_angle, 109.47122, places=4)
self.assertAlmostEqual(mo_s3_112[0]['gb_energy'], 0.47965, places=2)
self.assertAlmostEqual(mo_s3_112[0]['work_of_separation'], 6.318144, places=2)
self.assertIn("Mo24", gb_f.formula)
hcp_s7 = self.rester.get_gb_data(material_id='mp-87', gb_plane=[0, 0, 0, 1],
include_work_of_separation=True)
self.assertAlmostEqual(hcp_s7[0]['gb_energy'], 1.12, places=2)
self.assertAlmostEqual(hcp_s7[0]['work_of_separation'], 2.47, places=2)
def test_get_interface_reactions(self):
kinks = self.rester.get_interface_reactions("LiCoO2", "Li3PS4")
self.assertTrue(len(kinks) > 0)
kink = kinks[0]
self.assertIn("energy", kink)
self.assertIn("ratio_atomic", kink)
self.assertIn("rxn", kink)
self.assertTrue(isinstance(kink['rxn'], Reaction))
kinks_open_O = self.rester.get_interface_reactions(
"LiCoO2", "Li3PS4", open_el="O", relative_mu=-1)
self.assertTrue(len(kinks_open_O) > 0)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always", message="The reactant.+")
self.rester.get_interface_reactions("LiCoO2", "MnO9")
self.assertTrue("The reactant" in str(w[-1].message))
def test_download_info(self):
material_ids = ['mp-32800', 'mp-23494']
task_types = [TaskType.GGA_OPT, TaskType.GGA_UNIFORM]
file_patterns = ['vasprun*', 'OUTCAR*']
meta, urls = self.rester.get_download_info(
material_ids, task_types=task_types,
file_patterns=file_patterns
)
self.assertEqual(meta, {
'mp-23494': [
{'task_id': 'mp-669929', 'task_type': 'GGA NSCF Uniform'},
{'task_id': 'mp-23494', 'task_type': 'GGA Structure Optimization'},
# for provenance {'task_id': 'mp-688563', 'task_type': 'GGA NSCF Line'},
],
'mp-32800': [
{'task_id': 'mp-739635', 'task_type': 'GGA NSCF Uniform'},
{'task_id': 'mp-32800', 'task_type': 'GGA Structure Optimization'},
# for provenance {'task_id': 'mp-746913', 'task_type': 'GGA NSCF Line'},
]
})
prefix = 'http://labdev-nomad.esc.rzg.mpg.de/fairdi/nomad/mp/api/raw/query?'
# previous test
# ids = 'mp-23494,mp-688563,mp-32800,mp-746913'
ids = 'mp-669929,mp-23494,mp-739635,mp-32800'
self.assertEqual(
urls[0], f'{prefix}file_pattern=vasprun*&file_pattern=OUTCAR*&external_id={ids}'
)
def test_parse_criteria(self):
crit = MPRester.parse_criteria("mp-1234 Li-*")
self.assertIn("Li-O", crit["$or"][1]["chemsys"]["$in"])
self.assertIn({"task_id": "mp-1234"}, crit["$or"])
crit = MPRester.parse_criteria("Li2*")
self.assertIn("Li2O", crit["pretty_formula"]["$in"])
self.assertIn("Li2I", crit["pretty_formula"]["$in"])
self.assertIn("CsLi2", crit["pretty_formula"]["$in"])
crit = MPRester.parse_criteria("Li-*-*")
self.assertIn("Li-Re-Ru", crit["chemsys"]["$in"])
self.assertNotIn("Li-Li", crit["chemsys"]["$in"])
comps = MPRester.parse_criteria("**O3")["pretty_formula"]["$in"]
for c in comps:
self.assertEqual(len(Composition(c)), 3, "Failed in %s" % c)
chemsys = MPRester.parse_criteria("{Fe,Mn}-O")["chemsys"]["$in"]
self.assertEqual(len(chemsys), 2)
comps = MPRester.parse_criteria("{Fe,Mn,Co}O")["pretty_formula"]["$in"]
self.assertEqual(len(comps), 3, comps)
# Let's test some invalid symbols
self.assertRaises(ValueError, MPRester.parse_criteria, "li-fe")
self.assertRaises(ValueError, MPRester.parse_criteria, "LO2")
crit = MPRester.parse_criteria("POPO2")
self.assertIn("P2O3", crit["pretty_formula"]["$in"])
def test_include_user_agent(self):
headers = self.rester.session.headers
self.assertIn("user-agent", headers, msg="Include user-agent header by default")
m = re.match(
r"pymatgen/(\d+)\.(\d+)\.(\d+) \(Python/(\d+)\.(\d)+\.(\d+) ([^\/]*)/([^\)]*)\)",
headers['user-agent'])
self.assertIsNotNone(m, msg="Unexpected user-agent value {}".format(headers['user-agent']))
self.assertEqual(m.groups()[:3], tuple(pmg_version.split(".")))
self.assertEqual(
m.groups()[3:6],
tuple(str(n) for n in (sys.version_info.major, sys.version_info.minor, sys.version_info.micro))
)
self.rester = MPRester(include_user_agent=False)
self.assertNotIn("user-agent", self.rester.session.headers, msg="user-agent header unwanted")
if __name__ == "__main__":
unittest.main()
|
|
from .. import Availability, Class, Constant, Define, Method, Parameter, Type
gx_class = Class('FFT',
doc="""
This class allows for the application of predefined
filters to data in an OASIS database. The system uses
the Winograd algorithm to transform data in the spatial
domain to the wavenumber or Fourier domain.
""")
gx_defines = [
Define('FFT_DETREND',
doc="Detrending option",
constants=[
Constant('FFT_DETREND_NONE', value='0', type=Type.INT32_T,
doc="No trend remove"),
Constant('FFT_DETREND_ENDS', value='1', type=Type.INT32_T,
doc="Detrend order 1 using only two end points"),
Constant('FFT_DETREND_ALL', value='2', type=Type.INT32_T,
doc="Detrend order 1 using all data points"),
Constant('FFT_DETREND_MEAN', value='3', type=Type.INT32_T,
doc="Remove mean value")
])]
gx_methods = {
'Miscellaneous': [
Method('AppDens_FFT', module='geogxx', version='5.0.0',
availability=Availability.EXTENSION,
doc="Appparent density filter",
return_type=Type.VOID,
parameters = [
Parameter('fft', type="FFT",
doc=":class:`FFT` to filter"),
Parameter('thick', type=Type.DOUBLE,
doc="Thickness (meters) of the earth model"),
Parameter('dens', type=Type.DOUBLE,
doc="Background density (g/cm3) (default = 0)")
]),
Method('AppSusc_FFT', module='geogxx', version='5.0.0',
availability=Availability.EXTENSION,
doc="Apparent susceptiblity filter",
notes="""
Reduction to magnetic pole (:func:`RedPol_FFT`) and downward continuation
(:func:`Contin_FFT`) should be called BEFORE using :func:`AppSusc_FFT`.
""",
return_type=Type.VOID,
parameters = [
Parameter('fft', type="FFT",
doc=":class:`FFT` to filter"),
Parameter('strength', type=Type.DOUBLE,
doc="Total magnetic field strength")
]),
Method('BandPass_FFT', module='geogxx', version='5.0.0',
availability=Availability.EXTENSION,
doc="Bandpass filter (using low and high wavelength cutoffs)",
return_type=Type.VOID,
parameters = [
Parameter('fft', type="FFT",
doc=":class:`FFT` to filter"),
Parameter('llen', type=Type.DOUBLE,
doc="Low Cutoff wavelength (meters)"),
Parameter('hlen', type=Type.DOUBLE,
doc="High Cutoff wavelength (meter)"),
Parameter('pass_defined', type=Type.INT32_T,
doc="1= Pass the defined band (default); 0= Reject the band")
]),
Method('BWorth_FFT', module='geogxx', version='5.0.0',
availability=Availability.EXTENSION,
doc="Butterworth filter",
return_type=Type.VOID,
parameters = [
Parameter('fft', type="FFT",
doc=":class:`FFT` to filter"),
                   Parameter('clen', type=Type.DOUBLE,
                             doc="Central cutoff wavelength (meters)"),
                   Parameter('degree', type=Type.DOUBLE,
                             doc="Degree of the filter function (default = 8.0)"),
                   Parameter('filter_type', type=Type.INT32_T,
                             doc="Filter type: 1= Low-pass (regional) filter (default); 0= High-pass (residual) filter")
]),
Method('RCFilter_FFT', module='geogxx', version='8.5.0',
availability=Availability.EXTENSION,
doc="RC filter",
return_type=Type.VOID,
parameters = [
Parameter('fft', type="FFT",
doc=":class:`FFT` to filter"),
                   Parameter('clen', type=Type.DOUBLE,
                             doc="Central cutoff wavelength (meters)"),
                   Parameter('filter_type', type=Type.INT32_T,
                             doc="Filter type: 1= Low-pass (regional) filter (default); 0= High-pass (residual) filter")
]),
Method('Contin_FFT', module='geogxx', version='5.0.0',
availability=Availability.EXTENSION,
doc="Upward/Downward continuation filter",
return_type=Type.VOID,
parameters = [
Parameter('fft', type="FFT",
doc=":class:`FFT` to filter"),
Parameter('dist', type=Type.DOUBLE,
doc="Distance to continue; positive = downwards negative = upwards")
]),
Method('CosRoll_FFT', module='geogxx', version='5.0.0',
availability=Availability.EXTENSION,
doc="Cosine roll-off filter",
return_type=Type.VOID,
parameters = [
Parameter('fft', type="FFT",
doc=":class:`FFT` to filter"),
Parameter('llen', type=Type.DOUBLE,
doc="Low wavelength start point (meters)"),
Parameter('hlen', type=Type.DOUBLE,
doc="High wavelength end point (meters)"),
Parameter('degree', type=Type.DOUBLE,
doc="Degree of the filter function (default = 2.0)"),
Parameter('type', type=Type.INT32_T,
doc="Filter type: 1= Low-pass (regional) filter (default) 0= High-pass (residual) filter")
]),
Method('Create_FFT', module='geogxx', version='5.0.0',
availability=Availability.EXTENSION,
doc="Create a New :class:`FFT` with detrend options.",
notes="""
The detrending options control the removal of a trend from the data
before the :class:`FFT` is applied. The default data expansion is 10% before :class:`FFT`.
""",
return_type="FFT",
return_doc=":class:`FFT` Object",
parameters = [
Parameter('gvv', type="VV",
doc=":class:`VV` to transform."),
Parameter('interv', type=Type.DOUBLE,
doc="Element space interval"),
Parameter('trend', type=Type.INT32_T,
doc=":def:`FFT_DETREND`")
]),
Method('CreateEx_FFT', module='geogxx', version='5.1.8',
availability=Availability.EXTENSION,
doc="Create a New :class:`FFT` with detrend and expansion options.",
notes="""
The detrending options control the removal of a trend from the data
before the :class:`FFT` is applied. The expansion options control the minimum
data expansion before the :class:`FFT` is applied.
""",
return_type="FFT",
return_doc=":class:`FFT` Object",
parameters = [
Parameter('gvv', type="VV",
doc=":class:`VV` to transform."),
Parameter('interv', type=Type.DOUBLE,
doc="Element space interval"),
Parameter('trend', type=Type.INT32_T,
doc=":def:`FFT_DETREND`"),
Parameter('expansion', type=Type.DOUBLE,
doc="Minimum expansion %")
]),
Method('CreateRef_FFT', module='geogxx', version='5.0.0',
availability=Availability.EXTENSION,
doc="""
Create :class:`FFT` object with detrend options from reference (original) channel,
but no :class:`FFT` process.
""",
notes="""
               This just creates an object. It is intended to be followed
               immediately by a call to :func:`SetVV_FFT`.
""",
return_type="FFT",
return_doc=":class:`FFT` Object",
parameters = [
Parameter('gvv', type="VV",
doc=":class:`VV` contains channel data to perform :class:`FFT` operations upon."),
Parameter('interv', type=Type.DOUBLE,
doc="Element space interval, should be the same as in :func:`CreateEx_FFT` call"),
Parameter('trend', type=Type.INT32_T,
doc=":def:`FFT_DETREND`")
]),
Method('CreateRefEx_FFT', module='geogxx', version='5.1.8',
availability=Availability.EXTENSION,
doc="""
Create :class:`FFT` object with detrend and expansion options from reference (original) channel,
but no :class:`FFT` process.
""",
notes="""
               This just creates an object. It is intended to be followed
               immediately by a call to :func:`SetVV_FFT`.
""",
return_type="FFT",
return_doc=":class:`FFT` Object",
parameters = [
Parameter('gvv', type="VV",
doc=":class:`VV` contains channel data to perform :class:`FFT` operations upon."),
Parameter('interv', type=Type.DOUBLE,
doc="Element space interval, should be the same as in :func:`CreateEx_FFT` call"),
Parameter('trend', type=Type.INT32_T,
doc=":def:`FFT_DETREND`"),
Parameter('expansion', type=Type.DOUBLE,
doc="Minimum expansion %, should be the same as in :func:`CreateEx_FFT` call"),
Parameter('d_cmult', type=Type.DOUBLE,
doc="DC level multiple")
]),
Method('Destroy_FFT', module='geogxx', version='5.0.0',
availability=Availability.PUBLIC,
doc="Destroy an :class:`FFT`.",
return_type=Type.VOID,
parameters = [
Parameter('fft', type="FFT",
doc=":class:`FFT` to destroy.")
]),
Method('Gaus_FFT', module='geogxx', version='5.0.0',
availability=Availability.EXTENSION,
doc="Gaussian filter",
return_type=Type.VOID,
parameters = [
Parameter('fft', type="FFT",
doc=":class:`FFT` to filter"),
Parameter('dev', type=Type.DOUBLE,
doc="Standard deviation cutoff of function (meters)"),
Parameter('type', type=Type.INT32_T,
doc="Filter type: 1= Low-pass (residual) filter (default) 0= High-pass (regional) filter")
]),
Method('GetVV_FFT', module='geogxx', version='5.0.0',
availability=Availability.EXTENSION,
doc="Copies real and imaginary :class:`VV`'s to user :class:`VV`'s.",
return_type=Type.VOID,
parameters = [
Parameter('fft', type="FFT"),
Parameter('gv_vr', type="VV",
doc="Real component"),
Parameter('gv_vi', type="VV",
doc="Imaginary component")
]),
Method('HDrv_FFT', module='geogxx', version='5.0.0',
availability=Availability.EXTENSION,
doc="Horizontal derivative",
return_type=Type.VOID,
parameters = [
Parameter('fft', type="FFT",
doc=":class:`FFT` to filter"),
Parameter('order', type=Type.DOUBLE,
doc="Order of differentiation (default = 1)")
]),
Method('HighPass_FFT', module='geogxx', version='5.0.0',
availability=Availability.EXTENSION,
doc="High bandpass filter",
return_type=Type.VOID,
parameters = [
Parameter('fft', type="FFT",
doc=":class:`FFT` to filter"),
Parameter('wlen', type=Type.DOUBLE,
doc="Cutoff wavelength (meter)"),
Parameter('fid_int', type=Type.DOUBLE,
doc="Fiducial increment of the :class:`FFT`'s channel data")
]),
Method('HInt_FFT', module='geogxx', version='5.1.4',
availability=Availability.EXTENSION,
doc="Horizontal integration",
return_type=Type.VOID,
parameters = [
Parameter('fft', type="FFT",
doc=":class:`FFT` to integrate")
]),
Method('Inverse_FFT', module='geogxx', version='5.0.0',
availability=Availability.EXTENSION,
doc="Inverse the :class:`FFT` from wave number domain to space domain",
return_type=Type.VOID,
parameters = [
Parameter('fft', type="FFT",
doc=":class:`FFT` to invert"),
Parameter('gvv', type="VV",
doc="Output :class:`VV`"),
Parameter('gv_vm', type="VV",
doc="Original :class:`VV` which was used to create :class:`FFT` (will be used as mask for output :class:`VV`; no masking if this parameter is NULL)")
]),
Method('LowPass_FFT', module='geogxx', version='5.0.0',
availability=Availability.EXTENSION,
doc="Low bandpass filter",
return_type=Type.VOID,
parameters = [
Parameter('fft', type="FFT",
doc=":class:`FFT` to filter"),
Parameter('wlen', type=Type.DOUBLE,
doc="Cutoff wavelength (meters)")
]),
Method('RedPol_FFT', module='geogxx', version='5.0.0',
availability=Availability.EXTENSION,
doc="Reduction to magnetic pole",
return_type=Type.VOID,
parameters = [
Parameter('fft', type="FFT",
doc=":class:`FFT` to filter"),
Parameter('inc', type=Type.DOUBLE,
doc="Geomagnetic inclination (degrees)"),
Parameter('dec', type=Type.DOUBLE,
doc="Geomagnetic declination (degrees)"),
Parameter('incp', type=Type.DOUBLE,
doc="Inclination (degrees) for amplitude correction (default = 20.0)"),
Parameter('dir', type=Type.DOUBLE,
doc="Direction (degrees) of Line from North")
]),
Method('rNyquist_FFT', module='geogxx', version='5.0.0',
availability=Availability.EXTENSION,
doc="Gets the Nyquist frequency (wavenumbers/sample unit).",
return_type=Type.DOUBLE,
return_doc="Nyquist frequency (wavenumbers/sample unit).",
parameters = [
Parameter('fft', type="FFT")
]),
Method('rSampIncr_FFT', module='geogxx', version='5.0.0',
availability=Availability.EXTENSION,
doc="Gets the original sample increment.",
return_type=Type.DOUBLE,
return_doc="Original sample increment.",
parameters = [
Parameter('fft', type="FFT")
]),
Method('rWaveIncr_FFT', module='geogxx', version='5.0.0',
availability=Availability.EXTENSION,
doc="Get the wave number increment.",
return_type=Type.DOUBLE,
return_doc="Wave number increment",
parameters = [
Parameter('fft', type="FFT")
]),
Method('SetVV_FFT', module='geogxx', version='5.0.0',
availability=Availability.EXTENSION,
doc="Sets real and imaginary VVs in :class:`FFT`.",
notes="""
The :class:`VV` must have been obtained from the same :class:`FFT`
           using the :func:`GetVV_FFT` method.
""",
return_type=Type.VOID,
parameters = [
Parameter('fft', type="FFT"),
Parameter('gv_vr', type="VV",
doc="Real component"),
Parameter('gv_vi', type="VV",
doc="Imaginary component")
]),
Method('Spectrum_FFT', module='geogxx', version='5.0.0',
availability=Availability.EXTENSION,
doc="Calculates a power spectrum",
return_type=Type.VOID,
parameters = [
Parameter('fft', type="FFT",
doc=":class:`FFT` to calculate power spectrum for"),
Parameter('gvv', type="VV",
doc="Output power spectrum :class:`VV`")
]),
Method('VDrv_FFT', module='geogxx', version='5.0.0',
availability=Availability.EXTENSION,
doc="Vertical derivative",
return_type=Type.VOID,
parameters = [
Parameter('fft', type="FFT",
doc=":class:`FFT` to filter"),
Parameter('order', type=Type.DOUBLE,
doc="Order of differentiation (default = 1)")
]),
Method('VInt_FFT', module='geogxx', version='5.0.0',
availability=Availability.EXTENSION,
doc="Vertical integration",
return_type=Type.VOID,
parameters = [
Parameter('fft', type="FFT",
doc=":class:`FFT` to integrate")
]),
Method('WriteSpectrum_FFT', module='geogxx', version='5.0.0',
availability=Availability.EXTENSION,
doc="Writes a power spectrum to a file",
return_type=Type.VOID,
parameters = [
Parameter('fft', type="FFT",
doc=":class:`FFT` used to calculate power spectrum :class:`VV`"),
Parameter('gvv', type="VV",
doc="Output power spectrum :class:`VV`"),
Parameter('out_file', type=Type.STRING,
doc="File name for output spectrum")
])
]
}
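# A minimal usage sketch of the API this spec generates, assuming the
# geosoft.gxapi Python bindings expose these methods in snake_case (the usual
# convention for generated GX classes); gvv and out_vv are pre-built GXVVs:
#
#   import geosoft.gxapi as gxapi
#   fft = gxapi.GXFFT.create(gvv, 1.0, gxapi.FFT_DETREND_ALL)
#   fft.band_pass(100.0, 1000.0, 1)  # keep wavelengths between 100 m and 1000 m
#   fft.inverse(out_vv, gvv)         # back to the space domain, masked by gvv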
|
|
#!/eecs/research/asr/mingbin/python-workspace/hopeless/bin/python
import numpy, argparse, logging, time, cPickle, codecs, copy, os
from itertools import product, chain, imap, ifilter
# ================================================================================
if __name__ == '__main__':
logger = logging.getLogger()
logging.basicConfig( format = '%(asctime)s : %(levelname)s : %(message)s',
level = logging.INFO )
parser = argparse.ArgumentParser()
parser.add_argument( 'basename', type = str )
parser.add_argument( '--iflytek', action = 'store_true', default = False )
args = parser.parse_args()
logger.info( str(args) + '\n' )
from fofe_mention_net import *
# ================================================================================
with open( args.basename + '.config', 'rb' ) as fp:
config = cPickle.load( fp )
logger.info( config.__dict__ )
logger.info( 'configuration loaded' )
valid_file = 'kbp-result/valid-tune-%s.predicted' % config.language
test_file = 'kbp-result/test-tune-%s.predicted' % config.language
assert config.language == 'spa' or args.iflytek
if not os.path.exists( valid_file ) or \
config.language == 'spa' or \
not os.path.exists( test_file ):
mention_net = fofe_mention_net( config )
mention_net.fromfile( args.basename )
logger.info( 'model loaded' )
if config.language != 'cmn':
numericizer1 = vocabulary( config.word_embedding + '-case-insensitive.wordlist',
config.char_alpha, False )
numericizer2 = vocabulary( config.word_embedding + '-case-sensitive.wordlist',
config.char_alpha, True )
else:
numericizer1 = chinese_word_vocab( config.word_embedding + '-char.wordlist' )
numericizer2 = chinese_word_vocab( config.word_embedding + \
('-avg.wordlist' if config.average else '-word.wordlist') )
logger.info( 'vocabulary loaded' )
kbp_gazetteer = gazetteer( config.data_path + '/kbp-gazetteer' )
# idx2ner = [ 'PER_NAM', 'PER_NOM', 'ORG_NAM', 'GPE_NAM', 'LOC_NAM', 'FAC_NAM', 'TTL_NAM', 'O' ]
idx2ner = [ 'PER_NAM', 'ORG_NAM', 'GPE_NAM', 'LOC_NAM', 'FAC_NAM',
'PER_NOM', 'ORG_NOM', 'GPE_NOM', 'LOC_NOM', 'FAC_NOM',
'O' ]
if not os.path.exists( valid_file ):
# load 10% KBP test data
source = imap( lambda x: x[1],
ifilter( lambda x : x[0] % 10 >= 9,
enumerate( imap( lambda x: x[:4],
LoadED( config.data_path + '/%s-eval-parsed' % config.language ) ) ) ) )
# load 5% iflytek data
if args.iflytek:
source = chain( source,
imap( lambda x: x[1],
ifilter( lambda x : 90 <= x[0] % 100 < 95,
enumerate( imap( lambda x: x[:4],
LoadED( 'iflytek-clean-%s' % config.language ) ) ) ) ) )
            # instantiate a batch constructor
valid = batch_constructor( source,
numericizer1, numericizer2, gazetteer = kbp_gazetteer,
alpha = config.word_alpha, window = config.n_window,
n_label_type = config.n_label_type,
language = config.language )
logger.info( 'valid: ' + str(valid) )
if config.language != 'spa' and not os.path.exists( test_file ):
source = imap( lambda x: x[1],
ifilter( lambda x: x[0] % 100 >= 95,
enumerate( imap( lambda x: x[:4],
LoadED( 'iflytek-clean-%s' % config.language ) ) ) ) )
test = batch_constructor( source,
numericizer1, numericizer2, gazetteer = kbp_gazetteer,
alpha = config.word_alpha, window = config.n_window,
n_label_type = config.n_label_type,
language = config.language )
logger.info( 'test: ' + str(test) )
# ================================================================================
if not os.path.exists( 'kbp-result' ):
os.makedirs( 'kbp-result' )
###############################################
########## go through validation set ##########
###############################################
if not os.path.exists( valid_file ):
with open( valid_file, 'wb' ) as valid_predicted:
cost, cnt = 0, 0
for example in valid.mini_batch_multi_thread(
256 if config.feature_choice & (1 << 9 ) > 0 else 1024,
False, 1, 1, config.feature_choice ):
c, pi, pv = mention_net.eval( example )
cost += c * example[-1].shape[0]
cnt += example[-1].shape[0]
for expected, estimate, probability in zip( example[-1], pi, pv ):
print >> valid_predicted, '%d %d %s' % \
(expected, estimate, ' '.join( [('%f' % x) for x in probability.tolist()] ))
valid_cost = cost / cnt
logger.info( 'validation set iterated' )
#########################################
########## go through test set ##########
#########################################
if config.language != 'spa' and not os.path.exists( test_file ):
with open( test_file, 'wb' ) as test_predicted:
cost, cnt = 0, 0
for example in test.mini_batch_multi_thread(
256 if config.feature_choice & (1 << 9 ) > 0 else 1024,
False, 1, 1, config.feature_choice ):
c, pi, pv = mention_net.eval( example )
cost += c * example[-1].shape[0]
cnt += example[-1].shape[0]
for expected, estimate, probability in zip( example[-1], pi, pv ):
print >> test_predicted, '%d %d %s' % \
(expected, estimate, ' '.join( [('%f' % x) for x in probability.tolist()] ))
test_cost = cost / cnt
logger.info( 'test set iterated' )
###################################################################################
    ########## exhaustively iterate 3 decoding algorithms with 0.x cut-offs ##########
###################################################################################
# algo_list = ['highest-first', 'longest-first', 'subsumption-removal']
idx2algo = { 1: 'highest-first', 2: 'longest-first', 3:'subsumption-removal' }
algo2idx = { 'highest-first': 1, 'longest-first': 2, 'subsumption-removal': 3 }
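    # The sweeps below keep the decoding algorithm fixed and greedily tune the
    # per-type outer/inner probability cut-offs one label type at a time,
    # keeping any setting that improves validation F1.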
source = imap( lambda x: x[1],
ifilter( lambda x : x[0] % 10 >= 9,
enumerate( imap( lambda x: x[:4],
LoadED( config.data_path + '/%s-eval-parsed' % config.language ) ) ) ) )
if args.iflytek:
source = chain( source,
imap( lambda x: x[1],
ifilter( lambda x : 90 <= x[0] % 100 < 95,
enumerate( imap( lambda x: x[:4],
LoadED( 'iflytek-clean-%s' % config.language ) ) ) ) ) )
# ================================================================================
pp = [ p for p in PredictionParser( source,
valid_file,
config.n_window,
n_label_type = config.n_label_type ) ]
algorithm = config.algorithm
threshold = config.threshold
name = [ idx2algo[i] for i in algorithm ]
# ================================================================================
_, _, best_dev_fb1, info = evaluation( pp, [0.5, 0.9], [2, 1], True,
n_label_type = config.n_label_type,
decoder_callback = IndividualThreshold( [0.5] * 10, 0.5 ) )
logger.info( '%s\n%s' % ('validation', info) )
_, _, best_dev_fb1, info = evaluation( pp, [0.5, 0.5], [1, 1], True,
n_label_type = config.n_label_type )
logger.info( '%s\n%s' % ('validation', info) )
_, _, best_dev_fb1, info = evaluation( pp, threshold, algorithm, True,
n_label_type = config.n_label_type )
logger.info( '%s\n%s' % ('validation', info) )
# ================================================================================
config.customized_threshold = IndividualThreshold( [threshold[0]] * 10, [threshold[1]] * 10 )
# for algorithm in product( [1, 2], repeat = 2 ):
# algorithm = list( algorithm )
for _ in xrange(3):
for mt in xrange(10):
for t in numpy.arange(0.1, 1, 0.1).tolist():
it = copy.deepcopy( config.customized_threshold )
it.outer[mt] = t
precision, recall, f1, info = evaluation( pp, threshold, algorithm, True,
n_label_type = config.n_label_type,
decoder_callback = it )
logger.info( 'validation -- precision: %f, recall: %f, fb1: %f' % (precision, recall, f1) )
if f1 > best_dev_fb1:
best_dev_fb1, best_algorithm = f1, algorithm
best_precision, best_recall = precision, recall
# update threshold
config.customized_threshold = copy.deepcopy( it )
logger.info( 'algorithm: %-20s outer: %s' % \
(str([idx2algo[i] for i in algorithm]), str(it.outer)) )
logger.info( '%s\n%s' % ('validation', info) )
for mt in xrange(10):
for t in numpy.arange(0.1, 1, 0.1).tolist():
it = copy.deepcopy( config.customized_threshold )
it.inner[mt] = t
precision, recall, f1, info = evaluation( pp, threshold, algorithm, True,
n_label_type = config.n_label_type,
decoder_callback = it )
logger.info( 'validation -- precision: %f, recall: %f, fb1: %f' % (precision, recall, f1) )
if f1 > best_dev_fb1:
best_dev_fb1, best_algorithm = f1, algorithm
best_precision, best_recall = precision, recall
# update threshold
config.customized_threshold = copy.deepcopy( it )
logger.info( 'algorithm: %-20s inner: %s' % \
(str([idx2algo[i] for i in algorithm]), str(it.inner)) )
logger.info( '%s\n%s' % ('validation', info) )
logger.info( 'outer: ' + str(config.customized_threshold.outer) )
logger.info( 'inner: ' + str(config.customized_threshold.inner) )
# ================================================================================
# for threshold in product( numpy.arange( max(threshold[0] - 0.1, 0.2),
# min(threshold[0] + 0.2, 1.0),
# 0.05 ).tolist(),
# numpy.arange( max(threshold[1] - 0.1, 0.2),
# min(threshold[1] + 0.2, 1.0),
# 0.05 ).tolist() ):
# threshold = list( threshold )
# for customized_threshold in [ 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9 ]:
# customized = ORGcoverGPE( customized_threshold )
# precision, recall, f1, info = evaluation( pp, threshold, algorithm, True,
# n_label_type = config.n_label_type,
# decoder_callback = customized )
# logger.info( ('cut-off: %s, algorithm: %-20s' % (str(threshold), name)) +
# (', validation -- precision: %f, recall: %f, fb1: %f' % (precision, recall, f1)) )
# if f1 > best_dev_fb1:
# best_dev_fb1, best_threshold, best_algorithm = f1, threshold, algorithm
# best_precision, best_recall = precision, recall
# # update threshold
# config.threshold = best_threshold
# config.customized_threshold = customized
# logger.info( '%s\n%s' % ('validation', info) )
# ================================================================================
# report validation set performance
source = imap( lambda x: x[1],
ifilter( lambda x : x[0] % 10 >= 9,
enumerate( imap( lambda x: x[:4],
LoadED( config.data_path + '/%s-eval-parsed' % config.language ) ) ) ) )
if args.iflytek:
source = chain( source,
imap( lambda x: x[1],
ifilter( lambda x : 90 <= x[0] % 100 < 95,
enumerate( imap( lambda x: x[:4],
LoadED( 'iflytek-clean-%s' % config.language ) ) ) ) ) )
precision, recall, f1, info = evaluation( PredictionParser( source,
valid_file,
config.n_window,
n_label_type = config.n_label_type ),
config.threshold,
config.algorithm,
True,
analysis = None,
n_label_type = config.n_label_type,
decoder_callback = config.customized_threshold )
logger.info( '%s\n%s' % ('validation', info) )
if config.language != 'spa':
# report test set performance
source = imap( lambda x: x[1],
ifilter( lambda x: x[0] % 100 >= 95,
enumerate( imap( lambda x: x[:4],
LoadED( 'iflytek-clean-%s' % config.language ) ) ) ) )
precision, recall, f1, info = evaluation( PredictionParser( source,
test_file,
config.n_window,
n_label_type = config.n_label_type ),
config.threshold,
config.algorithm,
True,
analysis = None,
n_label_type = config.n_label_type,
decoder_callback = config.customized_threshold )
logger.info( '%s\n%s' % ('test', info) )
with open( args.basename + '.config', 'wb' ) as fp:
cPickle.dump( config, fp )
logger.info( 'customized threshold is stored in config' )
|
|
from __future__ import print_function
import datetime
import hashlib
import os
import shutil
import sys
import shell_util

# raw_input() was renamed to input() in Python 3; bind whichever exists so the
# confirmation prompt in entry() works under either interpreter.
try:
    get_input = raw_input  # Python 2
except NameError:
    get_input = input  # Python 3
figure_str = r"""
\begin{figure}[h]
\centering
\includegraphics[width=0.5\linewidth]{@figname@}
\caption{\label{fig:@figlabel@} caption goes here}
\end{figure}
"""
class _TermColors(object):
WARNING = '\033[93m'
SUCCESS = '\033[92m'
FAIL = '\033[91m'
BOLD = '\033[1m'
ENDC = '\033[0m'
def warning(ostr):
"""
Output a string to the terminal colored orange to indicate a
warning
"""
print(_TermColors.WARNING + ostr + _TermColors.ENDC)
def success(ostr):
"""
Output a string to the terminal colored green to indicate
success
"""
print(_TermColors.SUCCESS + ostr + _TermColors.ENDC)
#=============================================================================
# journal-specific routines
#=============================================================================
def get_entry_string():
now = datetime.datetime.now()
return str(now.replace(microsecond=0)).replace(" ", "_").replace(":", ".")
def get_dir_string():
now = datetime.date.today()
return str(now)
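# get_entry_string() yields filesystem-safe, sortable ids such as
# "2024-01-05_13.22.41" (spaces and colons replaced); get_dir_string() yields
# the matching day directory name, e.g. "2024-01-05". Dates are illustrative.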
def entry(nickname, images, defs, string=None):
    editor = os.environ.get("EDITOR", "emacs")
# determine the filename
entry_id = get_entry_string()
entry_dir = get_dir_string()
ofile = entry_id + ".tex"
# determine the directory we place it in -- this is the form yyyy-mm-dd/
odir = "{}/journal-{}/entries/{}/".format(defs[nickname]["working_path"],
nickname,
entry_dir)
if not os.path.isdir(odir):
try: os.mkdir(odir)
except:
sys.exit("ERROR: unable to make directory {}".format(odir))
# create the entry file. If we passed in a string, then write it
# too.
try: f = open(odir + ofile, "w")
except:
sys.exit("ERROR: unable to open {}".format(odir + ofile))
if string is not None:
f.write(string)
else:
f.write("% journal: {}\n".format(nickname))
# if there are images, then copy them over and add the figure
# headings to the entry
images_copied = []
for im in images:
# does an image by that name already live in the dest
# directory?
src = "{}/{}".format(defs["image_dir"], im)
dest = odir
if os.path.isfile("{}/{}".format(dest, im)):
im_copy = "{}_{}".format(entry_id.replace(".", "_"), im)
else:
im_copy = im
dest = "{}/{}".format(dest, im_copy)
# copy it
try: shutil.copy(src, dest)
except:
sys.exit("ERROR: unable to copy image {} to {}".format(src, dest))
images_copied.append(im_copy)
# create a unique label for latex referencing
idx = im.lower().rfind(".jpg")
idx = max(idx, im.lower().rfind(".png"))
idx = max(idx, im.lower().rfind(".gif"))
idx = max(idx, im.lower().rfind(".pdf"))
if idx >= 0:
im0 = "{}:{}".format(entry_id, im[:idx])
fname = "entries/{}/{}".format(entry_dir, im_copy)
# add the figure text
for l in figure_str.split("\n"):
f.write("{}\n".format(
l.replace("@figname@", fname).replace("@figlabel@", im0).rstrip()))
# add the entry id as a LaTeX comment
f.write("\n\n% entry: {}".format(entry_id))
f.close()
# get the hash for the file
hash_orig = hashlib.md5(open(odir + ofile, 'r').read().encode('utf-8')).hexdigest()
# launch the editor specified in the EDITOR environment variable
    if string is None:
if editor == "emacs":
prog = "emacs -nw {}/{}".format(odir, ofile)
else:
prog = "{} {}/{}".format(editor, odir, ofile)
stdout, stderr, rc = shell_util.run(prog)
# did the user actually make edits?
hash_new = hashlib.md5(open(odir + ofile, 'r').read().encode('utf-8')).hexdigest()
    if string is None and len(images) == 0 and (hash_new == hash_orig):
        # user didn't do anything interesting
        answer = get_input("no input made -- add this to the journal? (y/N) ")
if answer.lower() != "y":
try: os.remove(odir + ofile)
except:
sys.exit("ERROR: unable to remove file -- entry aborted")
sys.exit("entry aborted")
# any tags?
#tags = find_tags(odir + ofile)
# commit the entry to the working git repo
os.chdir(odir)
stdout, stderr, rc = shell_util.run("git add " + ofile)
stdout, stderr, rc = shell_util.run("git commit -m 'new entry' " + ofile)
# commit any images too
for im in images_copied:
stdout, stderr, rc = shell_util.run("git add " + im)
stdout, stderr, rc = shell_util.run("git commit -m 'new image' " + im)
# helpful edit suggestion
print("entry created. Use 'pyjournal.py edit {}' to edit this entry.".format(entry_id))
def edit(nickname, date_string, defs):
if date_string == "last":
last = elist(nickname, 1, defs, print_out=False)
date_string = last[0][0]
# find the file corresponding to the date string
entry_dir = "{}/journal-{}/entries/".format(defs[nickname]["working_path"], nickname)
os.chdir(entry_dir)
# if we got the date string from the prompt, it may have a "_"
date_string = date_string.replace("_", " ")
try: d, t = date_string.split(" ")
except:
sys.exit("invalid date string")
if not os.path.isdir(d):
sys.exit("entry directory does not exist")
file = "{}/{}_{}.tex".format(d, d, t)
if not os.path.isfile(file):
sys.exit("entry {} does not exist".format(file))
# open the file for appending
    editor = os.environ.get("EDITOR", "emacs")
entry_id = get_entry_string()
try: f = open(file, "a+")
except:
sys.exit("ERROR: unable to open {}".format(file))
f.write("\n\n% entry edited: {}".format(entry_id))
f.close()
if editor == "emacs":
prog = "emacs -nw {}".format(file)
else:
prog = "{} {}".format(editor, file)
stdout, stderr, rc = shell_util.run(prog)
# git commit any changes
stdout, stderr, rc = shell_util.run("git commit -m 'edited entry' " + file)
def appendix(nickname, name, defs):
# is there an appendix directory?
app_dir = "{}/journal-{}/entries/appendices/".format(
defs[nickname]["working_path"], nickname)
if not os.path.isdir(app_dir):
try: os.mkdir(app_dir)
except:
sys.exit("ERROR: unable to make the appendices/ directory")
os.chdir(app_dir)
# edit the file, create if it does not exist
file = "{}.tex".format(name)
if not os.path.isfile(file):
warning("appendix {} will be created".format(name))
# open the file for appending
    editor = os.environ.get("EDITOR", "emacs")
try: f = open(file, "a+")
except:
sys.exit("ERROR: unable to open {}".format(file))
entry_id = get_entry_string()
f.write("\n\n% entry edited: {}".format(entry_id))
f.close()
if editor == "emacs":
prog = "emacs -nw {}".format(file)
else:
prog = "{} {}".format(editor, file)
stdout, stderr, rc = shell_util.run(prog)
# git commit any changes
stdout, stderr, rc = shell_util.run("git add " + file)
stdout, stderr, rc = shell_util.run("git commit -m 'edited appendix' " + file)
def elist(nickname, num, defs, print_out=True):
entry_dir = "{}/journal-{}/entries/".format(defs[nickname]["working_path"], nickname)
entries = {}
for d in os.listdir(entry_dir):
if os.path.isdir(entry_dir + d):
edir = os.path.normpath("{}/{}".format(entry_dir, d))
for t in os.listdir(edir):
if t.endswith(".tex") and not "appendices" in edir:
entries[t] = "{}/{}".format(edir, t)
e = list(entries.keys())
e.sort(reverse=True)
last_entries = []
for n in range(min(num, len(e))):
idx = e[n].rfind(".tex")
entry_id = e[n][:idx]
last_entries.append((entry_id, entries[e[n]]))
if print_out:
for e in last_entries:
print("{} : {}".format(e[0], e[1]))
else:
return last_entries
#=============================================================================
# todo-specific routines
#=============================================================================
def rename_list(old_name, new_name, defs):
todo_dir = "{}/todo_list/".format(defs["working_path"])
try: os.chdir(todo_dir)
except:
sys.exit("ERROR: unable to cd into working directory {}".format(todo_dir))
if not os.path.isfile("{}.list".format(old_name)):
sys.exit("ERROR: list does not exist")
try: shutil.move("{}.list".format(old_name),
"{}.list".format(new_name))
except:
sys.exit("ERROR: unable to rename list")
stdout, stderr, rc = shell_util.run("git add {}.list".format(new_name))
stdout, stderr, rc = \
shell_util.run("git commit -m 'renamed' {}.list {}.list".format(old_name, new_name))
def add_list(list_name, defs):
todo_dir = "{}/todo_list/".format(defs["working_path"])
try: os.chdir(todo_dir)
except:
sys.exit("ERROR: unable to cd into working directory {}".format(todo_dir))
# does it already exist?
if os.path.isfile("{}.list".format(list_name)):
sys.exit("ERROR: list already exists")
# create the list file
try: f = open("{}.list".format(list_name), "w")
except:
sys.exit("ERROR: unable to create list {}".format(list_name))
f.write("# list: {} managed by pytodo".format(list_name))
f.close()
# commit the list
stdout, stderr, rc = shell_util.run("git add {}.list".format(list_name))
stdout, stderr, rc = shell_util.run("git commit -m 'new list' {}.list".format(list_name))
def tlist(defs):
todo_dir = "{}/todo_list/".format(defs["working_path"])
try: os.chdir(todo_dir)
except:
sys.exit("ERROR: unable to cd into working directory {}".format(todo_dir))
# find the lists
known_lists = [os.path.splitext(f)[0] for f in os.listdir(".") if
os.path.isfile(f) and f.endswith(".list")]
for l in sorted(known_lists):
if l == defs["default_list"]:
success("* {}".format(l))
else:
warning(" {}".format(l))
def show(list_name, defs):
todo_dir = "{}/todo_list/".format(defs["working_path"])
try: os.chdir(todo_dir)
except:
sys.exit("ERROR: unable to cd into working directory {}".format(todo_dir))
# does it already exist?
if not os.path.isfile("{}.list".format(list_name)):
sys.exit("ERROR: list does not exist")
hash_orig = hashlib.md5(open("{}.list".format(list_name), 'r').read().encode('utf-8')).hexdigest()
# open for editing
    editor = os.environ.get("EDITOR", "emacs")
if editor == "emacs":
prog = "emacs -nw {}.list".format(list_name)
else:
prog = "{} {}.list".format(editor, list_name)
stdout, stderr, rc = shell_util.run(prog)
hash_new = hashlib.md5(open("{}.list".format(list_name), 'r').read().encode('utf-8')).hexdigest()
if hash_orig != hash_new:
# git-store the updates
stdout, stderr, rc = \
shell_util.run("git commit -m 'edited list {}.list' {}.list".format(list_name, list_name))
if rc != 0:
print(stdout, stderr)
sys.exit("ERROR: there were git errors commiting the list")
def cat(list_name, defs):
todo_dir = "{}/todo_list/".format(defs["working_path"])
try: os.chdir(todo_dir)
except:
sys.exit("ERROR: unable to cd into working directory {}".format(todo_dir))
try: f = open("{}.list".format(list_name), "r")
except:
sys.exit("ERROR: list {} does not exist".format(list_name))
print(f.read())
f.close()
|
|
# -*- coding: utf-8 -*-
from struct import pack, unpack
from ws4py.exc import FrameTooLargeException, ProtocolException
from ws4py.compat import py3k, ord
# Frame opcodes defined in the spec.
OPCODE_CONTINUATION = 0x0
OPCODE_TEXT = 0x1
OPCODE_BINARY = 0x2
OPCODE_CLOSE = 0x8
OPCODE_PING = 0x9
OPCODE_PONG = 0xa
__all__ = ['Frame']
class Frame(object):
def __init__(self, opcode=None, body=b'', masking_key=None, fin=0, rsv1=0, rsv2=0, rsv3=0):
"""
Implements the framing protocol as defined by RFC 6455.
.. code-block:: python
:linenos:
>>> test_mask = 'XXXXXX' # perhaps from os.urandom(4)
>>> f = Frame(OPCODE_TEXT, 'hello world', masking_key=test_mask, fin=1)
>>> bytes = f.build()
>>> bytes.encode('hex')
'818bbe04e66ad6618a06d1249105cc6882'
>>> f = Frame()
>>> f.parser.send(bytes[0])
1
>>> f.parser.send(bytes[1])
4
.. seealso:: Data Framing http://tools.ietf.org/html/rfc6455#section-5.2
"""
if not isinstance(body, bytes):
raise TypeError("The body must be properly encoded")
self.opcode = opcode
self.body = body
self.masking_key = masking_key
self.fin = fin
self.rsv1 = rsv1
self.rsv2 = rsv2
self.rsv3 = rsv3
self.payload_length = len(body)
self._parser = None
@property
def parser(self):
if self._parser is None:
self._parser = self._parsing()
            # A generator must be advanced to its first yield before send() can be used.
next(self.parser)
return self._parser
def _cleanup(self):
if self._parser:
self._parser.close()
self._parser = None
def build(self):
"""
Builds a frame from the instance's attributes and returns
its bytes representation.
"""
header = b''
if self.fin > 0x1:
raise ValueError('FIN bit parameter must be 0 or 1')
if 0x3 <= self.opcode <= 0x7 or 0xB <= self.opcode:
raise ValueError('Opcode cannot be a reserved opcode')
## +-+-+-+-+-------+
## |F|R|R|R| opcode|
## |I|S|S|S| (4) |
## |N|V|V|V| |
## | |1|2|3| |
## +-+-+-+-+-------+
header = pack('!B', ((self.fin << 7)
| (self.rsv1 << 6)
| (self.rsv2 << 5)
| (self.rsv3 << 4)
| self.opcode))
## +-+-------------+-------------------------------+
## |M| Payload len | Extended payload length |
## |A| (7) | (16/63) |
## |S| | (if payload len==126/127) |
## |K| | |
## +-+-+-+-+-------+-+-------------+ - - - - - - - - - - - - - - - +
## | Extended payload length continued, if payload len == 127 |
## + - - - - - - - - - - - - - - - +-------------------------------+
if self.masking_key: mask_bit = 1 << 7
else: mask_bit = 0
length = self.payload_length
if length < 126:
header += pack('!B', (mask_bit | length))
elif length < (1 << 16):
header += pack('!B', (mask_bit | 126)) + pack('!H', length)
elif length < (1 << 63):
header += pack('!B', (mask_bit | 127)) + pack('!Q', length)
else:
raise FrameTooLargeException()
## + - - - - - - - - - - - - - - - +-------------------------------+
## | |Masking-key, if MASK set to 1 |
## +-------------------------------+-------------------------------+
## | Masking-key (continued) | Payload Data |
## +-------------------------------- - - - - - - - - - - - - - - - +
## : Payload Data continued ... :
## + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +
## | Payload Data continued ... |
## +---------------------------------------------------------------+
body = self.body
if not self.masking_key:
return bytes(header + body)
return bytes(header + self.masking_key + self.mask(body))
def _parsing(self):
"""
Generator to parse bytes into a frame. Yields until
enough bytes have been read or an error is met.
"""
buf = b''
some_bytes = b''
# yield until we get the first header's byte
while not some_bytes:
some_bytes = (yield 1)
first_byte = some_bytes[0] if isinstance(some_bytes, bytearray) else ord(some_bytes[0])
# frame-fin = %x0 ; more frames of this message follow
# / %x1 ; final frame of this message
self.fin = (first_byte >> 7) & 1
self.rsv1 = (first_byte >> 6) & 1
self.rsv2 = (first_byte >> 5) & 1
self.rsv3 = (first_byte >> 4) & 1
self.opcode = first_byte & 0xf
# frame-rsv1 = %x0 ; 1 bit, MUST be 0 unless negotiated otherwise
# frame-rsv2 = %x0 ; 1 bit, MUST be 0 unless negotiated otherwise
# frame-rsv3 = %x0 ; 1 bit, MUST be 0 unless negotiated otherwise
if self.rsv1 or self.rsv2 or self.rsv3:
raise ProtocolException()
        # opcodes 3 through 7 and those above 0xA are currently reserved
if 2 < self.opcode < 8 or self.opcode > 0xA:
raise ProtocolException()
# control frames cannot be fragmented
if self.opcode > 0x7 and self.fin == 0:
raise ProtocolException()
# do we already have enough some_bytes to continue?
some_bytes = some_bytes[1:] if some_bytes and len(some_bytes) > 1 else b''
# Yield until we get the second header's byte
while not some_bytes:
some_bytes = (yield 1)
second_byte = some_bytes[0] if isinstance(some_bytes, bytearray) else ord(some_bytes[0])
mask = (second_byte >> 7) & 1
self.payload_length = second_byte & 0x7f
        # All control frames MUST have a payload length of 125 bytes or less
if self.opcode > 0x7 and self.payload_length > 125:
raise FrameTooLargeException()
if some_bytes and len(some_bytes) > 1:
buf = some_bytes[1:]
some_bytes = buf
else:
buf = b''
some_bytes = b''
if self.payload_length == 127:
# This will compute the actual application data size
if len(buf) < 8:
nxt_buf_size = 8 - len(buf)
some_bytes = (yield nxt_buf_size)
some_bytes = buf + (some_bytes or b'')
while len(some_bytes) < 8:
b = (yield 8 - len(some_bytes))
if b is not None:
some_bytes = some_bytes + b
if len(some_bytes) > 8:
buf = some_bytes[8:]
some_bytes = some_bytes[:8]
else:
some_bytes = buf[:8]
buf = buf[8:]
extended_payload_length = some_bytes
self.payload_length = unpack(
'!Q', extended_payload_length)[0]
if self.payload_length > 0x7FFFFFFFFFFFFFFF:
raise FrameTooLargeException()
elif self.payload_length == 126:
if len(buf) < 2:
nxt_buf_size = 2 - len(buf)
some_bytes = (yield nxt_buf_size)
some_bytes = buf + (some_bytes or b'')
while len(some_bytes) < 2:
b = (yield 2 - len(some_bytes))
if b is not None:
some_bytes = some_bytes + b
if len(some_bytes) > 2:
buf = some_bytes[2:]
some_bytes = some_bytes[:2]
else:
some_bytes = buf[:2]
buf = buf[2:]
extended_payload_length = some_bytes
self.payload_length = unpack(
'!H', extended_payload_length)[0]
if mask:
if len(buf) < 4:
nxt_buf_size = 4 - len(buf)
some_bytes = (yield nxt_buf_size)
some_bytes = buf + (some_bytes or b'')
while not some_bytes or len(some_bytes) < 4:
b = (yield 4 - len(some_bytes))
if b is not None:
some_bytes = some_bytes + b
if len(some_bytes) > 4:
buf = some_bytes[4:]
else:
some_bytes = buf[:4]
buf = buf[4:]
self.masking_key = some_bytes
if len(buf) < self.payload_length:
nxt_buf_size = self.payload_length - len(buf)
some_bytes = (yield nxt_buf_size)
some_bytes = buf + (some_bytes or b'')
while len(some_bytes) < self.payload_length:
l = self.payload_length - len(some_bytes)
b = (yield l)
if b is not None:
some_bytes = some_bytes + b
else:
if self.payload_length == len(buf):
some_bytes = buf
else:
some_bytes = buf[:self.payload_length]
self.body = some_bytes
yield
def mask(self, data):
"""
Performs the masking or unmasking operation on data
using the simple masking algorithm:
..
j = i MOD 4
transformed-octet-i = original-octet-i XOR masking-key-octet-j
"""
masked = bytearray(data)
if py3k: key = self.masking_key
else: key = map(ord, self.masking_key)
for i in range(len(data)):
masked[i] = masked[i] ^ key[i%4]
return masked
unmask = mask
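# A minimal, self-contained sketch of the build/parse round trip. The 4-byte
# masking key is generated with os.urandom, as a real client would; feeding
# the whole frame at once drains the parser, whereas streaming callers send
# exactly as many bytes as each yield requests.
if __name__ == "__main__":
    import os
    f = Frame(opcode=OPCODE_TEXT, body=b'hello', masking_key=os.urandom(4), fin=1)
    raw = f.build()
    g = Frame()
    g.parser.send(raw)
    # The parser stores the still-masked payload; unmask to recover the text.
    assert bytes(g.unmask(g.body)) == b'hello'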
|
|
# ==========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==========================================================================*/
import os
import sys
from sys import stderr as system_error_stream
# Required to work around weird import error with xarray
from typing import Dict, Any, List, Optional, Sequence, Union
import pkg_resources
import itkConfig
from itk.support.template_class import itkTemplate
def create_itk_module(name: str):
from importlib.util import module_from_spec as ilu_module_from_spec
from importlib.util import spec_from_file_location as ilu_spec_from_file_location
swig_module_name: str = f"itk.{name}Python"
spec = ilu_spec_from_file_location(
swig_module_name,
os.path.join(os.path.dirname(__file__), "..", f"{name}Python.py"),
)
l_module = ilu_module_from_spec(spec)
return l_module
def itk_load_swig_module(name: str, namespace=None):
"""This function causes a SWIG module to be loaded into memory after its
dependencies are satisfied. Information about the templates defined therein
    is looked up from a config file, and itkTemplate instances for each are
    created. These instances are placed in a module with the given
name that is either looked up from sys.modules or created and placed there
if it does not already exist.
Optionally, a 'namespace' parameter can be provided. If it is provided,
this namespace will be updated with the new template_feature instantiations.
The raw classes loaded from the named module's SWIG interface are placed in
a 'swig' sub-module. If the namespace parameter is provided, this
information will be placed in a sub-module named 'swig' therein as well.
    This latter submodule will be created if it does not already exist."""
swig_module_name: str = f"itk.{name}Python"
# find the module's name in sys.modules, or create a new module so named
this_module = sys.modules.setdefault(swig_module_name, create_itk_module(name))
# if this library and its template_feature instantiations have already been loaded
# into sys.modules, bail out after loading the defined symbols into
# 'namespace'
if hasattr(this_module, "__templates_loaded"):
if namespace is not None:
swig = namespace.setdefault("swig", {})
if hasattr(this_module, "swig"):
swig.update(this_module.swig)
# don't worry about overwriting the symbols in namespace -- any
# common symbols should be of type itkTemplate, which is a
# singleton type. That is, they are all identical, so replacing one
# with the other isn't a problem.
for k, v in this_module.__dict__.items():
if not (k.startswith("_") or k.startswith("itk") or k == "swig"):
namespace[k] = v
return
# We're definitely going to load the templates. We set templates_loaded
# here instead of at the end of the file to protect against cyclical
# dependencies that could kill the recursive lookup below.
this_module.__templates_loaded = True
# Now, we definitely need to load the template_feature instantiations from the
# named module, and possibly also load the underlying SWIG module. Before
# we can load the template_feature instantiations of this module, we need to load
# those of the modules on which this one depends. Ditto for the SWIG
# modules.
# So, we recursively satisfy the dependencies of named module and create
# the template_feature instantiations.
# Dependencies are looked up from the auto-generated configuration files,
# via the itk_base_global_module_data instance defined at the bottom of this file, which
# knows how to find those configuration files.
l_data = itk_base_global_module_data[name]
if l_data:
deps = l_data.get_module_dependencies()
for dep in deps:
itk_load_swig_module(dep, namespace)
if itkConfig.ImportCallback:
itkConfig.ImportCallback(name, 0)
# SWIG-generated modules have 'Python' appended. Only load the SWIG module
# if we haven't already.
loader = LibraryLoader()
l_module = loader.load(swig_module_name)
# OK, now the modules on which this one depends are loaded and
# template_feature-instantiated, and the SWIG module for this one is also loaded.
# We're going to put the things we load and create in two places: the
# optional 'namespace' parameter, and the this_module variable's namespace.
# Populate the 'swig' sub-module namespace for this_module. Also look up or create a
# different 'swig' namespace for 'namespace'. Since 'namespace' may be used to
# collect symbols from multiple different ITK modules, we don't want to
# stomp on an existing 'swig' namespace, nor do we want to share 'swig'
# namespaces between this_module and namespace.
if namespace is None:
for k, v in l_module.__dict__.items():
if not (k.startswith("__") or k.startswith("itk")):
this_module.swig[k] = v
else:
swig = namespace.setdefault("swig", {})
for k, v in l_module.__dict__.items():
if not (k.startswith("__") or k.startswith("itk")):
this_module.swig[k] = v
swig[k] = v
l_data: ITKModuleInfo = itk_base_global_module_data[name]
for template_feature in l_data.get_all_template_features():
if template_feature.is_itk_class():
# Get the attribute associated with the class name if it exists,
# otherwise make a new templated class
# template_container = this_module.'py_class_name'
template_container = getattr(
this_module,
template_feature.get_python_class_name(),
# Create a new template_container if not already found
itkTemplate(template_feature.get_cpp_class_name()),
)
try:
template_container.__add__(
template_feature.get_template_parameters(),
getattr(l_module, template_feature.get_swig_class_name()),
)
# Now set the updated template_container to this_module
setattr(
this_module,
template_feature.get_python_class_name(),
template_container,
)
if namespace is not None:
current_value = namespace.get(
template_feature.get_python_class_name()
)
if (
current_value is not None
and current_value != template_container
):
debug_print_error(
f"Namespace already has a value for "
f"{template_feature.get_python_class_name()}, which is not an itkTemplate "
f"instance for class {template_feature.get_cpp_class_name()}. "
f"Overwriting old value."
)
namespace[
template_feature.get_python_class_name()
] = template_container
except Exception as e:
debug_print_error(
f"{template_feature.get_swig_class_name()} not loaded from module {name} because of "
f"exception:\n {e}"
)
else:
# this is a description of a non-templated class
try:
swig_class = getattr(l_module, template_feature.get_swig_class_name())
itkTemplate.registerNoTpl(
template_feature.get_cpp_class_name(), swig_class
)
setattr(
this_module, template_feature.get_python_class_name(), swig_class
)
if namespace is not None:
current_value = namespace.get(
template_feature.get_python_class_name()
)
if current_value is not None and current_value != swig_class:
debug_print_error(
f"Namespace already has a value for"
f" {template_feature.get_python_class_name()}, which is not class {template_feature.get_cpp_class_name()}. "
f"Overwriting old value."
)
namespace[template_feature.get_python_class_name()] = swig_class
except Exception as e:
debug_print_error(
f"{template_feature.get_swig_class_name()} not found in module {name} because of "
f"exception:\n {e}"
)
for snakeCaseFunction in l_data.get_snake_case_functions():
namespace[snakeCaseFunction] = getattr(l_module, snakeCaseFunction)
init_name = snakeCaseFunction + "_init_docstring"
        try:
            init_function = getattr(l_module, init_name)
            init_function()
        except AttributeError:
            pass
if itkConfig.ImportCallback:
itkConfig.ImportCallback(name, 1)
def debug_print_error(error):
if itkConfig.DebugLevel == itkConfig.WARN:
print(error, file=system_error_stream)
elif itkConfig.DebugLevel == itkConfig.ERROR:
raise RuntimeError(error)
class LibraryLoader(object):
"""Do all the work to set up the environment so that a SWIG-generated
library can be properly loaded. This involves setting paths defined in
itkConfig."""
def __init__(self) -> None:
self.old_path = sys.path
self.old_cwd = os.getcwd()
def setup(self):
self.old_cwd = os.getcwd()
try:
os.chdir(itkConfig.swig_lib)
except OSError:
# silently pass to avoid the case where the dir is not there
pass
self.old_path = sys.path
sys.path = [itkConfig.swig_lib, itkConfig.swig_py] + sys.path
def load(self, name: str):
self.setup()
try:
import importlib
l_module = importlib.import_module(name)
            # importlib.find_loader() is deprecated since Python 3.4;
            # importlib.util.find_spec() is used instead.
l_spec = importlib.util.find_spec(name)
l_spec.loader.exec_module(l_module) # pytype: disable=attribute-error
return l_module
finally:
self.cleanup()
def cleanup(self):
os.chdir(self.old_cwd)
sys.path = self.old_path
class ITKTemplateFeatures:
"""
Objects to hold the 'template' features specified in the '*Config.py'
files generated during swig configuration.
(py_class_name, cpp_class_name, swig_class_name, class_in_module, template_parameters)
('Image', 'itk::Image', 'itkImageSS2', True, 'signed short,2'),
"""
def __init__(self, feature_tuple: Sequence[Union[str, bool]]) -> None:
feature_length: int = len(feature_tuple)
# ITK classes have exactly 5 elements in the tuple, otherwise they are swig classes
self._is_itk_class: bool = feature_length == 5
if feature_length < 3 or feature_length > 5:
raise Exception(
f"ERROR: Ivalid number of features specified (3 <= {feature_length} <= 5): {feature_tuple}."
)
self._py_class_name: str = feature_tuple[0]
self._cpp_class_name: str = feature_tuple[1]
self._swig_class_name: str = feature_tuple[2]
self._class_in_module: bool = feature_tuple[3] if feature_length >= 4 else False
self._template_parameters: Optional[str] = (
feature_tuple[4] if feature_length == 5 else None
)
def is_itk_class(self) -> bool:
return self._is_itk_class
def get_python_class_name(self) -> str:
return self._py_class_name
def get_cpp_class_name(self) -> str:
return self._cpp_class_name
def get_swig_class_name(self) -> str:
return self._swig_class_name
def get_class_in_module(self) -> bool:
return self._class_in_module
def get_template_parameters(self) -> str:
return self._template_parameters
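# A minimal sketch (not part of the wrapping machinery) of how a feature
# tuple from a *Config.py file maps onto the ITKTemplateFeatures accessors;
# the tuple below mirrors the example in the class docstring.
def _demo_template_features() -> None:
    feat = ITKTemplateFeatures(
        ("Image", "itk::Image", "itkImageSS2", True, "signed short,2")
    )
    assert feat.is_itk_class()  # 5-element tuples describe templated classes
    assert feat.get_python_class_name() == "Image"
    assert feat.get_swig_class_name() == "itkImageSS2"
    assert feat.get_template_parameters() == "signed short,2"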
class ITKModuleInfo:
"""
A structure to hold information loaded from the *Config.py
files generated during swig wrapping. The *Config.py
files define actual names of the swig wrapped classes
so that they may be used to build convenience dispatch
factories from the itkTemplate base class.
"""
    def __init__(self, conf: str, snake_conf: str) -> None:
import importlib
module_name = os.path.splitext(conf)[0]
content_info = importlib.import_module(f"itk.Configuration.{module_name}")
if hasattr(content_info, "templates"):
_templates = content_info.templates
else:
_templates = tuple()
if hasattr(content_info, "depends"):
self._depends = content_info.depends
else:
self._depends = tuple()
self._template_feature_tuples: List[ITKTemplateFeatures] = [
ITKTemplateFeatures(tfeat) for tfeat in _templates
]
snake_module_name = os.path.splitext(snake_conf)[0]
try:
snake_content_info = importlib.import_module(
f"itk.Configuration.{snake_module_name}"
)
except ImportError:
self._snake_case_functions: Sequence[str] = []
return
if hasattr(snake_content_info, "snake_case_functions"):
self._snake_case_functions: Sequence[
str
] = snake_content_info.snake_case_functions
else:
self._snake_case_functions: Sequence[str] = []
def get_module_dependencies(self) -> Sequence[str]:
return self._depends
def get_all_template_features(self) -> Sequence[ITKTemplateFeatures]:
return self._template_feature_tuples
def get_snake_case_functions(self) -> Sequence[str]:
return self._snake_case_functions
def _initialize(l_module_data):
    # Make a list of all known modules (described in *Config.py files in the
# config_py directory) and load the information described in those Config.py
# files.
candidate_config_path: str = os.path.join(itkConfig.path, "Configuration")
if not os.path.isdir(candidate_config_path):
error_message: str = f"WARNING: Invalid configuration directory requested: {candidate_config_path}"
raise RuntimeError(error_message)
files = os.listdir(candidate_config_path)
known_modules: List[str] = sorted(
[f[:-9] for f in files if f.endswith("Config.py")]
)
for module in known_modules:
conf: str = f"{module}Config.py"
snake_conf = f"{module}_snake_case.py"
l_module_data[module] = ITKModuleInfo(conf, snake_conf)
itk_base_global_lazy_attributes: Dict[str, Any] = {}
itk_base_global_module_data: Dict[str, ITKModuleInfo] = {}
_initialize(itk_base_global_module_data)
del _initialize
|
|
import sys
import json
import asyncio
import inspect
import logging
import argparse
import collections
import synapse.exc as s_exc
import synapse.common as s_common
import synapse.cortex as s_cortex
import synapse.telepath as s_telepath
import synapse.lib.storm as s_storm
import synapse.lib.config as s_config
import synapse.lib.output as s_output
import synapse.lib.autodoc as s_autodoc
import synapse.lib.dyndeps as s_dyndeps
import synapse.lib.version as s_version
import synapse.lib.stormsvc as s_stormsvc
import synapse.lib.stormtypes as s_stormtypes
import synapse.tools.genpkg as s_genpkg
logger = logging.getLogger(__name__)
poptsToWords = {
'ex': 'Example',
'ro': 'Read Only',
}
info_ignores = (
'stortype',
'bases',
'custom',
)
raw_back_slash_colon = r'\:'
class DocHelp:
'''
Helper to pre-compute all doc strings hierarchically
'''
def __init__(self, ctors, types, forms, props, univs):
self.ctors = {c[0]: c[3].get('doc', 'BaseType has no doc string.') for c in ctors}
self.types = {t[0]: t[2].get('doc', self.ctors.get(t[1][0])) for t in types}
self.forms = {f[0]: f[1].get('doc', self.types.get(f[0], self.ctors.get(f[0]))) for f in forms}
self.univs = {}
for unam, utyp, unfo in univs:
tn = utyp[0]
doc = unfo.get('doc', self.forms.get(tn, self.types.get(tn, self.ctors.get(tn))))
self.univs[unam] = doc
self.props = {}
for form, props in props.items():
for prop in props:
tn = prop[1][0]
doc = prop[2].get('doc', self.forms.get(tn, self.types.get(tn, self.ctors.get(tn))))
self.props[(form, prop[0])] = doc
typed = {t[0]: t for t in types}
ctord = {c[0]: c for c in ctors}
self.formhelp = {} # form name -> ex string for a given type
for form in forms:
formname = form[0]
tnfo = typed.get(formname)
ctor = ctord.get(formname)
if tnfo:
tnfo = tnfo[2]
example = tnfo.get('ex')
self.formhelp[formname] = example
elif ctor:
ctor = ctor[3]
example = ctor.get('ex')
self.formhelp[formname] = example
else: # pragma: no cover
logger.warning(f'No ctor/type available for [{formname}]')
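# A minimal sketch (hypothetical model data, not from a real Cortex) of the
# doc string cascade: a type without its own 'doc' inherits from its ctor,
# and a form without one inherits from its type.
def _demo_dochelp():
    ctors = [('int', 'synapse.lib.types.Int', {}, {'doc': 'An integer.'})]
    types = [('test:int', ('int', {}), {})]  # no doc -> falls back to ctor doc
    forms = [('test:int', {}, ())]           # no doc -> falls back to type doc
    dh = DocHelp(ctors, types, forms, {}, ())
    assert dh.types['test:int'] == 'An integer.'
    assert dh.forms['test:int'] == 'An integer.'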
def processCtors(rst, dochelp, ctors):
'''
Args:
rst (RstHelp):
dochelp (DocHelp):
ctors (list):
Returns:
None
'''
rst.addHead('Base Types', lvl=1, link='.. _dm-base-types:')
rst.addLines('',
'Base types are defined via Python classes.',
'')
for name, ctor, opts, info in ctors:
doc = dochelp.ctors.get(name)
if not doc.endswith('.'):
            logger.warning(f'Docstring for ctor {name} does not end with a period.')
doc = doc + '.'
# Break implicit links to nowhere
hname = name
if ':' in name:
hname = name.replace(':', raw_back_slash_colon)
link = f'.. _dm-type-{name.replace(":", "-")}:'
rst.addHead(hname, lvl=2, link=link)
rst.addLines(doc, f'It is implemented by the following class{raw_back_slash_colon} ``{ctor}``.')
_ = info.pop('doc', None)
ex = info.pop('ex', None)
if ex:
rst.addLines('',
f'An example of ``{name}``{raw_back_slash_colon}',
'',
f' * ``{ex}``',
)
if opts:
rst.addLines('',
f'The base type ``{name}`` has the following default options set:',
''
)
for k, v in opts.items():
rst.addLines(f' * {k}: ``{v}``')
for key in info_ignores:
info.pop(key, None)
if info:
logger.warning(f'Base type {name} has unhandled info: {info}')
def processTypes(rst, dochelp, types):
'''
Args:
rst (RstHelp):
dochelp (DocHelp):
        types (list):
Returns:
None
'''
rst.addHead('Types', lvl=1, link='.. _dm-types:')
rst.addLines('',
'Regular types are derived from BaseTypes.',
'')
for name, (ttyp, topt), info in types:
doc = dochelp.types.get(name)
if not doc.endswith('.'):
            logger.warning(f'Docstring for type {name} does not end with a period.')
doc = doc + '.'
# Break implicit links to nowhere
hname = name
if ':' in name:
hname = name.replace(':', raw_back_slash_colon)
link = f'.. _dm-type-{name.replace(":", "-")}:'
rst.addHead(hname, lvl=2, link=link)
rst.addLines(doc,
f'The ``{name}`` type is derived from the base type: ``{ttyp}``.')
_ = info.pop('doc', None)
ex = info.pop('ex', None)
if ex:
rst.addLines('',
f'An example of ``{name}``{raw_back_slash_colon}',
'',
f' * ``{ex}``',
)
if topt:
rst.addLines('',
f'The type ``{name}`` has the following options set:',
''
)
for key, valu in sorted(topt.items(), key=lambda x: x[0]):
if key == 'enums':
if valu is None:
continue
lines = [f' * {key}:\n']
elines = []
if isinstance(valu, str):
# handle str
enums = valu.split(',')
header = 'valu'
maxa = max((len(enum) for enum in enums))
maxa = max(maxa, len(header))
seprline = f'+{"-" * maxa}+'
elines.append(seprline)
                        line = f'|{header}{" " * (maxa - len(header))}|'
elines.append(line)
line = f'+{"=" * maxa}+'
elines.append(line)
for enum in enums:
                            line = f'|{enum}{" " * (maxa - len(enum))}|'
elines.append(line)
elines.append(seprline)
elif isinstance(valu, (list, tuple)):
# handle enum list
valu = sorted(valu, key=lambda x: x[0])
maxa, maxb = len('int'), len('valu')
for (a, b) in valu:
maxa = max(len(str(a)), maxa)
maxb = max(len(b), maxb)
line = f'{"=" * maxa} {"=" * maxb}'
elines.append(line)
line = f'int{" " * (maxa - 3)} valu{" " * (maxb - 4)}'
elines.append(line)
line = f'{"=" * maxa} {"=" * maxb}'
elines.append(line)
for (a, b) in valu:
line = f'{a}{" " * (maxa - len(str(a)))} {b}{" " * (maxb - len(b))}'
elines.append(line)
line = f'{"=" * maxa} {"=" * maxb}'
elines.append(line)
else: # pragma: no cover
raise ValueError(f'Unknown enum type {type(valu)} for {name}')
elines = [' ' + line for line in elines]
lines.extend(elines)
lines.append('\n')
rst.addLines(*lines)
elif key in ('fields',
'schema',
):
if len(str(valu)) < 80:
rst.addLines(f' * {key}: ``{valu}``')
continue
lines = [f' * {key}:\n', ' ::\n\n']
json_lines = json.dumps(valu, indent=1, sort_keys=True)
json_lines = [' ' + line for line in json_lines.split('\n')]
lines.extend(json_lines)
lines.append('\n')
rst.addLines(*lines)
else:
rst.addLines(f' * {key}: ``{valu}``')
for key in info_ignores:
info.pop(key, None)
if info:
logger.warning(f'Type {name} has unhandled info: {info}')
def processFormsProps(rst, dochelp, forms, univ_names):
rst.addHead('Forms', lvl=1, link='.. _dm-forms:')
rst.addLines('',
                 'Forms are derived from types, or base types. Forms represent node types in the graph.',
                 '')
for name, info, props in forms:
doc = dochelp.forms.get(name)
if not doc.endswith('.'):
            logger.warning(f'Docstring for form {name} does not end with a period.')
doc = doc + '.'
hname = name
if ':' in name:
hname = name.replace(':', raw_back_slash_colon)
link = f'.. _dm-form-{name.replace(":", "-")}:'
rst.addHead(hname, lvl=2, link=link)
baseline = f'The base type for the form can be found at :ref:`dm-type-{name.replace(":", "-")}`.'
rst.addLines(doc,
'',
baseline,
'')
ex = dochelp.formhelp.get(name)
if ex:
rst.addLines('',
f'An example of ``{name}``{raw_back_slash_colon}',
'',
f' * ``{ex}``',
''
)
if props:
rst.addLines('Properties:',
)
for pname, (ptname, ptopts), popts in props:
if pname in univ_names:
continue
hpname = pname
if ':' in pname:
hpname = pname.replace(':', raw_back_slash_colon)
_ = popts.pop('doc', None)
doc = dochelp.props.get((name, pname))
if not doc.endswith('.'):
                    logger.warning(f'Docstring for prop ({name}, {pname}) does not end with a period.')
doc = doc + '.'
rst.addLines('',
raw_back_slash_colon + hpname + ' / ' + f'{":".join([hname, hpname])}',
' ' + doc,
)
if popts:
rst.addLines(' ' + 'It has the following property options set:',
''
)
for k, v in popts.items():
k = poptsToWords.get(k, k.replace(':', raw_back_slash_colon))
rst.addLines(' ' + f'* {k}: ``{v}``')
hptlink = f'dm-type-{ptname.replace(":", "-")}'
tdoc = f'The property type is :ref:`{hptlink}`.'
rst.addLines('',
' ' + tdoc,
)
if ptopts:
rst.addLines(' ' + "Its type has the following options set:",
'')
for k, v in ptopts.items():
rst.addLines(' ' + f'* {k}: ``{v}``')
def processUnivs(rst, dochelp, univs):
rst.addHead('Universal Properties', lvl=1, link='.. _dm-universal-props:')
rst.addLines('',
'Universal props are system level properties which may be present on every node.',
'',
'These properties are not specific to a particular form and exist outside of a particular',
'namespace.',
'')
for name, (utyp, uopt), info in univs:
_ = info.pop('doc', None)
doc = dochelp.univs.get(name)
if not doc.endswith('.'):
            logger.warning(f'Docstring for universal property {name} does not end with a period.')
doc = doc + '.'
hname = name
if ':' in name:
hname = name.replace(':', raw_back_slash_colon)
rst.addHead(hname, lvl=2, link=f'.. _dm-univ-{name.replace(":", "-")}:')
rst.addLines('',
doc,
)
if info:
rst.addLines('It has the following property options set:',
''
)
for k, v in info.items():
k = poptsToWords.get(k, k.replace(':', raw_back_slash_colon))
rst.addLines(' ' + f'* {k}: ``{v}``')
hptlink = f'dm-type-{utyp.replace(":", "-")}'
tdoc = f'The universal property type is :ref:`{hptlink}`.'
rst.addLines('',
tdoc,
)
if uopt:
rst.addLines("Its type has the following options set:",
'')
for k, v in uopt.items():
rst.addLines(' ' + f'* {k}: ``{v}``')
async def processStormCmds(rst, pkgname, commands):
'''
Args:
rst (RstHelp):
pkgname (str):
commands (list):
Returns:
None
'''
rst.addHead('Storm Commands', lvl=2)
    rst.addLines('This package implements the following Storm Commands.\n')
commands = sorted(commands, key=lambda x: x.get('name'))
for cdef in commands:
cname = cdef.get('name')
cdesc = cdef.get('descr')
cargs = cdef.get('cmdargs')
# command names cannot have colons in them thankfully
cref = f'.. _stormcmd-{pkgname.replace(":", "-")}-{cname.replace(".", "-")}:'
rst.addHead(cname, lvl=3, link=cref)
# Form the description
lines = ['::\n']
# Generate help from args
pars = s_storm.Parser(prog=cname, descr=cdesc)
if cargs:
for (argname, arginfo) in cargs:
pars.add_argument(argname, **arginfo)
pars.help()
for line in pars.mesgs:
if '\n' in line:
for subl in line.split('\n'):
lines.append(f' {subl}')
else:
lines.append(f' {line}')
lines.append('\n')
forms = cdef.get('forms', {})
iforms = forms.get('input')
oforms = forms.get('output')
nodedata = forms.get('nodedata')
if iforms:
line = 'The command is aware of how to automatically handle the following forms as input nodes:\n'
lines.append(line)
for form in iforms:
lines.append(f'- ``{form}``')
lines.append('\n')
if oforms:
line = 'The command may make the following types of nodes in the graph as a result of its execution:\n'
lines.append(line)
for form in oforms:
lines.append(f'- ``{form}``')
lines.append('\n')
if nodedata:
line = 'The command may add nodedata with the following keys to the corresponding forms:\n'
lines.append(line)
for key, form in nodedata:
lines.append(f'- ``{key}`` on ``{form}``')
lines.append('\n')
rst.addLines(*lines)
async def docModel(outp,
core):
coreinfo = await core.getCoreInfo()
_, model = coreinfo.get('modeldef')[0]
ctors = model.get('ctors')
types = model.get('types')
forms = model.get('forms')
univs = model.get('univs')
props = collections.defaultdict(list)
ctors = sorted(ctors, key=lambda x: x[0])
univs = sorted(univs, key=lambda x: x[0])
types = sorted(types, key=lambda x: x[0])
forms = sorted(forms, key=lambda x: x[0])
univ_names = {univ[0] for univ in univs}
for fname, fnfo, fprops in forms:
for prop in fprops:
props[fname].append(prop)
[v.sort() for k, v in props.items()]
dochelp = DocHelp(ctors, types, forms, props, univs)
# Validate examples
for form, example in dochelp.formhelp.items():
if example is None:
continue
if example.startswith('('):
q = f"[{form}={example}]"
else:
q = f"[{form}='{example}']"
node = False
async for (mtyp, mnfo) in core.storm(q, {'editformat': 'none'}):
if mtyp in ('init', 'fini'):
continue
if mtyp == 'err': # pragma: no cover
raise s_exc.SynErr(mesg='Invalid example', form=form, example=example, info=mnfo)
if mtyp == 'node':
node = True
        if not node:  # pragma: no cover
raise s_exc.SynErr(mesg='Unable to make a node from example.', form=form, example=example)
rst = s_autodoc.RstHelp()
rst.addHead('Synapse Data Model - Types', lvl=0)
processCtors(rst, dochelp, ctors)
processTypes(rst, dochelp, types)
rst2 = s_autodoc.RstHelp()
rst2.addHead('Synapse Data Model - Forms', lvl=0)
processFormsProps(rst2, dochelp, forms, univ_names)
processUnivs(rst2, dochelp, univs)
return rst, rst2
async def docConfdefs(ctor, reflink=':ref:`devops-cell-config`', doc_title=None):
cls = s_dyndeps.tryDynLocal(ctor)
if not hasattr(cls, 'confdefs'):
raise Exception('ctor must have a confdefs attr')
rst = s_autodoc.RstHelp()
clsname = cls.__name__
conf = cls.initCellConf() # type: s_config.Config
if doc_title is None:
doc_title = clsname
rst.addHead(f'{doc_title} Configuration Options', lvl=0, link=f'.. _autodoc-{clsname.lower()}-conf:')
rst.addLines(f'The following are boot-time configuration options for the cell.')
rst.addLines(f'See {reflink} for details on how to set these options.')
# access raw config data
# Get envar and argparse mapping
name2envar = conf.getEnvarMapping()
name2cmdline = conf.getCmdlineMapping()
schema = conf.json_schema.get('properties', {})
for name, conf in sorted(schema.items(), key=lambda x: x[0]):
if conf.get('hideconf'):
continue
nodesc = f'No description available for ``{name}``.'
hname = name
if ':' in name:
hname = name.replace(':', raw_back_slash_colon)
rst.addHead(hname, lvl=1)
desc = conf.get('description', nodesc)
if not desc.endswith('.'): # pragma: no cover
logger.warning(f'Description for [{name}] is missing a period.')
lines = []
lines.append(desc)
extended_description = conf.get('extended_description')
if extended_description:
lines.append('\n')
lines.append(extended_description)
# Type/additional information
lines.append('\n')
ctyp = conf.get('type')
lines.append('Type')
lines.append(f' ``{ctyp}``\n')
if ctyp == 'object':
if conf.get('properties'):
lines.append('Properties')
lines.append(' The object expects the following properties:')
data = {k: v for k, v in conf.items() if k not in (
'description', 'default', 'type', 'hideconf', 'hidecmdl',
)}
parts = json.dumps(data, sort_keys=True, indent=2).split('\n')
lines.append(' ::')
lines.append('\n')
lines.extend([f' {p}' for p in parts])
lines.append('\n')
defval = conf.get('default', s_common.novalu)
if defval is not s_common.novalu:
lines.append('Default Value')
lines.append(f' ``{repr(defval)}``\n')
envar = name2envar.get(name)
if envar:
lines.append('Environment Variable')
lines.append(f' ``{envar}``\n')
cmdline = name2cmdline.get(name)
if cmdline:
lines.append('Command Line Argument')
lines.append(f' ``--{cmdline}``\n')
rst.addLines(*lines)
return rst, clsname
async def docStormsvc(ctor):
cls = s_dyndeps.tryDynLocal(ctor)
if not hasattr(cls, 'cellapi'):
raise Exception('ctor must have a cellapi attr')
clsname = cls.__name__
cellapi = cls.cellapi
if not issubclass(cellapi, s_stormsvc.StormSvc):
raise Exception('cellapi must be a StormSvc implementation')
# Make a dummy object
class MockSess:
def __init__(self):
self.user = None
class DummyLink:
def __init__(self):
self.info = {'sess': MockSess()}
def get(self, key):
return self.info.get(key)
async with await cellapi.anit(s_common.novalu, DummyLink(), s_common.novalu) as obj:
svcinfo = await obj.getStormSvcInfo()
rst = s_autodoc.RstHelp()
# Disable default python highlighting
rst.addLines('.. highlight:: none\n')
rst.addHead(f'{clsname} Storm Service')
lines = ['The following Storm Packages and Commands are available from this service.',
f'This documentation is generated for version '
f'{s_version.fmtVersion(*svcinfo.get("vers"))} of the service.',
f'The Storm Service name is ``{svcinfo.get("name")}``.',
]
rst.addLines(*lines)
for pkg in svcinfo.get('pkgs'):
pname = pkg.get('name')
pver = pkg.get('version')
commands = pkg.get('commands')
hname = pname
if ':' in pname:
hname = pname.replace(':', raw_back_slash_colon)
rst.addHead(f'Storm Package\\: {hname}', lvl=1)
rst.addLines(f'This documentation for {pname} is generated for version {s_version.fmtVersion(*pver)}')
if commands:
await processStormCmds(rst, pname, commands)
# TODO: Modules are not currently documented.
return rst, clsname
async def docStormpkg(pkgpath):
pkgdef = s_genpkg.loadPkgProto(pkgpath)
pkgname = pkgdef.get('name')
rst = s_autodoc.RstHelp()
# Disable default python highlighting
rst.addLines('.. highlight:: none\n')
hname = pkgname
if ':' in pkgname:
hname = pkgname.replace(':', raw_back_slash_colon)
rst.addHead(f'Storm Package\\: {hname}')
lines = ['The following Commands are available from this package.',
f'This documentation is generated for version '
f'{s_version.fmtVersion(*pkgdef.get("version"))} of the package.',
]
rst.addLines(*lines)
commands = pkgdef.get('commands')
if commands:
await processStormCmds(rst, pkgname, commands)
# TODO: Modules are not currently documented.
return rst, pkgname
async def docStormTypes():
registry = s_stormtypes.registry
libsinfo = registry.getLibDocs()
libspage = s_autodoc.RstHelp()
libspage.addHead('Storm Libraries', lvl=0, link='.. _stormtypes-libs-header:')
lines = (
'',
'Storm Libraries represent powerful tools available inside of the Storm query language.',
''
)
libspage.addLines(*lines)
# This value is appended to the end of the ref to the first level header of a type.
# This prevents accidental cross linking between parts of the docs; which can happen
# when secondary properties of a type may overlap with the main name of the type.
types_suffix = 'f527'
s_autodoc.docStormTypes(libspage, libsinfo, linkprefix='stormlibs', islib=True,
known_types=registry.known_types, types_prefix='stormprims', types_suffix=types_suffix)
priminfo = registry.getTypeDocs()
typespage = s_autodoc.RstHelp()
typespage.addHead('Storm Types', lvl=0, link='.. _stormtypes-prim-header:')
lines = (
'',
        'Storm Objects are used as view objects for manipulating data in the Storm Runtime and in the Cortex itself.',
        ''
)
typespage.addLines(*lines)
s_autodoc.docStormTypes(typespage, priminfo, linkprefix='stormprims', known_types=registry.known_types,
types_prefix='stormprims', types_suffix=types_suffix)
return libspage, typespage
async def main(argv, outp=None):
if outp is None:
outp = s_output.OutPut()
pars = makeargparser()
opts = pars.parse_args(argv)
if opts.doc_model:
if opts.cortex:
async with await s_telepath.openurl(opts.cortex) as core:
rsttypes, rstforms = await docModel(outp, core)
else:
async with s_cortex.getTempCortex() as core:
rsttypes, rstforms = await docModel(outp, core)
if opts.savedir:
with open(s_common.genpath(opts.savedir, 'datamodel_types.rst'), 'wb') as fd:
fd.write(rsttypes.getRstText().encode())
with open(s_common.genpath(opts.savedir, 'datamodel_forms.rst'), 'wb') as fd:
fd.write(rstforms.getRstText().encode())
if opts.doc_conf:
confdocs, cname = await docConfdefs(opts.doc_conf,
reflink=opts.doc_conf_reflink,
doc_title=opts.doc_conf_title,
)
if opts.savedir:
with open(s_common.genpath(opts.savedir, f'conf_{cname.lower()}.rst'), 'wb') as fd:
fd.write(confdocs.getRstText().encode())
if opts.doc_storm:
confdocs, svcname = await docStormsvc(opts.doc_storm)
if opts.savedir:
with open(s_common.genpath(opts.savedir, f'stormsvc_{svcname.lower()}.rst'), 'wb') as fd:
fd.write(confdocs.getRstText().encode())
if opts.doc_stormpkg:
pkgdocs, pkgname = await docStormpkg(opts.doc_stormpkg)
if opts.savedir:
with open(s_common.genpath(opts.savedir, f'stormpkg_{pkgname.lower()}.rst'), 'wb') as fd:
fd.write(pkgdocs.getRstText().encode())
if opts.doc_stormtypes:
libdocs, typedocs = await docStormTypes()
if opts.savedir:
with open(s_common.genpath(opts.savedir, f'stormtypes_libs.rst'), 'wb') as fd:
fd.write(libdocs.getRstText().encode())
with open(s_common.genpath(opts.savedir, f'stormtypes_prims.rst'), 'wb') as fd:
fd.write(typedocs.getRstText().encode())
return 0
def makeargparser():
desc = 'Command line tool to generate various synapse documentation.'
pars = argparse.ArgumentParser('synapse.tools.autodoc', description=desc)
pars.add_argument('--cortex', '-c', default=None,
help='Cortex URL for model inspection')
pars.add_argument('--savedir', default=None,
help='Save output to the given directory')
doc_type = pars.add_mutually_exclusive_group()
doc_type.add_argument('--doc-model', action='store_true', default=False,
help='Generate RST docs for the DataModel within a cortex')
doc_type.add_argument('--doc-conf', default=None,
help='Generate RST docs for the Confdefs for a given Cell ctor')
pars.add_argument('--doc-conf-reflink', default=':ref:`devops-cell-config`',
help='Reference link for how to set the cell configuration options.')
pars.add_argument('--doc-conf-title', default=None, type=str,
help='Use a custom string for the document title.')
doc_type.add_argument('--doc-storm', default=None,
                          help='Generate RST docs for a stormsvc implemented by a given Cell')
doc_type.add_argument('--doc-stormpkg', default=None,
help='Generate RST docs for the specified Storm package YAML file.')
doc_type.add_argument('--doc-stormtypes', default=None, action='store_true',
help='Generate RST docs for StormTypes')
return pars
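# Example invocations (output paths are hypothetical):
#   python -m synapse.tools.autodoc --doc-model --savedir docs/datamodel
#   python -m synapse.tools.autodoc --doc-conf synapse.cortex.Cortex --savedir docs/conf
#   python -m synapse.tools.autodoc --doc-stormtypes --savedir docs/stormtypes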
if __name__ == '__main__': # pragma: no cover
s_common.setlogging(logger, 'DEBUG')
asyncio.run(main(sys.argv[1:]))
|
|
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
merge_ordered,
)
import pandas._testing as tm
class TestMergeOrdered:
def setup_method(self):
self.left = DataFrame({"key": ["a", "c", "e"], "lvalue": [1, 2.0, 3]})
self.right = DataFrame({"key": ["b", "c", "d", "f"], "rvalue": [1, 2, 3.0, 4]})
def test_basic(self):
result = merge_ordered(self.left, self.right, on="key")
expected = DataFrame(
{
"key": ["a", "b", "c", "d", "e", "f"],
"lvalue": [1, np.nan, 2, np.nan, 3, np.nan],
"rvalue": [np.nan, 1, 2, 3, np.nan, 4],
}
)
tm.assert_frame_equal(result, expected)
def test_ffill(self):
result = merge_ordered(self.left, self.right, on="key", fill_method="ffill")
expected = DataFrame(
{
"key": ["a", "b", "c", "d", "e", "f"],
"lvalue": [1.0, 1, 2, 2, 3, 3.0],
"rvalue": [np.nan, 1, 2, 3, 3, 4],
}
)
tm.assert_frame_equal(result, expected)
def test_multigroup(self):
left = pd.concat([self.left, self.left], ignore_index=True)
left["group"] = ["a"] * 3 + ["b"] * 3
result = merge_ordered(
left, self.right, on="key", left_by="group", fill_method="ffill"
)
expected = DataFrame(
{
"key": ["a", "b", "c", "d", "e", "f"] * 2,
"lvalue": [1.0, 1, 2, 2, 3, 3.0] * 2,
"rvalue": [np.nan, 1, 2, 3, 3, 4] * 2,
}
)
expected["group"] = ["a"] * 6 + ["b"] * 6
tm.assert_frame_equal(result, expected.loc[:, result.columns])
result2 = merge_ordered(
self.right, left, on="key", right_by="group", fill_method="ffill"
)
tm.assert_frame_equal(result, result2.loc[:, result.columns])
result = merge_ordered(left, self.right, on="key", left_by="group")
assert result["group"].notna().all()
def test_merge_type(self):
class NotADataFrame(DataFrame):
@property
def _constructor(self):
return NotADataFrame
nad = NotADataFrame(self.left)
result = nad.merge(self.right, on="key")
assert isinstance(result, NotADataFrame)
def test_empty_sequence_concat(self):
# GH 9157
empty_pat = "[Nn]o objects"
none_pat = "objects.*None"
test_cases = [
((), empty_pat),
([], empty_pat),
({}, empty_pat),
([None], none_pat),
([None, None], none_pat),
]
for df_seq, pattern in test_cases:
with pytest.raises(ValueError, match=pattern):
pd.concat(df_seq)
        # these should not raise
        pd.concat([DataFrame()])
        pd.concat([None, DataFrame()])
        pd.concat([DataFrame(), None])
def test_doc_example(self):
left = DataFrame(
{
"group": list("aaabbb"),
"key": ["a", "c", "e", "a", "c", "e"],
"lvalue": [1, 2, 3] * 2,
}
)
right = DataFrame({"key": ["b", "c", "d"], "rvalue": [1, 2, 3]})
result = merge_ordered(left, right, fill_method="ffill", left_by="group")
expected = DataFrame(
{
"group": list("aaaaabbbbb"),
"key": ["a", "b", "c", "d", "e"] * 2,
"lvalue": [1, 1, 2, 2, 3] * 2,
"rvalue": [np.nan, 1, 2, 3, 3] * 2,
}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"left, right, on, left_by, right_by, expected",
[
(
DataFrame({"G": ["g", "g"], "H": ["h", "h"], "T": [1, 3]}),
DataFrame({"T": [2], "E": [1]}),
["T"],
["G", "H"],
None,
DataFrame(
{
"G": ["g"] * 3,
"H": ["h"] * 3,
"T": [1, 2, 3],
"E": [np.nan, 1.0, np.nan],
}
),
),
(
DataFrame({"G": ["g", "g"], "H": ["h", "h"], "T": [1, 3]}),
DataFrame({"T": [2], "E": [1]}),
"T",
["G", "H"],
None,
DataFrame(
{
"G": ["g"] * 3,
"H": ["h"] * 3,
"T": [1, 2, 3],
"E": [np.nan, 1.0, np.nan],
}
),
),
(
DataFrame({"T": [2], "E": [1]}),
DataFrame({"G": ["g", "g"], "H": ["h", "h"], "T": [1, 3]}),
["T"],
None,
["G", "H"],
DataFrame(
{
"T": [1, 2, 3],
"E": [np.nan, 1.0, np.nan],
"G": ["g"] * 3,
"H": ["h"] * 3,
}
),
),
],
)
def test_list_type_by(self, left, right, on, left_by, right_by, expected):
# GH 35269
result = merge_ordered(
left=left,
right=right,
on=on,
left_by=left_by,
right_by=right_by,
)
tm.assert_frame_equal(result, expected)
def test_left_by_length_equals_to_right_shape0(self):
# GH 38166
left = DataFrame([["g", "h", 1], ["g", "h", 3]], columns=list("GHE"))
right = DataFrame([[2, 1]], columns=list("ET"))
result = merge_ordered(left, right, on="E", left_by=["G", "H"])
expected = DataFrame(
{"G": ["g"] * 3, "H": ["h"] * 3, "E": [1, 2, 3], "T": [np.nan, 1.0, np.nan]}
)
tm.assert_frame_equal(result, expected)
def test_elements_not_in_by_but_in_df(self):
# GH 38167
left = DataFrame([["g", "h", 1], ["g", "h", 3]], columns=list("GHE"))
right = DataFrame([[2, 1]], columns=list("ET"))
msg = r"\{'h'\} not found in left columns"
with pytest.raises(KeyError, match=msg):
merge_ordered(left, right, on="E", left_by=["G", "h"])
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Copyright (c) 2009-2013 Stanford University
#
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Note in order for this module to work you must have libramcloud.so
# somewhere in a system library path and have run /sbin/ldconfig since
# installing it
import ctypes
import ctypes.util
import os
class RejectRules(ctypes.Structure):
_fields_ = [("given_version", ctypes.c_uint64),
("object_doesnt_exist", ctypes.c_uint8, 8),
("object_exists", ctypes.c_uint8, 8),
("version_eq_given", ctypes.c_uint8, 8),
("version_gt_given", ctypes.c_uint8, 8),
]
def _as_tuple(self):
return (self.object_doesnt_exist, self.object_exists,
self.version_eq_given, self.version_gt_given,
self.given_version)
def __lt__(self, other):
return self._as_tuple() < other._as_tuple()
def __repr__(self):
return 'ramcloud.RejectRules(%s)' % str(self._as_tuple())
@staticmethod
def exactly(want_version):
return RejectRules(object_doesnt_exist=True, version_gt_given=True,
given_version=want_version)
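# A minimal sketch of how RejectRules expresses optimistic concurrency:
# exactly() makes an operation succeed only if the object still exists and
# is still at the wanted version.
def _demo_reject_rules():
    rules = RejectRules.exactly(7)
    assert rules.given_version == 7
    assert rules.object_doesnt_exist == 1  # reject if the object is gone
    assert rules.version_gt_given == 1     # reject if someone else wrote v8+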
def get_library_path():
path = None
if 'LD_LIBRARY_PATH' in os.environ:
for search_dir in os.environ['LD_LIBRARY_PATH'].split(':'):
test_path = os.path.join(search_dir, 'libramcloud.so')
if os.path.exists(test_path):
path = test_path
break
if not path:
path = ctypes.util.find_library('ramcloud')
return path
def load_so():
not_found = ImportError("Couldn't find libramcloud.so, ensure it is "
"installed and that you have registered it with "
"/sbin/ldconfig")
# try to find the overridden path first, if possible using
# LD_LIBRARY_PATH which means we don't have to install the so
# during devel
path = get_library_path()
if not path:
raise not_found
try:
so = ctypes.cdll.LoadLibrary(path)
except OSError as e:
if 'No such file or directory' in str(e):
raise not_found
else:
raise
def malloc_errcheck(result, func, arguments):
if result == 0:
raise MemoryError()
return result
# ctypes.c_bool was introduced in Python 2.6
if not hasattr(ctypes, 'c_bool'):
class c_bool_compat(ctypes.c_uint8):
def __init__(self, value=None):
if value:
ctypes.c_uint8.__init__(self, 1)
else:
ctypes.c_uint8.__init__(self, 0)
@staticmethod
def from_param(param):
if param:
return ctypes.c_uint8(1)
else:
return ctypes.c_uint8(0)
ctypes.c_bool = c_bool_compat
# argument types aliased to their names for sanity
# alphabetical order
address = ctypes.c_char_p
buf = ctypes.c_void_p
data = ctypes.c_void_p
client = ctypes.c_void_p
enumerationState = ctypes.c_void_p
enum_key = ctypes.c_void_p
key = ctypes.c_char_p
keyLength = ctypes.c_uint16
keyLen = ctypes.c_uint32
len = ctypes.c_uint32
dataLength = ctypes.c_uint32
keysOnly = ctypes.c_uint32
name = ctypes.c_char_p
nanoseconds = ctypes.c_uint64
rejectRules = ctypes.POINTER(RejectRules)
serviceLocator = ctypes.c_char_p
status = ctypes.c_int
table = ctypes.c_uint64
version = ctypes.c_uint64
serverId = ctypes.c_uint64
so.rc_connect.argtypes = [address, address, ctypes.POINTER(client)]
so.rc_connect.restype = status
so.rc_disconnect.argtypes = [client]
so.rc_disconnect.restype = None
    so.rc_createTable.argtypes = [client, name, ctypes.c_uint32]
so.rc_createTable.restype = status
so.rc_dropTable.argtypes = [client, name]
so.rc_dropTable.restype = status
so.rc_getStatus.argtypes = []
so.rc_getStatus.restype = status
so.rc_getTableId.argtypes = [client, name, ctypes.POINTER(table)]
so.rc_getTableId.restype = status
so.rc_enumerateTablePrepare.argtypes = [client, table, keysOnly,
ctypes.POINTER(enumerationState)]
so.rc_enumerateTablePrepare.restype = None
so.rc_enumerateTableNext.argtypes = [client, enumerationState,
ctypes.POINTER(keyLen),
ctypes.POINTER(enum_key),
ctypes.POINTER(dataLength),
ctypes.POINTER(data)]
    so.rc_enumerateTableNext.restype = status
so.rc_enumerateTableFinalize.argtypes = [enumerationState]
so.rc_enumerateTableFinalize.restype = None
so.rc_read.argtypes = [client, table, key, keyLength, rejectRules,
ctypes.POINTER(version), buf, len,
ctypes.POINTER(len)]
so.rc_read.restype = status
so.rc_remove.argtypes = [client, table, key, keyLength, rejectRules,
ctypes.POINTER(version)]
so.rc_remove.restype = status
so.rc_write.argtypes = [client, table, key, keyLength, buf, len,
rejectRules, ctypes.POINTER(version)]
so.rc_write.restype = status
so.rc_testing_kill.argtypes = [client, table, key, keyLength]
so.rc_testing_kill.restype = status
so.rc_testing_fill.argtypes = [client, table, key, keyLength,
ctypes.c_uint32, ctypes.c_uint32]
so.rc_testing_fill.restype = status
so.rc_testing_get_server_id.argtypes = [client, table, key, keyLength,
ctypes.POINTER(serverId)]
so.rc_testing_get_server_id.restype = status
so.rc_testing_get_service_locator.argtypes = [client, table, key,
keyLength, serviceLocator,
ctypes.c_size_t]
so.rc_testing_get_service_locator.restype = status
so.rc_set_runtime_option.argtypes = [client,
ctypes.c_char_p,
ctypes.c_char_p]
so.rc_set_runtime_option.restype = status
so.rc_testing_wait_for_all_tablets_normal.argtypes = [client, nanoseconds]
so.rc_testing_wait_for_all_tablets_normal.restype = None
so.rc_set_log_file.argtypes = [ctypes.c_char_p]
so.rc_set_log_file.restype = None
return so
def _ctype_copy(addr, var, width):
ctypes.memmove(addr, ctypes.addressof(var), width)
return addr + width
def get_key(id):
if type(id) is int:
return str(id)
else:
return id
def get_keyLength(id):
return len(str(id))
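# A small sketch: numeric ids are transparently keyed by their decimal
# string form, so get_key(42) and get_key('42') address the same object.
def _demo_keys():
    assert get_key(42) == '42' == get_key('42')
    assert get_keyLength(42) == 2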
class RCException(Exception):
def __init__(self, status):
super(RCException, self).__init__('RAMCloud error ' + str(status))
self.status = status
class NoObjectError(Exception):
pass
class ObjectExistsError(Exception):
pass
class VersionError(Exception):
def __init__(self, want_version, got_version):
Exception.__init__(self, "Bad version: want %d but got %d" %
(want_version, got_version))
self.want_version = want_version
self.got_version = got_version
class RAMCloud(object):
def __init__(self):
self.client = ctypes.c_void_p()
self.hook = lambda: None
def __del__(self):
if self.client.value is not None:
so.rc_disconnect(self.client)
def handle_error(self, status, actual_version=0, given_version=0):
if status == 0:
return
if status == 2:
raise NoObjectError()
if status == 3:
raise ObjectExistsError()
if status == 5:
raise VersionError(given_version, actual_version)
raise RCException(status)
def connect(self, serverLocator='fast+udp:host=127.0.0.1,port=12246',
clusterName='main'):
s = so.rc_connect(serverLocator, clusterName,
ctypes.byref(self.client))
self.handle_error(s)
def enumerate_table_prepare(self, table_id):
enumeration_state = ctypes.c_void_p()
so.rc_enumerateTablePrepare(self.client, table_id, 0,
ctypes.byref(enumeration_state))
return enumeration_state
def enumerate_table_next(self, enumeration_state):
key_length = ctypes.c_uint32()
data_length = ctypes.c_uint32()
data = ctypes.c_void_p()
key = ctypes.c_void_p()
s = so.rc_enumerateTableNext(self.client, enumeration_state,
ctypes.byref(key_length),
ctypes.byref(key),
ctypes.byref(data_length),
ctypes.byref(data))
key_l = key_length.value
data_l = data_length.value
dataPtr = ctypes.cast(data, ctypes.POINTER(ctypes.c_char))
keyPtr = ctypes.cast(key, ctypes.POINTER(ctypes.c_char))
data_s = ''
key_s = ''
if (key_l != 0):
for i in range(0, data_l):
data_s = data_s + dataPtr[i]
for i in range(0, key_l):
key_s = key_s + keyPtr[i]
self.handle_error(s)
return (key_s, data_s)
def enumerate_table_finalize(self, enumeration_state):
so.rc_enumerateTableFinalize(enumeration_state)
def create(self, table_id, id, data):
reject_rules = RejectRules(object_exists=True)
return self.write_rr(table_id, id, data, reject_rules)
def create_table(self, name, serverSpan=1):
s = so.rc_createTable(self.client, name, serverSpan)
self.handle_error(s)
def delete(self, table_id, id, want_version=None):
if want_version:
reject_rules = RejectRules.exactly(want_version)
else:
reject_rules = RejectRules(object_doesnt_exist=True)
return self.delete_rr(table_id, id, reject_rules)
def delete_rr(self, table_id, id, reject_rules):
got_version = ctypes.c_uint64()
self.hook()
s = so.rc_remove(self.client, table_id, get_key(id), get_keyLength(id),
ctypes.byref(reject_rules), ctypes.byref(got_version))
self.handle_error(s, got_version.value)
return got_version.value
def drop_table(self, name):
s = so.rc_dropTable(self.client, name)
self.handle_error(s)
def get_table_id(self, name):
handle = ctypes.c_uint64()
s = so.rc_getTableId(self.client, name, ctypes.byref(handle))
self.handle_error(s)
return handle.value
def ping(self, serviceLocator, nonce, nanoseconds):
result = ctypes.c_uint64()
s = so.rc_ping(self.client, serviceLocator, nonce, nanoseconds,
ctypes.byref(result))
self.handle_error(s)
return result
def read(self, table_id, id, want_version=None):
if want_version:
reject_rules = RejectRules.exactly(want_version)
else:
reject_rules = RejectRules(object_doesnt_exist=True)
return self.read_rr(table_id, id, reject_rules)
def read_rr(self, table_id, id, reject_rules):
max_length = 1024 * 1024 * 2
buf = ctypes.create_string_buffer(max_length)
actual_length = ctypes.c_uint32()
got_version = ctypes.c_uint64()
reject_rules.object_doesnt_exist = False
self.hook()
s = so.rc_read(self.client, table_id, get_key(id), get_keyLength(id),
ctypes.byref(reject_rules),
ctypes.byref(got_version), ctypes.byref(buf),
max_length, ctypes.byref(actual_length))
self.handle_error(s, got_version.value)
return (buf.raw[0:actual_length.value], got_version.value)
def update(self, table_id, id, data, want_version=None):
if want_version:
reject_rules = RejectRules.exactly(want_version)
else:
reject_rules = RejectRules(object_doesnt_exist=True)
return self.write_rr(table_id, id, data, reject_rules)
def write(self, table_id, id, data, want_version=None):
if want_version:
reject_rules = RejectRules(version_gt_given=True,
given_version=want_version)
else:
reject_rules = RejectRules()
return self.write_rr(table_id, id, data, reject_rules)
def write_rr(self, table_id, id, data, reject_rules):
got_version = ctypes.c_uint64()
self.hook()
s = so.rc_write(self.client, table_id, get_key(id), get_keyLength(id),
data, len(data),
ctypes.byref(reject_rules), ctypes.byref(got_version))
self.handle_error(s, got_version.value)
return got_version.value
def testing_kill(self, table_id, id):
s = so.rc_testing_kill(self.client, table_id,
get_key(id), get_keyLength(id))
self.handle_error(s)
def testing_fill(self, table_id, id, object_count, object_size):
s = so.rc_testing_fill(self.client, table_id,
get_key(id), get_keyLength(id),
object_count, object_size)
self.handle_error(s)
def testing_get_server_id(self, table_id, id):
cserver_id = ctypes.c_uint64()
s = so.rc_testing_get_server_id(self.client, table_id, get_key(id),
get_keyLength(id),
ctypes.byref(cserver_id))
self.handle_error(s)
return cserver_id.value
def testing_get_service_locator(self, table_id, id):
max_len = 128
buffer = ctypes.create_string_buffer(max_len)
s = so.rc_testing_get_service_locator(self.client,
table_id, get_key(id),
get_keyLength(id),
buffer, max_len)
self.handle_error(s)
return buffer.value
def testing_set_runtime_option(self, option, value):
so.rc_set_runtime_option(self.client, option, value)
def testing_wait_for_all_tablets_normal(self, timeoutNs=2 ** 64 - 1):
so.rc_testing_wait_for_all_tablets_normal(self.client, timeoutNs)
def set_log_file(self, path):
so.rc_set_log_file(path)
so = load_so()
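# An illustrative client lifecycle (requires a running RAMCloud cluster;
# the service locator below is hypothetical):
def _demo_ramcloud():
    rc = RAMCloud()
    rc.connect('fast+udp:host=127.0.0.1,port=12246')
    rc.create_table('demo')
    tid = rc.get_table_id('demo')
    version = rc.write(tid, 0, 'hello')
    value, got_version = rc.read(tid, 0)
    assert value == 'hello' and got_version == version
    rc.drop_table('demo')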
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 on 2017-03-22.
# 2017, SMART Health IT.
import io
import json
import os
import unittest
from . import processrequest
from .fhirdate import FHIRDate
class ProcessRequestTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("ProcessRequest", js["resourceType"])
return processrequest.ProcessRequest(js)
def testProcessRequest1(self):
inst = self.instantiate_from("processrequest-example-poll-eob.json")
self.assertIsNotNone(inst, "Must have instantiated a ProcessRequest instance")
self.implProcessRequest1(inst)
js = inst.as_json()
self.assertEqual("ProcessRequest", js["resourceType"])
inst2 = processrequest.ProcessRequest(js)
self.implProcessRequest1(inst2)
def implProcessRequest1(self, inst):
self.assertEqual(inst.action, "poll")
self.assertEqual(inst.created.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.created.as_json(), "2014-08-16")
self.assertEqual(inst.id, "1115")
self.assertEqual(inst.identifier[0].system, "http://www.phr.com/patient/12345/processrequest")
self.assertEqual(inst.identifier[0].value, "115")
self.assertEqual(inst.include[0], "ExplanationOfBenefit")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of the Poll ProcessRequest</div>")
self.assertEqual(inst.text.status, "generated")
def testProcessRequest2(self):
inst = self.instantiate_from("processrequest-example-poll-exclusive.json")
self.assertIsNotNone(inst, "Must have instantiated a ProcessRequest instance")
self.implProcessRequest2(inst)
js = inst.as_json()
self.assertEqual("ProcessRequest", js["resourceType"])
inst2 = processrequest.ProcessRequest(js)
self.implProcessRequest2(inst2)
def implProcessRequest2(self, inst):
self.assertEqual(inst.action, "poll")
self.assertEqual(inst.created.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.created.as_json(), "2014-08-16")
self.assertEqual(inst.exclude[0], "Communication")
self.assertEqual(inst.exclude[1], "PaymentReconciliation")
self.assertEqual(inst.id, "1113")
self.assertEqual(inst.identifier[0].system, "http://happyvalley.com/processrequest")
self.assertEqual(inst.identifier[0].value, "113")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of the Poll ProcessRequest</div>")
self.assertEqual(inst.text.status, "generated")
def testProcessRequest3(self):
inst = self.instantiate_from("processrequest-example-poll-inclusive.json")
self.assertIsNotNone(inst, "Must have instantiated a ProcessRequest instance")
self.implProcessRequest3(inst)
js = inst.as_json()
self.assertEqual("ProcessRequest", js["resourceType"])
inst2 = processrequest.ProcessRequest(js)
self.implProcessRequest3(inst2)
def implProcessRequest3(self, inst):
self.assertEqual(inst.action, "poll")
self.assertEqual(inst.created.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.created.as_json(), "2014-08-16")
self.assertEqual(inst.id, "1112")
self.assertEqual(inst.identifier[0].system, "http://happyvalley.com/processrequest")
self.assertEqual(inst.identifier[0].value, "112")
self.assertEqual(inst.include[0], "PaymentReconciliation")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of the Poll ProcessRequest</div>")
self.assertEqual(inst.text.status, "generated")
def testProcessRequest4(self):
inst = self.instantiate_from("processrequest-example-poll-payrec.json")
self.assertIsNotNone(inst, "Must have instantiated a ProcessRequest instance")
self.implProcessRequest4(inst)
js = inst.as_json()
self.assertEqual("ProcessRequest", js["resourceType"])
inst2 = processrequest.ProcessRequest(js)
self.implProcessRequest4(inst2)
def implProcessRequest4(self, inst):
self.assertEqual(inst.action, "poll")
self.assertEqual(inst.created.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.created.as_json(), "2014-08-16")
self.assertEqual(inst.id, "1114")
self.assertEqual(inst.identifier[0].system, "http://happyvalley.com/processrequest")
self.assertEqual(inst.identifier[0].value, "114")
self.assertEqual(inst.include[0], "PaymentReconciliation")
self.assertEqual(inst.period.end.date, FHIRDate("2014-08-20").date)
self.assertEqual(inst.period.end.as_json(), "2014-08-20")
self.assertEqual(inst.period.start.date, FHIRDate("2014-08-10").date)
self.assertEqual(inst.period.start.as_json(), "2014-08-10")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of the Poll ProcessRequest</div>")
self.assertEqual(inst.text.status, "generated")
def testProcessRequest5(self):
inst = self.instantiate_from("processrequest-example-poll-specific.json")
self.assertIsNotNone(inst, "Must have instantiated a ProcessRequest instance")
self.implProcessRequest5(inst)
js = inst.as_json()
self.assertEqual("ProcessRequest", js["resourceType"])
inst2 = processrequest.ProcessRequest(js)
self.implProcessRequest5(inst2)
def implProcessRequest5(self, inst):
self.assertEqual(inst.action, "poll")
self.assertEqual(inst.created.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.created.as_json(), "2014-08-16")
self.assertEqual(inst.id, "1111")
self.assertEqual(inst.identifier[0].system, "http://happyvalley.com/processrequest")
self.assertEqual(inst.identifier[0].value, "111")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of the Poll ProcessRequest</div>")
self.assertEqual(inst.text.status, "generated")
def testProcessRequest6(self):
inst = self.instantiate_from("processrequest-example-reprocess.json")
self.assertIsNotNone(inst, "Must have instantiated a ProcessRequest instance")
self.implProcessRequest6(inst)
js = inst.as_json()
self.assertEqual("ProcessRequest", js["resourceType"])
inst2 = processrequest.ProcessRequest(js)
self.implProcessRequest6(inst2)
def implProcessRequest6(self, inst):
self.assertEqual(inst.action, "reprocess")
self.assertEqual(inst.created.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.created.as_json(), "2014-08-16")
self.assertEqual(inst.id, "44654")
self.assertEqual(inst.identifier[0].system, "http://happyvalley.com/processrequest")
self.assertEqual(inst.identifier[0].value, "44543")
self.assertEqual(inst.item[0].sequenceLinkId, 1)
self.assertEqual(inst.reference, "ABC12345G")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of the ReProcess ProcessRequest resource.</div>")
self.assertEqual(inst.text.status, "generated")
def testProcessRequest7(self):
inst = self.instantiate_from("processrequest-example-reverse.json")
self.assertIsNotNone(inst, "Must have instantiated a ProcessRequest instance")
self.implProcessRequest7(inst)
js = inst.as_json()
self.assertEqual("ProcessRequest", js["resourceType"])
inst2 = processrequest.ProcessRequest(js)
self.implProcessRequest7(inst2)
def implProcessRequest7(self, inst):
self.assertEqual(inst.action, "cancel")
self.assertEqual(inst.created.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.created.as_json(), "2014-08-16")
self.assertEqual(inst.id, "87654")
self.assertEqual(inst.identifier[0].system, "http://happyvalley.com/processrequest")
self.assertEqual(inst.identifier[0].value, "76543")
self.assertFalse(inst.nullify)
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of the Reversal ProcessRequest</div>")
self.assertEqual(inst.text.status, "generated")
def testProcessRequest8(self):
inst = self.instantiate_from("processrequest-example-status.json")
self.assertIsNotNone(inst, "Must have instantiated a ProcessRequest instance")
self.implProcessRequest8(inst)
js = inst.as_json()
self.assertEqual("ProcessRequest", js["resourceType"])
inst2 = processrequest.ProcessRequest(js)
self.implProcessRequest8(inst2)
def implProcessRequest8(self, inst):
self.assertEqual(inst.action, "status")
self.assertEqual(inst.created.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.created.as_json(), "2014-08-16")
self.assertEqual(inst.id, "87655")
self.assertEqual(inst.identifier[0].system, "http://happyvalley.com/processrequest")
self.assertEqual(inst.identifier[0].value, "1776543")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of the Status ProcessRequest</div>")
self.assertEqual(inst.text.status, "generated")
def testProcessRequest9(self):
inst = self.instantiate_from("processrequest-example.json")
self.assertIsNotNone(inst, "Must have instantiated a ProcessRequest instance")
self.implProcessRequest9(inst)
js = inst.as_json()
self.assertEqual("ProcessRequest", js["resourceType"])
inst2 = processrequest.ProcessRequest(js)
self.implProcessRequest9(inst2)
def implProcessRequest9(self, inst):
self.assertEqual(inst.action, "poll")
self.assertEqual(inst.created.date, FHIRDate("2014-08-16").date)
self.assertEqual(inst.created.as_json(), "2014-08-16")
self.assertEqual(inst.id, "1110")
self.assertEqual(inst.identifier[0].system, "http://happyvalley.com/processrequest")
self.assertEqual(inst.identifier[0].value, "110")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of the Poll ProcessRequest</div>")
self.assertEqual(inst.text.status, "generated")
|
|
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""A basic AlphaZero implementation.
This implements the AlphaZero training algorithm. It spawns N actors which feed
trajectories into a replay buffer which are consumed by a learner. The learner
generates new weights, saves a checkpoint, and tells the actors to update. There
are also M evaluators running games continuously against a standard MCTS+Solver,
though each at a different difficulty (i.e. the number of simulations for MCTS).
Due to the multi-process nature of this algorithm, the logs are written to files,
one per process. The learner logs are also output to stdout. The checkpoints are
also written to the same directory.
Links to relevant articles/papers:
https://deepmind.com/blog/article/alphago-zero-starting-scratch has an open
access link to the AlphaGo Zero nature paper.
https://deepmind.com/blog/article/alphazero-shedding-new-light-grand-games-chess-shogi-and-go
has an open access link to the AlphaZero science paper.
"""
import collections
import datetime
import functools
import itertools
import json
import os
import random
import sys
import tempfile
import time
import traceback
import numpy as np
from open_spiel.python.algorithms import mcts
from open_spiel.python.algorithms.alpha_zero import evaluator as evaluator_lib
from open_spiel.python.algorithms.alpha_zero import model as model_lib
import pyspiel
from open_spiel.python.utils import data_logger
from open_spiel.python.utils import file_logger
from open_spiel.python.utils import spawn
from open_spiel.python.utils import stats
# Time to wait for processes to join.
JOIN_WAIT_DELAY = 0.001
class TrajectoryState(object):
"""A particular point along a trajectory."""
def __init__(self, observation, current_player, legals_mask, action, policy,
value):
self.observation = observation
self.current_player = current_player
self.legals_mask = legals_mask
self.action = action
self.policy = policy
self.value = value
class Trajectory(object):
"""A sequence of observations, actions and policies, and the outcomes."""
def __init__(self):
self.states = []
self.returns = None
def add(self, information_state, action, policy):
self.states.append((information_state, action, policy))
class Buffer(object):
"""A fixed size buffer that keeps the newest values."""
def __init__(self, max_size):
self.max_size = max_size
self.data = []
self.total_seen = 0 # The number of items that have passed through.
def __len__(self):
return len(self.data)
def __bool__(self):
return bool(self.data)
def append(self, val):
return self.extend([val])
def extend(self, batch):
batch = list(batch)
self.total_seen += len(batch)
self.data.extend(batch)
self.data[:-self.max_size] = []
def sample(self, count):
return random.sample(self.data, count)
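# Buffer behavior sketch (illustrative): a Buffer(3) extended with
# [1, 2, 3, 4] keeps only the newest values [2, 3, 4], while total_seen
# still reports the 4 items that passed through.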
class Config(collections.namedtuple(
"Config", [
"game",
"path",
"learning_rate",
"weight_decay",
"train_batch_size",
"replay_buffer_size",
"replay_buffer_reuse",
"max_steps",
"checkpoint_freq",
"actors",
"evaluators",
"evaluation_window",
"eval_levels",
"uct_c",
"max_simulations",
"policy_alpha",
"policy_epsilon",
"temperature",
"temperature_drop",
"nn_model",
"nn_width",
"nn_depth",
"observation_shape",
"output_size",
"quiet",
])):
"""A config for the model/experiment."""
def _init_model_from_config(config):
return model_lib.Model.build_model(
config.nn_model,
config.observation_shape,
config.output_size,
config.nn_width,
config.nn_depth,
config.weight_decay,
config.learning_rate,
config.path)
def watcher(fn):
"""A decorator to fn/processes that gives a logger and logs exceptions."""
@functools.wraps(fn)
def _watcher(*, config, num=None, **kwargs):
"""Wrap the decorated function."""
name = fn.__name__
if num is not None:
name += "-" + str(num)
with file_logger.FileLogger(config.path, name, config.quiet) as logger:
print("{} started".format(name))
logger.print("{} started".format(name))
try:
return fn(config=config, logger=logger, **kwargs)
except Exception as e:
logger.print("\n".join([
"",
" Exception caught ".center(60, "="),
traceback.format_exc(),
"=" * 60,
]))
print("Exception caught in {}: {}".format(name, e))
raise
finally:
logger.print("{} exiting".format(name))
print("{} exiting".format(name))
return _watcher
def _init_bot(config, game, evaluator_, evaluation):
"""Initializes a bot."""
noise = None if evaluation else (config.policy_epsilon, config.policy_alpha)
return mcts.MCTSBot(
game,
config.uct_c,
config.max_simulations,
evaluator_,
solve=False,
dirichlet_noise=noise,
child_selection_fn=mcts.SearchNode.puct_value,
verbose=False,
dont_return_chance_node=True)
def _play_game(logger, game_num, game, bots, temperature, temperature_drop):
"""Play one game, return the trajectory."""
trajectory = Trajectory()
actions = []
state = game.new_initial_state()
random_state = np.random.RandomState()
logger.opt_print(" Starting game {} ".format(game_num).center(60, "-"))
logger.opt_print("Initial state:\n{}".format(state))
while not state.is_terminal():
if state.is_chance_node():
# For chance nodes, rollout according to chance node's probability
# distribution
outcomes = state.chance_outcomes()
action_list, prob_list = zip(*outcomes)
action = random_state.choice(action_list, p=prob_list)
state.apply_action(action)
else:
root = bots[state.current_player()].mcts_search(state)
policy = np.zeros(game.num_distinct_actions())
for c in root.children:
policy[c.action] = c.explore_count
policy = policy**(1 / temperature)
policy /= policy.sum()
if len(actions) >= temperature_drop:
action = root.best_child().action
else:
action = np.random.choice(len(policy), p=policy)
trajectory.states.append(
TrajectoryState(state.observation_tensor(), state.current_player(),
state.legal_actions_mask(), action, policy,
root.total_reward / root.explore_count))
action_str = state.action_to_string(state.current_player(), action)
actions.append(action_str)
logger.opt_print("Player {} sampled action: {}".format(
state.current_player(), action_str))
state.apply_action(action)
logger.opt_print("Next state:\n{}".format(state))
trajectory.returns = state.returns()
logger.print("Game {}: Returns: {}; Actions: {}".format(
game_num, " ".join(map(str, trajectory.returns)), " ".join(actions)))
return trajectory
def update_checkpoint(logger, queue, model, az_evaluator):
"""Read the queue for a checkpoint to load, or an exit signal."""
path = None
while True: # Get the last message, ignore intermediate ones.
try:
path = queue.get_nowait()
except spawn.Empty:
break
if path:
logger.print("Inference cache:", az_evaluator.cache_info())
logger.print("Loading checkpoint", path)
model.load_checkpoint(path)
az_evaluator.clear_cache()
elif path is not None: # Empty string means stop this process.
return False
return True
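# Queue protocol sketch (matches broadcast() in alpha_zero() below): putting a
# checkpoint path on the queue asks a worker to reload the model, while
# putting the empty string asks it to exit.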
@watcher
def actor(*, config, game, logger, queue):
"""An actor process runner that generates games and returns trajectories."""
logger.print("Initializing model")
model = _init_model_from_config(config)
logger.print("Initializing bots")
az_evaluator = evaluator_lib.AlphaZeroEvaluator(game, model)
bots = [
_init_bot(config, game, az_evaluator, False),
_init_bot(config, game, az_evaluator, False),
]
for game_num in itertools.count():
if not update_checkpoint(logger, queue, model, az_evaluator):
return
queue.put(_play_game(logger, game_num, game, bots, config.temperature,
config.temperature_drop))
@watcher
def evaluator(*, game, config, logger, queue):
"""A process that plays the latest checkpoint vs standard MCTS."""
results = Buffer(config.evaluation_window)
logger.print("Initializing model")
model = _init_model_from_config(config)
logger.print("Initializing bots")
az_evaluator = evaluator_lib.AlphaZeroEvaluator(game, model)
random_evaluator = mcts.RandomRolloutEvaluator()
for game_num in itertools.count():
if not update_checkpoint(logger, queue, model, az_evaluator):
return
az_player = game_num % 2
difficulty = (game_num // 2) % config.eval_levels
max_simulations = int(config.max_simulations * (10 ** (difficulty / 2)))
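    # Illustrative scaling: each difficulty level multiplies the simulation
    # budget by sqrt(10) ~= 3.16x, so the hardest level runs roughly
    # max_simulations * 10**((eval_levels - 1) / 2) simulations per move.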
bots = [
_init_bot(config, game, az_evaluator, True),
mcts.MCTSBot(
game,
config.uct_c,
max_simulations,
random_evaluator,
solve=True,
verbose=False,
dont_return_chance_node=True)
]
if az_player == 1:
bots = list(reversed(bots))
trajectory = _play_game(logger, game_num, game, bots, temperature=1,
temperature_drop=0)
results.append(trajectory.returns[az_player])
queue.put((difficulty, trajectory.returns[az_player]))
logger.print("AZ: {}, MCTS: {}, AZ avg/{}: {:.3f}".format(
trajectory.returns[az_player],
trajectory.returns[1 - az_player],
len(results), np.mean(results.data)))
@watcher
def learner(*, game, config, actors, evaluators, broadcast_fn, logger):
"""A learner that consumes the replay buffer and trains the network."""
logger.also_to_stdout = True
replay_buffer = Buffer(config.replay_buffer_size)
learn_rate = config.replay_buffer_size // config.replay_buffer_reuse
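  # E.g. (illustrative numbers) replay_buffer_size=2**16 with
  # replay_buffer_reuse=4 waits for 16384 fresh states per step, so each
  # state is kept for about 4 training steps before being evicted.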
logger.print("Initializing model")
model = _init_model_from_config(config)
logger.print("Model type: %s(%s, %s)" % (config.nn_model, config.nn_width,
config.nn_depth))
logger.print("Model size:", model.num_trainable_variables, "variables")
save_path = model.save_checkpoint(0)
logger.print("Initial checkpoint:", save_path)
broadcast_fn(save_path)
data_log = data_logger.DataLoggerJsonLines(config.path, "learner", True)
stage_count = 7
value_accuracies = [stats.BasicStats() for _ in range(stage_count)]
value_predictions = [stats.BasicStats() for _ in range(stage_count)]
game_lengths = stats.BasicStats()
game_lengths_hist = stats.HistogramNumbered(game.max_game_length() + 1)
outcomes = stats.HistogramNamed(["Player1", "Player2", "Draw"])
evals = [Buffer(config.evaluation_window) for _ in range(config.eval_levels)]
total_trajectories = 0
def trajectory_generator():
"""Merge all the actor queues into a single generator."""
while True:
found = 0
for actor_process in actors:
try:
yield actor_process.queue.get_nowait()
except spawn.Empty:
pass
else:
found += 1
if found == 0:
time.sleep(0.01) # 10ms
def collect_trajectories():
"""Collects the trajectories from actors into the replay buffer."""
num_trajectories = 0
num_states = 0
for trajectory in trajectory_generator():
num_trajectories += 1
num_states += len(trajectory.states)
game_lengths.add(len(trajectory.states))
game_lengths_hist.add(len(trajectory.states))
p1_outcome = trajectory.returns[0]
if p1_outcome > 0:
outcomes.add(0)
elif p1_outcome < 0:
outcomes.add(1)
else:
outcomes.add(2)
replay_buffer.extend(
model_lib.TrainInput(
s.observation, s.legals_mask, s.policy, p1_outcome)
for s in trajectory.states)
for stage in range(stage_count):
# Scale for the length of the game
index = (len(trajectory.states) - 1) * stage // (stage_count - 1)
n = trajectory.states[index]
accurate = (n.value >= 0) == (trajectory.returns[n.current_player] >= 0)
value_accuracies[stage].add(1 if accurate else 0)
value_predictions[stage].add(abs(n.value))
if num_states >= learn_rate:
break
return num_trajectories, num_states
def learn(step):
"""Sample from the replay buffer, update weights and save a checkpoint."""
losses = []
for _ in range(len(replay_buffer) // config.train_batch_size):
data = replay_buffer.sample(config.train_batch_size)
losses.append(model.update(data))
# Always save a checkpoint, either for keeping or for loading the weights to
    # the actors. Checkpoint names only allow numbers, so -1 is used as "latest".
save_path = model.save_checkpoint(
step if step % config.checkpoint_freq == 0 else -1)
losses = sum(losses, model_lib.Losses(0, 0, 0)) / len(losses)
logger.print(losses)
logger.print("Checkpoint saved:", save_path)
return save_path, losses
last_time = time.time() - 60
for step in itertools.count(1):
for value_accuracy in value_accuracies:
value_accuracy.reset()
for value_prediction in value_predictions:
value_prediction.reset()
game_lengths.reset()
game_lengths_hist.reset()
outcomes.reset()
num_trajectories, num_states = collect_trajectories()
total_trajectories += num_trajectories
now = time.time()
seconds = now - last_time
last_time = now
logger.print("Step:", step)
logger.print(
("Collected {:5} states from {:3} games, {:.1f} states/s. "
"{:.1f} states/(s*actor), game length: {:.1f}").format(
num_states, num_trajectories, num_states / seconds,
num_states / (config.actors * seconds),
num_states / num_trajectories))
logger.print("Buffer size: {}. States seen: {}".format(
len(replay_buffer), replay_buffer.total_seen))
save_path, losses = learn(step)
for eval_process in evaluators:
while True:
try:
difficulty, outcome = eval_process.queue.get_nowait()
evals[difficulty].append(outcome)
except spawn.Empty:
break
batch_size_stats = stats.BasicStats() # Only makes sense in C++.
batch_size_stats.add(1)
data_log.write({
"step": step,
"total_states": replay_buffer.total_seen,
"states_per_s": num_states / seconds,
"states_per_s_actor": num_states / (config.actors * seconds),
"total_trajectories": total_trajectories,
"trajectories_per_s": num_trajectories / seconds,
"queue_size": 0, # Only available in C++.
"game_length": game_lengths.as_dict,
"game_length_hist": game_lengths_hist.data,
"outcomes": outcomes.data,
"value_accuracy": [v.as_dict for v in value_accuracies],
"value_prediction": [v.as_dict for v in value_predictions],
"eval": {
"count": evals[0].total_seen,
"results": [sum(e.data) / len(e) if e else 0 for e in evals],
},
"batch_size": batch_size_stats.as_dict,
"batch_size_hist": [0, 1],
"loss": {
"policy": losses.policy,
"value": losses.value,
"l2reg": losses.l2,
"sum": losses.total,
},
"cache": { # Null stats because it's hard to report between processes.
"size": 0,
"max_size": 0,
"usage": 0,
"requests": 0,
"requests_per_s": 0,
"hits": 0,
"misses": 0,
"misses_per_s": 0,
"hit_rate": 0,
},
})
logger.print()
if config.max_steps > 0 and step >= config.max_steps:
break
broadcast_fn(save_path)
def alpha_zero(config: Config):
"""Start all the worker processes for a full alphazero setup."""
game = pyspiel.load_game(config.game)
config = config._replace(
observation_shape=game.observation_tensor_shape(),
output_size=game.num_distinct_actions())
print("Starting game", config.game)
if game.num_players() != 2:
sys.exit("AlphaZero can only handle 2-player games.")
game_type = game.get_type()
if game_type.reward_model != pyspiel.GameType.RewardModel.TERMINAL:
raise ValueError("Game must have terminal rewards.")
if game_type.dynamics != pyspiel.GameType.Dynamics.SEQUENTIAL:
raise ValueError("Game must have sequential turns.")
if game_type.chance_mode != pyspiel.GameType.ChanceMode.DETERMINISTIC:
raise ValueError("Game must be deterministic.")
path = config.path
if not path:
path = tempfile.mkdtemp(prefix="az-{}-{}-".format(
datetime.datetime.now().strftime("%Y-%m-%d-%H-%M"), config.game))
config = config._replace(path=path)
if not os.path.exists(path):
os.makedirs(path)
if not os.path.isdir(path):
sys.exit("{} isn't a directory".format(path))
print("Writing logs and checkpoints to:", path)
print("Model type: %s(%s, %s)" % (config.nn_model, config.nn_width,
config.nn_depth))
with open(os.path.join(config.path, "config.json"), "w") as fp:
fp.write(json.dumps(config._asdict(), indent=2, sort_keys=True) + "\n")
actors = [spawn.Process(actor, kwargs={"game": game, "config": config,
"num": i})
for i in range(config.actors)]
evaluators = [spawn.Process(evaluator, kwargs={"game": game, "config": config,
"num": i})
for i in range(config.evaluators)]
def broadcast(msg):
for proc in actors + evaluators:
proc.queue.put(msg)
try:
learner(game=game, config=config, actors=actors, # pylint: disable=missing-kwoa
evaluators=evaluators, broadcast_fn=broadcast)
except (KeyboardInterrupt, EOFError):
print("Caught a KeyboardInterrupt, stopping early.")
finally:
broadcast("")
    # For the actor processes to join we have to make sure that their queue
    # is empty, including any backed-up items.
for proc in actors:
while proc.exitcode is None:
while not proc.queue.empty():
proc.queue.get_nowait()
proc.join(JOIN_WAIT_DELAY)
for proc in evaluators:
proc.join()
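# A minimal launch sketch (illustrative; not part of the original module). The
# hyperparameter values below are placeholders, and the spawn.main_handler()
# usage assumes the conventions of the OpenSpiel examples.
if __name__ == "__main__":
  with spawn.main_handler():
    alpha_zero(Config(
        game="tic_tac_toe",
        path="",  # Empty: a temp directory is created by alpha_zero().
        learning_rate=0.001,
        weight_decay=1e-4,
        train_batch_size=128,
        replay_buffer_size=2**14,
        replay_buffer_reuse=4,
        max_steps=25,
        checkpoint_freq=5,
        actors=2,
        evaluators=1,
        evaluation_window=50,
        eval_levels=5,
        uct_c=1.4,
        max_simulations=20,
        policy_alpha=0.25,
        policy_epsilon=1.0,
        temperature=1.0,
        temperature_drop=4,
        nn_model="mlp",
        nn_width=64,
        nn_depth=2,
        observation_shape=None,  # Filled in from the game by alpha_zero().
        output_size=None,        # Filled in from the game by alpha_zero().
        quiet=True,
    ))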
|
|
from .bases import _StandardStemmer
from whoosh.compat import u
class FinnishStemmer(_StandardStemmer):
"""
The Finnish Snowball stemmer.
:cvar __vowels: The Finnish vowels.
:type __vowels: unicode
:cvar __restricted_vowels: A subset of the Finnish vowels.
:type __restricted_vowels: unicode
:cvar __long_vowels: The Finnish vowels in their long forms.
:type __long_vowels: tuple
:cvar __consonants: The Finnish consonants.
:type __consonants: unicode
:cvar __double_consonants: The Finnish double consonants.
:type __double_consonants: tuple
:cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
:type __step1_suffixes: tuple
:cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
:type __step2_suffixes: tuple
:cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
:type __step3_suffixes: tuple
:cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
:type __step4_suffixes: tuple
:note: A detailed description of the Finnish
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/finnish/stemmer.html
"""
__vowels = u("aeiouy\xE4\xF6")
__restricted_vowels = u("aeiou\xE4\xF6")
__long_vowels = ("aa", "ee", "ii", "oo", "uu", u("\xE4\xE4"),
u("\xF6\xF6"))
__consonants = "bcdfghjklmnpqrstvwxz"
__double_consonants = ("bb", "cc", "dd", "ff", "gg", "hh", "jj",
"kk", "ll", "mm", "nn", "pp", "qq", "rr",
"ss", "tt", "vv", "ww", "xx", "zz")
__step1_suffixes = ('kaan', u('k\xE4\xE4n'), 'sti', 'kin', 'han',
u('h\xE4n'), 'ko', u('k\xF6'), 'pa', u('p\xE4'))
__step2_suffixes = ('nsa', u('ns\xE4'), 'mme', 'nne', 'si', 'ni',
'an', u('\xE4n'), 'en')
__step3_suffixes = ('siin', 'tten', 'seen', 'han', 'hen', 'hin',
'hon', u('h\xE4n'), u('h\xF6n'), 'den', 'tta',
u('tt\xE4'), 'ssa', u('ss\xE4'), 'sta',
u('st\xE4'), 'lla', u('ll\xE4'), 'lta',
u('lt\xE4'), 'lle', 'ksi', 'ine', 'ta',
u('t\xE4'), 'na', u('n\xE4'), 'a', u('\xE4'),
'n')
__step4_suffixes = ('impi', 'impa', u('imp\xE4'), 'immi', 'imma',
u('imm\xE4'), 'mpi', 'mpa', u('mp\xE4'), 'mmi',
'mma', u('mm\xE4'), 'eja', u('ej\xE4'))
def stem(self, word):
"""
Stem a Finnish word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
step3_success = False
r1, r2 = self._r1r2_standard(word, self.__vowels)
# STEP 1: Particles etc.
for suffix in self.__step1_suffixes:
if r1.endswith(suffix):
if suffix == "sti":
if suffix in r2:
word = word[:-3]
r1 = r1[:-3]
r2 = r2[:-3]
else:
if word[-len(suffix) - 1] in u("ntaeiouy\xE4\xF6"):
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
break
# STEP 2: Possessives
for suffix in self.__step2_suffixes:
if r1.endswith(suffix):
if suffix == "si":
if word[-3] != "k":
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
elif suffix == "ni":
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
if word.endswith("kse"):
word = "".join((word[:-3], "ksi"))
if r1.endswith("kse"):
r1 = "".join((r1[:-3], "ksi"))
if r2.endswith("kse"):
r2 = "".join((r2[:-3], "ksi"))
elif suffix == "an":
if (word[-4:-2] in ("ta", "na") or
word[-5:-2] in ("ssa", "sta", "lla", "lta")):
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
elif suffix == u("\xE4n"):
if (word[-4:-2] in (u("t\xE4"), u("n\xE4")) or
word[-5:-2] in (u("ss\xE4"), u("st\xE4"),
u("ll\xE4"), u("lt\xE4"))):
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
elif suffix == "en":
if word[-5:-2] in ("lle", "ine"):
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
else:
word = word[:-3]
r1 = r1[:-3]
r2 = r2[:-3]
break
# STEP 3: Cases
for suffix in self.__step3_suffixes:
if r1.endswith(suffix):
if suffix in ("han", "hen", "hin", "hon", u("h\xE4n"),
u("h\xF6n")):
if ((suffix == "han" and word[-4] == "a") or
(suffix == "hen" and word[-4] == "e") or
(suffix == "hin" and word[-4] == "i") or
(suffix == "hon" and word[-4] == "o") or
(suffix == u("h\xE4n") and word[-4] == u("\xE4")) or
(suffix == u("h\xF6n") and word[-4] == u("\xF6"))):
word = word[:-3]
r1 = r1[:-3]
r2 = r2[:-3]
step3_success = True
elif suffix in ("siin", "den", "tten"):
if (word[-len(suffix) - 1] == "i" and
word[-len(suffix) - 2] in self.__restricted_vowels):
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
step3_success = True
else:
continue
elif suffix == "seen":
if word[-6:-4] in self.__long_vowels:
word = word[:-4]
r1 = r1[:-4]
r2 = r2[:-4]
step3_success = True
else:
continue
elif suffix in ("a", u("\xE4")):
if word[-2] in self.__vowels and word[-3] in self.__consonants:
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
step3_success = True
elif suffix in ("tta", u("tt\xE4")):
if word[-4] == "e":
word = word[:-3]
r1 = r1[:-3]
r2 = r2[:-3]
step3_success = True
elif suffix == "n":
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
step3_success = True
if word[-2:] == "ie" or word[-2:] in self.__long_vowels:
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
step3_success = True
break
# STEP 4: Other endings
for suffix in self.__step4_suffixes:
if r2.endswith(suffix):
if suffix in ("mpi", "mpa", u("mp\xE4"), "mmi", "mma",
u("mm\xE4")):
if word[-5:-3] != "po":
word = word[:-3]
r1 = r1[:-3]
r2 = r2[:-3]
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
break
# STEP 5: Plurals
if step3_success and len(r1) >= 1 and r1[-1] in "ij":
word = word[:-1]
r1 = r1[:-1]
elif (not step3_success and len(r1) >= 2 and
r1[-1] == "t" and r1[-2] in self.__vowels):
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
if r2.endswith("imma"):
word = word[:-4]
r1 = r1[:-4]
elif r2.endswith("mma") and r2[-5:-3] != "po":
word = word[:-3]
r1 = r1[:-3]
# STEP 6: Tidying up
if r1[-2:] in self.__long_vowels:
word = word[:-1]
r1 = r1[:-1]
if (len(r1) >= 2 and r1[-2] in self.__consonants and
r1[-1] in u("a\xE4ei")):
word = word[:-1]
r1 = r1[:-1]
if r1.endswith(("oj", "uj")):
word = word[:-1]
r1 = r1[:-1]
if r1.endswith("jo"):
word = word[:-1]
r1 = r1[:-1]
# If the word ends with a double consonant
# followed by zero or more vowels, the last consonant is removed.
for i in range(1, len(word)):
if word[-i] in self.__vowels:
continue
else:
if i == 1:
if word[-i - 1:] in self.__double_consonants:
word = word[:-1]
else:
if word[-i - 1:-i + 1] in self.__double_consonants:
word = "".join((word[:-i], word[-i + 1:]))
break
return word
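# Usage sketch (illustrative; traced through the steps above, though the
# authoritative behavior is the Snowball reference implementation linked in
# the class docstring):
if __name__ == "__main__":
    stemmer = FinnishStemmer()
    # "taloissa" ("in the houses"): step 3 strips the case ending "ssa" and
    # step 5 strips the plural marker "i", leaving the stem "talo".
    print(stemmer.stem(u("taloissa")))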
|
|
from __future__ import unicode_literals
from django.contrib.gis.geos import HAS_GEOS
from django.contrib.gis.tests.utils import no_oracle
from django.db import connection
from django.test import TestCase, skipUnlessDBFeature
from django.test.utils import override_settings
from django.utils import timezone
if HAS_GEOS:
from django.contrib.gis.db.models import Collect, Count, Extent, F, Union
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.geos import GEOSGeometry, Point, MultiPoint
from .models import City, Location, DirectoryEntry, Parcel, Book, Author, Article, Event
@skipUnlessDBFeature("gis_enabled")
class RelatedGeoModelTest(TestCase):
fixtures = ['initial']
def test02_select_related(self):
"Testing `select_related` on geographic models (see #7126)."
qs1 = City.objects.order_by('id')
qs2 = City.objects.order_by('id').select_related()
qs3 = City.objects.order_by('id').select_related('location')
# Reference data for what's in the fixtures.
cities = (
('Aurora', 'TX', -97.516111, 33.058333),
('Roswell', 'NM', -104.528056, 33.387222),
('Kecksburg', 'PA', -79.460734, 40.18476),
)
for qs in (qs1, qs2, qs3):
for ref, c in zip(cities, qs):
nm, st, lon, lat = ref
self.assertEqual(nm, c.name)
self.assertEqual(st, c.state)
self.assertEqual(Point(lon, lat), c.location.point)
@skipUnlessDBFeature("has_transform_method")
def test03_transform_related(self):
"Testing the `transform` GeoQuerySet method on related geographic models."
# All the transformations are to state plane coordinate systems using
# US Survey Feet (thus a tolerance of 0 implies error w/in 1 survey foot).
tol = 0
def check_pnt(ref, pnt):
self.assertAlmostEqual(ref.x, pnt.x, tol)
self.assertAlmostEqual(ref.y, pnt.y, tol)
self.assertEqual(ref.srid, pnt.srid)
# Each city transformed to the SRID of their state plane coordinate system.
transformed = (('Kecksburg', 2272, 'POINT(1490553.98959621 314792.131023984)'),
('Roswell', 2257, 'POINT(481902.189077221 868477.766629735)'),
('Aurora', 2276, 'POINT(2269923.2484839 7069381.28722222)'),
)
for name, srid, wkt in transformed:
            # Doing this implicitly uses `select_related` to select the location.
# TODO: Fix why this breaks on Oracle.
qs = list(City.objects.filter(name=name).transform(srid, field_name='location__point'))
check_pnt(GEOSGeometry(wkt, srid), qs[0].location.point)
@skipUnlessDBFeature("supports_extent_aggr")
def test04a_related_extent_aggregate(self):
"Testing the `extent` GeoQuerySet aggregates on related geographic models."
# This combines the Extent and Union aggregates into one query
aggs = City.objects.aggregate(Extent('location__point'))
# One for all locations, one that excludes New Mexico (Roswell).
all_extent = (-104.528056, 29.763374, -79.460734, 40.18476)
txpa_extent = (-97.516111, 29.763374, -79.460734, 40.18476)
e1 = City.objects.extent(field_name='location__point')
e2 = City.objects.exclude(state='NM').extent(field_name='location__point')
e3 = aggs['location__point__extent']
# The tolerance value is to four decimal places because of differences
# between the Oracle and PostGIS spatial backends on the extent calculation.
tol = 4
for ref, e in [(all_extent, e1), (txpa_extent, e2), (all_extent, e3)]:
for ref_val, e_val in zip(ref, e):
self.assertAlmostEqual(ref_val, e_val, tol)
@skipUnlessDBFeature("has_unionagg_method")
def test04b_related_union_aggregate(self):
"Testing the `unionagg` GeoQuerySet aggregates on related geographic models."
# This combines the Extent and Union aggregates into one query
aggs = City.objects.aggregate(Union('location__point'))
# These are the points that are components of the aggregate geographic
        # union that is returned. Each point corresponds to a City PK.
p1 = Point(-104.528056, 33.387222)
p2 = Point(-97.516111, 33.058333)
p3 = Point(-79.460734, 40.18476)
p4 = Point(-96.801611, 32.782057)
p5 = Point(-95.363151, 29.763374)
# The second union aggregate is for a union
# query that includes limiting information in the WHERE clause (in other
# words a `.filter()` precedes the call to `.unionagg()`).
ref_u1 = MultiPoint(p1, p2, p4, p5, p3, srid=4326)
ref_u2 = MultiPoint(p2, p3, srid=4326)
u1 = City.objects.unionagg(field_name='location__point')
u2 = City.objects.exclude(
name__in=('Roswell', 'Houston', 'Dallas', 'Fort Worth'),
).unionagg(field_name='location__point')
u3 = aggs['location__point__union']
self.assertEqual(type(u1), MultiPoint)
self.assertEqual(type(u3), MultiPoint)
# Ordering of points in the result of the union is not defined and
# implementation-dependent (DB backend, GEOS version)
self.assertSetEqual({p.ewkt for p in ref_u1}, {p.ewkt for p in u1})
self.assertSetEqual({p.ewkt for p in ref_u2}, {p.ewkt for p in u2})
self.assertSetEqual({p.ewkt for p in ref_u1}, {p.ewkt for p in u3})
def test05_select_related_fk_to_subclass(self):
"Testing that calling select_related on a query over a model with an FK to a model subclass works"
# Regression test for #9752.
list(DirectoryEntry.objects.all().select_related())
def test06_f_expressions(self):
"Testing F() expressions on GeometryFields."
# Constructing a dummy parcel border and getting the City instance for
# assigning the FK.
b1 = GEOSGeometry(
'POLYGON((-97.501205 33.052520,-97.501205 33.052576,'
'-97.501150 33.052576,-97.501150 33.052520,-97.501205 33.052520))',
srid=4326
)
pcity = City.objects.get(name='Aurora')
# First parcel has incorrect center point that is equal to the City;
# it also has a second border that is different from the first as a
# 100ft buffer around the City.
c1 = pcity.location.point
c2 = c1.transform(2276, clone=True)
b2 = c2.buffer(100)
Parcel.objects.create(name='P1', city=pcity, center1=c1, center2=c2, border1=b1, border2=b2)
# Now creating a second Parcel where the borders are the same, just
# in different coordinate systems. The center points are also the
# same (but in different coordinate systems), and this time they
# actually correspond to the centroid of the border.
c1 = b1.centroid
c2 = c1.transform(2276, clone=True)
Parcel.objects.create(name='P2', city=pcity, center1=c1, center2=c2, border1=b1, border2=b1)
# Should return the second Parcel, which has the center within the
# border.
qs = Parcel.objects.filter(center1__within=F('border1'))
self.assertEqual(1, len(qs))
self.assertEqual('P2', qs[0].name)
if connection.features.supports_transform:
# This time center2 is in a different coordinate system and needs
# to be wrapped in transformation SQL.
qs = Parcel.objects.filter(center2__within=F('border1'))
self.assertEqual(1, len(qs))
self.assertEqual('P2', qs[0].name)
# Should return the first Parcel, which has the center point equal
# to the point in the City ForeignKey.
qs = Parcel.objects.filter(center1=F('city__location__point'))
self.assertEqual(1, len(qs))
self.assertEqual('P1', qs[0].name)
if connection.features.supports_transform:
# This time the city column should be wrapped in transformation SQL.
qs = Parcel.objects.filter(border2__contains=F('city__location__point'))
self.assertEqual(1, len(qs))
self.assertEqual('P1', qs[0].name)
def test07_values(self):
"Testing values() and values_list() and GeoQuerySets."
# GeoQuerySet and GeoValuesQuerySet, and GeoValuesListQuerySet respectively.
gqs = Location.objects.all()
gvqs = Location.objects.values()
gvlqs = Location.objects.values_list()
# Incrementing through each of the models, dictionaries, and tuples
# returned by the different types of GeoQuerySets.
for m, d, t in zip(gqs, gvqs, gvlqs):
# The values should be Geometry objects and not raw strings returned
# by the spatial database.
self.assertIsInstance(d['point'], Geometry)
self.assertIsInstance(t[1], Geometry)
self.assertEqual(m.point, d['point'])
self.assertEqual(m.point, t[1])
@override_settings(USE_TZ=True)
def test_07b_values(self):
"Testing values() and values_list() with aware datetime. See #21565."
Event.objects.create(name="foo", when=timezone.now())
list(Event.objects.values_list('when'))
def test08_defer_only(self):
"Testing defer() and only() on Geographic models."
qs = Location.objects.all()
def_qs = Location.objects.defer('point')
for loc, def_loc in zip(qs, def_qs):
self.assertEqual(loc.point, def_loc.point)
def test09_pk_relations(self):
"Ensuring correct primary key column is selected across relations. See #10757."
# The expected ID values -- notice the last two location IDs
# are out of order. Dallas and Houston have location IDs that differ
# from their PKs -- this is done to ensure that the related location
        # ID column is selected instead of the ID column for the city.
city_ids = (1, 2, 3, 4, 5)
loc_ids = (1, 2, 3, 5, 4)
ids_qs = City.objects.order_by('id').values('id', 'location__id')
for val_dict, c_id, l_id in zip(ids_qs, city_ids, loc_ids):
self.assertEqual(val_dict['id'], c_id)
self.assertEqual(val_dict['location__id'], l_id)
# TODO: fix on Oracle -- qs2 returns an empty result for an unknown reason
@no_oracle
def test10_combine(self):
"Testing the combination of two GeoQuerySets. See #10807."
buf1 = City.objects.get(name='Aurora').location.point.buffer(0.1)
buf2 = City.objects.get(name='Kecksburg').location.point.buffer(0.1)
qs1 = City.objects.filter(location__point__within=buf1)
qs2 = City.objects.filter(location__point__within=buf2)
combined = qs1 | qs2
names = [c.name for c in combined]
self.assertEqual(2, len(names))
self.assertTrue('Aurora' in names)
self.assertTrue('Kecksburg' in names)
def test11_geoquery_pickle(self):
"Ensuring GeoQuery objects are unpickled correctly. See #10839."
import pickle
from django.contrib.gis.db.models.sql import GeoQuery
qs = City.objects.all()
q_str = pickle.dumps(qs.query)
q = pickle.loads(q_str)
self.assertEqual(GeoQuery, q.__class__)
# TODO: fix on Oracle -- get the following error because the SQL is ordered
# by a geometry object, which Oracle apparently doesn't like:
# ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type
@no_oracle
def test12a_count(self):
"Testing `Count` aggregate use with the `GeoManager` on geo-fields."
        # The city 'Fort Worth' uses the same location as Dallas.
dallas = City.objects.get(name='Dallas')
# Count annotation should be 2 for the Dallas location now.
loc = Location.objects.annotate(num_cities=Count('city')).get(id=dallas.location.id)
self.assertEqual(2, loc.num_cities)
def test12b_count(self):
"Testing `Count` aggregate use with the `GeoManager` on non geo-fields. See #11087."
# Should only be one author (Trevor Paglen) returned by this query, and
# the annotation should have 3 for the number of books, see #11087.
# Also testing with a `GeoValuesQuerySet`, see #11489.
qs = Author.objects.annotate(num_books=Count('books')).filter(num_books__gt=1)
vqs = Author.objects.values('name').annotate(num_books=Count('books')).filter(num_books__gt=1)
self.assertEqual(1, len(qs))
self.assertEqual(3, qs[0].num_books)
self.assertEqual(1, len(vqs))
self.assertEqual(3, vqs[0]['num_books'])
# TODO: fix on Oracle -- get the following error because the SQL is ordered
# by a geometry object, which Oracle apparently doesn't like:
# ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type
@no_oracle
def test13c_count(self):
"Testing `Count` aggregate with `.values()`. See #15305."
qs = Location.objects.filter(id=5).annotate(num_cities=Count('city')).values('id', 'point', 'num_cities')
self.assertEqual(1, len(qs))
self.assertEqual(2, qs[0]['num_cities'])
self.assertIsInstance(qs[0]['point'], GEOSGeometry)
# TODO: The phantom model does appear on Oracle.
@no_oracle
def test13_select_related_null_fk(self):
"Testing `select_related` on a nullable ForeignKey via `GeoManager`. See #11381."
Book.objects.create(title='Without Author')
b = Book.objects.select_related('author').get(title='Without Author')
# Should be `None`, and not a 'dummy' model.
self.assertEqual(None, b.author)
@skipUnlessDBFeature("supports_collect_aggr")
def test14_collect(self):
"Testing the `collect` GeoQuerySet method and `Collect` aggregate."
# Reference query:
# SELECT AsText(ST_Collect("relatedapp_location"."point")) FROM "relatedapp_city" LEFT OUTER JOIN
# "relatedapp_location" ON ("relatedapp_city"."location_id" = "relatedapp_location"."id")
# WHERE "relatedapp_city"."state" = 'TX';
ref_geom = GEOSGeometry(
'MULTIPOINT(-97.516111 33.058333,-96.801611 32.782057,'
'-95.363151 29.763374,-96.801611 32.782057)'
)
c1 = City.objects.filter(state='TX').collect(field_name='location__point')
c2 = City.objects.filter(state='TX').aggregate(Collect('location__point'))['location__point__collect']
for coll in (c1, c2):
            # Even though Dallas and Ft. Worth share the same point, Collect doesn't
            # consolidate -- that's why there are 4 points in the MultiPoint.
self.assertEqual(4, len(coll))
self.assertTrue(ref_geom.equals(coll))
def test15_invalid_select_related(self):
"Testing doing select_related on the related name manager of a unique FK. See #13934."
qs = Article.objects.select_related('author__article')
# This triggers TypeError when `get_default_columns` has no `local_only`
        # keyword. The TypeError is swallowed if the QuerySet is actually
        # evaluated, as list generation swallows TypeError in CPython.
str(qs.query)
def test16_annotated_date_queryset(self):
"Ensure annotated date querysets work if spatial backend is used. See #14648."
birth_years = [dt.year for dt in
list(Author.objects.annotate(num_books=Count('books')).dates('dob', 'year'))]
birth_years.sort()
self.assertEqual([1950, 1974], birth_years)
# TODO: Related tests for KML, GML, and distance lookups.
|
|
# Copyright (c) 2010 ActiveState Software Inc. All rights reserved.
"""Textual UI: progress bar and colprint"""
import sys
from datetime import datetime, timedelta
from contextlib import contextmanager
import logging
import math
import six
LOG = logging.getLogger(__name__)
__all__ = ['colprint', 'find_console_width', 'ProgressBar',
'clear_progress_bar', 'redraw_progress_bar', 'safe_output']
if not six.PY3:
input = raw_input
class ProgressBar(object):
"""Show percent progress every 'n' seconds"""
def __init__(self, total, delay=0.1, show_size=lambda x: x, note=None):
"""
total - total number of items that are going to be processed
delay - update delay in seconds
        show_size - function returning the string to display instead of the
            raw item count
        note - optional text to print before the progress bar
        """
assert total >= 0, total
assert delay >= 0, delay
assert show_size
_set_current_progress_bar(self)
self.delay = timedelta(seconds=delay)
self.delay_duration = timedelta(seconds=1)
self.start = datetime.now()
self.elapsed = None # time elapsed from start
self.estimated_time_left = None
self.lastprint = None
self.lastprint_duration = None # for updating duration/ETA
self.lastprocessed = 0
self.total = total
self.processed = 0
self.show_size = show_size
self.note = note
self.duration_display = ''
self._length = 0 # current length of the progress display
@classmethod
def iterate(cls, sequence, note=None, post=None):
"""Iterate a sequence and update the progress bar accordingly
        The sequence must support ``len()``, so an arbitrary generator must
        be materialized (e.g. as a list) first.
        note -- Text to print before the progress bar
        post -- Text to print at the end of progress (formatted with the
            progress bar's attributes)
"""
p = cls(len(sequence), note=note)
clean_exit = False
try:
for item in sequence:
yield item
p.tick()
clean_exit = True
finally:
p.close()
if post and clean_exit:
sys.stdout.write(post.format(**p.__dict__) + '\n')
def tick(self, items=1):
"""The method that updates the display if necessary.
        After creating the ``ProgressBar`` object, this method must be
        called for every item processed (or, pass items=ITEMS for every ITEMS
        processed).
        This method must be called no more than ``self.total`` times
        (otherwise you get an assertion error, implying a bug in your code).
Return True if progress bar was redrawn.
"""
self.processed += items
assert self.processed <= self.total, \
'{0} <= {1}'.format(self.processed, self.total)
now = datetime.now()
        if (self.lastprint is None or
(now - self.lastprint) > self.delay):
self.lastprint = now
self.redraw()
return True
else:
return False
def clear(self):
"""Erase the entire progress bar and put the cursor at first column"""
# Move cursor to the beginning of current progress line so that further
# messages will overwrite the progress bar. Also overwrite the previous
# progress bar with empty space.
sys.stdout.write('\r' + ' '*self._length + '\r')
sys.stdout.flush()
def close(self):
"""Close (hide) the progress bar
        Redraw the final state, then erase the progress bar so that
        subsequent output starts where the progress bar used to be.
"""
self.redraw()
self.clear()
_del_current_progress_bar(self)
def redraw(self):
self.clear()
percent = _calculate_percent(self.processed, self.total)
now = datetime.now()
self.elapsed = now - self.start
if self.processed:
self.estimated_time_left = self.elapsed.seconds * (self.total-self.processed)/self.processed
# Update time elapsed/left once a second only (delay_duration = 1s).
if self.elapsed.seconds and (
self.lastprint_duration is None or \
now - self.lastprint_duration > self.delay_duration):
self.lastprint_duration = now
elapsed = _format_duration(self.elapsed.seconds)
if self.estimated_time_left:
self.duration_display = '({0}; {1} left)'.format(
elapsed, _format_duration(self.estimated_time_left))
else:
self.duration_display = '({0})'.format(elapsed)
bar_width = 20
bar_filled = int(round(20.0/100 * percent))
filled = ['='] * bar_filled
if filled:
filled[-1] = '>'
filled = ''.join(filled)
progress_bar = ''.join([
(self.note+': ') if self.note else '',
# header:
'[',
# solid bar
filled,
# empty space
' ' * (bar_width-bar_filled),
# footer
'] {0:-3}% {1}/{2} {3}'.format(
percent,
self.show_size(self.processed),
self.show_size(self.total),
self.duration_display
)
])
self._length = len(progress_bar)
sys.stdout.write('\r' + progress_bar + '\r')
sys.stdout.flush()
def clear_progress_bar():
"""Clear progress bar, if any"""
if _current_progress_bar:
_current_progress_bar.clear()
def redraw_progress_bar():
"""Redraw progress bar, if any"""
if _current_progress_bar:
_current_progress_bar.redraw()
@contextmanager
def safe_output():
"""Wrapper that makes it safe to print to stdout
If a progress bar is currently being shown, this wrapper takes care of
clearing it before .. and then redrawing it after
"""
clear_progress_bar()
yield
redraw_progress_bar()
def askyesno(question, default):
"""Ask (Y/N) type of question to the user"""
assert isinstance(default, bool), '"default" must be a boolean'
s = '{0} ({1}/{2}) '.format(
question,
default and 'Y' or 'y',
default and 'n' or 'N')
while True:
val = input(s).strip().lower()
if val == '':
return default
elif val in ('y', 'yes', 'ok'):
return True
elif val in ('n', 'no'):
return False
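# Example (illustrative): askyesno('Continue?', default=True) prompts with
# "Continue? (Y/n) " and returns True when the user just presses Enter.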
# This function was written by Alex Martelli
# http://stackoverflow.com/questions/1396820/
def colprint(table, totwidth=None):
"""Print the table in terminal taking care of wrapping/alignment
- `table`: A table of strings. Elements must not be `None`
- `totwidth`: If None, console width is used
"""
if not table: return
if totwidth is None:
totwidth = find_console_width()
if totwidth is not None:
        totwidth -= 1  # avoid printing an extra empty line on Windows
numcols = max(len(row) for row in table)
# ensure all rows have >= numcols columns, maybe empty
padded = [row+numcols*['',] for row in table]
# compute col widths, including separating space (except for last one)
widths = [ 1 + max(len(x) for x in column) for column in zip(*padded)]
widths[-1] -= 1
# drop or truncate columns from the right in order to fit
if totwidth is not None:
while sum(widths) > totwidth:
mustlose = sum(widths) - totwidth
if widths[-1] <= mustlose:
del widths[-1]
else:
widths[-1] -= mustlose
break
# and finally, the output phase!
for row in padded:
s = ''.join(['%*s' % (-w, i[:w])
for w, i in zip(widths, row)])
LOG.info(s)
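# Example (illustrative): colprint([['name', 'size'], ['foo.txt', '1.2KB']])
# logs the rows as left-aligned, space-separated columns via LOG.info.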
def find_console_width():
"""Return the console width
Return ``None`` if stdout is not a terminal (eg: a pipe)
"""
if sys.platform.startswith('win'):
return _find_windows_console_width()
else:
return _find_unix_console_width()
@contextmanager
def longrun(log, finalfn=lambda: None):
"""Decorator for performing a long operation with consideration for the
command line.
1. Catch keyboard interrupts and exit gracefully
2. Print total time elapsed always at the end (successful or not)
3. Call ``finalfn`` always at the end (successful or not)
"""
start_time = datetime.now()
try:
yield
except KeyboardInterrupt:
log.info('*** interrupted by user - Ctrl+c ***')
raise SystemExit(3)
finally:
finalfn()
end_time = datetime.now()
log.info('')
log.info('-----')
log.info('Total time elapsed: %s', end_time-start_time)
def _find_unix_console_width():
import termios, fcntl, struct, sys
# fcntl.ioctl will fail if stdout is not a tty
if not sys.stdout.isatty():
return None
s = struct.pack("HHHH", 0, 0, 0, 0)
fd_stdout = sys.stdout.fileno()
size = fcntl.ioctl(fd_stdout, termios.TIOCGWINSZ, s)
height, width = struct.unpack("HHHH", size)[:2]
return width
def _find_windows_console_width():
# http://code.activestate.com/recipes/440694/
from ctypes import windll, create_string_buffer
STDIN, STDOUT, STDERR = -10, -11, -12
h = windll.kernel32.GetStdHandle(STDERR)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
import struct
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom,
maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
sizex = right - left + 1
sizey = bottom - top + 1
return sizex
def _byteshr(bytes):
"""Human-readable version of bytes count"""
for x in ['bytes','KB','MB','GB','TB']:
if bytes < 1024.0:
return "%3.1f%s" % (bytes, x)
bytes /= 1024.0
raise ValueError('cannot find human-readable version')
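# Examples (illustrative): _byteshr(2048) -> '2.0KB';
# _byteshr(3.5 * 1024 ** 2) -> '3.5MB'.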
def _calculate_percent(numerator, denominator):
assert numerator <= denominator, '%d <= %d' % (numerator, denominator)
if denominator == 0:
if numerator == 0:
return 100
else:
raise ValueError('denominator cannot be zero')
return int(round( numerator / float(denominator) * 100 ))
def _format_duration(seconds):
s = []
if seconds > 60:
s.append('{0}m'.format(int(seconds/60)))
s.append('{0}s'.format(int(seconds % 60)))
return ''.join(s)
# Handle to the current progress bar object. There cannot be more than one
# progress bar for obvious reasons.
_current_progress_bar = None
def _set_current_progress_bar(pbar):
global _current_progress_bar
assert _current_progress_bar is None, 'there is already a pbar'
_current_progress_bar = pbar
def _del_current_progress_bar(pbar):
global _current_progress_bar
assert _current_progress_bar is pbar, 'pbar is something else'
_current_progress_bar = None
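# Usage sketch (illustrative): ProgressBar.iterate ticks once per item and
# closes the bar when the sequence is exhausted.
if __name__ == '__main__':
    import time
    for _ in ProgressBar.iterate(range(100), note='demo'):
        time.sleep(0.01)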
|
|
from sympy import evalf, symbols, zeros, pi, sin, cos, sqrt, acos, Matrix
from sympy.physics.mechanics import (ReferenceFrame, dynamicsymbols, inertia,
KanesMethod, RigidBody, Point, dot)
from sympy.utilities.pytest import slow
@slow
def test_bicycle():
# Code to get equations of motion for a bicycle modeled as in:
# J.P Meijaard, Jim M Papadopoulos, Andy Ruina and A.L Schwab. Linearized
# dynamics equations for the balance and steer of a bicycle: a benchmark
# and review. Proceedings of The Royal Society (2007) 463, 1955-1982
# doi: 10.1098/rspa.2007.1857
# Note that this code has been crudely ported from Autolev, which is the
    # reason for some of the unusual naming conventions. It was kept purposefully
    # as similar as possible in order to aid debugging.
# Declare Coordinates & Speeds
# Simple definitions for qdots - qd = u
# Speeds are: yaw frame ang. rate, roll frame ang. rate, rear wheel frame
# ang. rate (spinning motion), frame ang. rate (pitching motion), steering
# frame ang. rate, and front wheel ang. rate (spinning motion).
# Wheel positions are ignorable coordinates, so they are not introduced.
q1, q2, q4, q5 = dynamicsymbols('q1 q2 q4 q5')
q1d, q2d, q4d, q5d = dynamicsymbols('q1 q2 q4 q5', 1)
u1, u2, u3, u4, u5, u6 = dynamicsymbols('u1 u2 u3 u4 u5 u6')
u1d, u2d, u3d, u4d, u5d, u6d = dynamicsymbols('u1 u2 u3 u4 u5 u6', 1)
# Declare System's Parameters
WFrad, WRrad, htangle, forkoffset = symbols('WFrad WRrad htangle forkoffset')
forklength, framelength, forkcg1 = symbols('forklength framelength forkcg1')
forkcg3, framecg1, framecg3, Iwr11 = symbols('forkcg3 framecg1 framecg3 Iwr11')
Iwr22, Iwf11, Iwf22, Iframe11 = symbols('Iwr22 Iwf11 Iwf22 Iframe11')
Iframe22, Iframe33, Iframe31, Ifork11 = symbols('Iframe22 Iframe33 Iframe31 Ifork11')
Ifork22, Ifork33, Ifork31, g = symbols('Ifork22 Ifork33 Ifork31 g')
mframe, mfork, mwf, mwr = symbols('mframe mfork mwf mwr')
# Set up reference frames for the system
# N - inertial
# Y - yaw
# R - roll
# WR - rear wheel, rotation angle is ignorable coordinate so not oriented
# Frame - bicycle frame
# TempFrame - statically rotated frame for easier reference inertia definition
# Fork - bicycle fork
# TempFork - statically rotated frame for easier reference inertia definition
    # WF - front wheel, again possesses an ignorable coordinate
N = ReferenceFrame('N')
Y = N.orientnew('Y', 'Axis', [q1, N.z])
R = Y.orientnew('R', 'Axis', [q2, Y.x])
Frame = R.orientnew('Frame', 'Axis', [q4 + htangle, R.y])
WR = ReferenceFrame('WR')
TempFrame = Frame.orientnew('TempFrame', 'Axis', [-htangle, Frame.y])
Fork = Frame.orientnew('Fork', 'Axis', [q5, Frame.x])
TempFork = Fork.orientnew('TempFork', 'Axis', [-htangle, Fork.y])
WF = ReferenceFrame('WF')
    # Kinematics of the bicycle. The first block of code forms the positions
    # of the relevant points:
# rear wheel contact -> rear wheel mass center -> frame mass center +
# frame/fork connection -> fork mass center + front wheel mass center ->
# front wheel contact point
WR_cont = Point('WR_cont')
WR_mc = WR_cont.locatenew('WR_mc', WRrad * R.z)
Steer = WR_mc.locatenew('Steer', framelength * Frame.z)
Frame_mc = WR_mc.locatenew('Frame_mc', - framecg1 * Frame.x
+ framecg3 * Frame.z)
Fork_mc = Steer.locatenew('Fork_mc', - forkcg1 * Fork.x
+ forkcg3 * Fork.z)
WF_mc = Steer.locatenew('WF_mc', forklength * Fork.x + forkoffset * Fork.z)
WF_cont = WF_mc.locatenew('WF_cont', WFrad * (dot(Fork.y, Y.z) * Fork.y -
Y.z).normalize())
# Set the angular velocity of each frame.
# Angular accelerations end up being calculated automatically by
# differentiating the angular velocities when first needed.
# u1 is yaw rate
# u2 is roll rate
# u3 is rear wheel rate
# u4 is frame pitch rate
# u5 is fork steer rate
# u6 is front wheel rate
Y.set_ang_vel(N, u1 * Y.z)
R.set_ang_vel(Y, u2 * R.x)
WR.set_ang_vel(Frame, u3 * Frame.y)
Frame.set_ang_vel(R, u4 * Frame.y)
Fork.set_ang_vel(Frame, u5 * Fork.x)
WF.set_ang_vel(Fork, u6 * Fork.y)
    # Form the velocities of the previously defined points, using the two-point
# theorem (written out by hand here). Accelerations again are calculated
# automatically when first needed.
WR_cont.set_vel(N, 0)
WR_mc.v2pt_theory(WR_cont, N, WR)
Steer.v2pt_theory(WR_mc, N, Frame)
Frame_mc.v2pt_theory(WR_mc, N, Frame)
Fork_mc.v2pt_theory(Steer, N, Fork)
WF_mc.v2pt_theory(Steer, N, Fork)
WF_cont.v2pt_theory(WF_mc, N, WF)
# Sets the inertias of each body. Uses the inertia frame to construct the
    # inertia dyadics. Wheel inertias are only defined by principal moments of
    # inertia, and are in fact constant in the frame and fork reference frames;
    # it is for this reason that the orientations of the wheels do not need
# to be defined. The frame and fork inertias are defined in the 'Temp'
# frames which are fixed to the appropriate body frames; this is to allow
# easier input of the reference values of the benchmark paper. Note that
# due to slightly different orientations, the products of inertia need to
# have their signs flipped; this is done later when entering the numerical
# value.
Frame_I = (inertia(TempFrame, Iframe11, Iframe22, Iframe33, 0, 0, Iframe31), Frame_mc)
Fork_I = (inertia(TempFork, Ifork11, Ifork22, Ifork33, 0, 0, Ifork31), Fork_mc)
WR_I = (inertia(Frame, Iwr11, Iwr22, Iwr11), WR_mc)
WF_I = (inertia(Fork, Iwf11, Iwf22, Iwf11), WF_mc)
# Declaration of the RigidBody containers. ::
BodyFrame = RigidBody('BodyFrame', Frame_mc, Frame, mframe, Frame_I)
BodyFork = RigidBody('BodyFork', Fork_mc, Fork, mfork, Fork_I)
BodyWR = RigidBody('BodyWR', WR_mc, WR, mwr, WR_I)
BodyWF = RigidBody('BodyWF', WF_mc, WF, mwf, WF_I)
# The kinematic differential equations; they are defined quite simply. Each
# entry in this list is equal to zero.
kd = [q1d - u1, q2d - u2, q4d - u4, q5d - u5]
# The nonholonomic constraints are the velocity of the front wheel contact
# point dotted into the X, Y, and Z directions; the yaw frame is used as it
# is "closer" to the front wheel (1 less DCM connecting them). These
# constraints force the velocity of the front wheel contact point to be 0
# in the inertial frame; the X and Y direction constraints enforce a
# "no-slip" condition, and the Z direction constraint forces the front
# wheel contact point to not move away from the ground frame, essentially
# replicating the holonomic constraint which does not allow the frame pitch
# to change in an invalid fashion.
conlist_speed = [WF_cont.vel(N) & Y.x, WF_cont.vel(N) & Y.y, WF_cont.vel(N) & Y.z]
# The holonomic constraint is that the position from the rear wheel contact
# point to the front wheel contact point when dotted into the
# normal-to-ground plane direction must be zero; effectively that the front
# and rear wheel contact points are always touching the ground plane. This
# is actually not part of the dynamic equations, but instead is necessary
    # for the linearization process.
conlist_coord = [WF_cont.pos_from(WR_cont) & Y.z]
# The force list; each body has the appropriate gravitational force applied
# at its mass center.
FL = [(Frame_mc, -mframe * g * Y.z),
(Fork_mc, -mfork * g * Y.z),
(WF_mc, -mwf * g * Y.z),
(WR_mc, -mwr * g * Y.z)]
BL = [BodyFrame, BodyFork, BodyWR, BodyWF]
# The N frame is the inertial frame, coordinates are supplied in the order
# of independent, dependent coordinates, as are the speeds. The kinematic
    # differential equations are also entered here. Here the dependent speeds
# are specified, in the same order they were provided in earlier, along
# with the non-holonomic constraints. The dependent coordinate is also
# provided, with the holonomic constraint. Again, this is only provided
# for the linearization process.
KM = KanesMethod(N, q_ind=[q1, q2, q5],
q_dependent=[q4], configuration_constraints=conlist_coord,
u_ind=[u2, u3, u5],
u_dependent=[u1, u4, u6], velocity_constraints=conlist_speed,
kd_eqs=kd)
(fr, frstar) = KM.kanes_equations(FL, BL)
# This is the start of entering in the numerical values from the benchmark
    # paper to validate the eigenvalues of the linearized equations from this
    # model to the reference eigenvalues. Look at the aforementioned paper for
# more information. Some of these are intermediate values, used to
# transform values from the paper into the coordinate systems used in this
# model.
PaperRadRear = 0.3
PaperRadFront = 0.35
HTA = evalf.N(pi / 2 - pi / 10)
TrailPaper = 0.08
rake = evalf.N(-(TrailPaper*sin(HTA)-(PaperRadFront*cos(HTA))))
PaperWb = 1.02
PaperFrameCgX = 0.3
PaperFrameCgZ = 0.9
PaperForkCgX = 0.9
PaperForkCgZ = 0.7
FrameLength = evalf.N(PaperWb*sin(HTA)-(rake-(PaperRadFront-PaperRadRear)*cos(HTA)))
FrameCGNorm = evalf.N((PaperFrameCgZ - PaperRadRear-(PaperFrameCgX/sin(HTA))*cos(HTA))*sin(HTA))
FrameCGPar = evalf.N((PaperFrameCgX / sin(HTA) + (PaperFrameCgZ - PaperRadRear - PaperFrameCgX / sin(HTA) * cos(HTA)) * cos(HTA)))
tempa = evalf.N((PaperForkCgZ - PaperRadFront))
tempb = evalf.N((PaperWb-PaperForkCgX))
tempc = evalf.N(sqrt(tempa**2+tempb**2))
PaperForkL = evalf.N((PaperWb*cos(HTA)-(PaperRadFront-PaperRadRear)*sin(HTA)))
ForkCGNorm = evalf.N(rake+(tempc * sin(pi/2-HTA-acos(tempa/tempc))))
ForkCGPar = evalf.N(tempc * cos((pi/2-HTA)-acos(tempa/tempc))-PaperForkL)
# Here is the final assembly of the numerical values. The symbol 'v' is the
# forward speed of the bicycle (a concept which only makes sense in the
# upright, static equilibrium case?). These are in a dictionary which will
# later be substituted in. Again the sign on the *product* of inertia
# values is flipped here, due to different orientations of coordinate
# systems.
v = symbols('v')
val_dict = {WFrad: PaperRadFront,
WRrad: PaperRadRear,
htangle: HTA,
forkoffset: rake,
forklength: PaperForkL,
framelength: FrameLength,
forkcg1: ForkCGPar,
forkcg3: ForkCGNorm,
framecg1: FrameCGNorm,
framecg3: FrameCGPar,
Iwr11: 0.0603,
Iwr22: 0.12,
Iwf11: 0.1405,
Iwf22: 0.28,
Ifork11: 0.05892,
Ifork22: 0.06,
Ifork33: 0.00708,
Ifork31: 0.00756,
Iframe11: 9.2,
Iframe22: 11,
Iframe33: 2.8,
Iframe31: -2.4,
mfork: 4,
mframe: 85,
mwf: 3,
mwr: 2,
g: 9.81,
q1: 0,
q2: 0,
q4: 0,
q5: 0,
u1: 0,
u2: 0,
u3: v / PaperRadRear,
u4: 0,
u5: 0,
u6: v / PaperRadFront}
# Linearizes the forcing vector; the equations are set up as MM udot =
# forcing, where MM is the mass matrix, udot is the vector representing the
# time derivatives of the generalized speeds, and forcing is a vector which
# contains both external forcing terms and internal forcing terms, such as
    # centripetal or Coriolis forces. This actually returns a matrix with as
# many rows as *total* coordinates and speeds, but only as many columns as
# independent coordinates and speeds.
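    # Schematically (illustrative): with state x = [q; u], the linearized
    # system is MM_full * xdot = forcing_lin * x, so the eigenvalue analysis
    # below uses A = MM_full**-1 * forcing_lin at the numerical parameters.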
forcing_lin = KM.linearize()[0]
# As mentioned above, the size of the linearized forcing terms is expanded
# to include both q's and u's, so the mass matrix must have this done as
# well. This will likely be changed to be part of the linearized process,
# for future reference.
MM_full = KM.mass_matrix_full
MM_full_s = MM_full.subs(val_dict)
forcing_lin_s = forcing_lin.subs(KM.kindiffdict()).subs(val_dict)
MM_full_s = MM_full_s.evalf()
forcing_lin_s = forcing_lin_s.evalf()
# Finally, we construct an "A" matrix for the form xdot = A x (x being the
# state vector, although in this case, the sizes are a little off). The
# following line extracts only the minimum entries required for eigenvalue
# analysis, which correspond to rows and columns for lean, steer, lean
# rate, and steer rate.
Amat = MM_full_s.inv() * forcing_lin_s
A = Amat.extract([1, 2, 4, 6], [1, 2, 3, 5])
# Precomputed for comparison
Res = Matrix([[ 0, 0, 1.0, 0],
[ 0, 0, 0, 1.0],
[9.48977444677355, -0.891197738059089*v**2 - 0.571523173729245, -0.105522449805691*v, -0.330515398992311*v],
[11.7194768719633, -1.97171508499972*v**2 + 30.9087533932407, 3.67680523332152*v, -3.08486552743311*v]])
# Actual eigenvalue comparison
for i in range(6):
eps = 1.e-12
error = Res.subs(v, i) - A.subs(v, i)
to_test = error.applyfunc(lambda x: x < eps)
assert min(to_test)
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf # tf
from summae import model as m
# pylint: disable=invalid-name
class ModelTest(tf.test.TestCase):
def assertIntsInRange(self, i_np, a, b):
self.assertEqual(0, np.sum(i_np >= b))
self.assertEqual(0, np.sum(i_np < a))
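    # Example (illustrative): i_np = np.array([0, 3]) passes for (a, b) =
    # (0, 4), since every entry lies in the half-open interval [0, 4).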
def test_prepend_token(self):
vs = 10
bs = 2
es = 3
emb_matrix_VxE = tf.random_normal([vs, es])
x_BxLxE = tf.random.normal((bs, 10, es))
x_prepend = m.prepend_token(x_BxLxE, emb_matrix_VxE, 2)
self.assertEqual((bs, 11, es), x_prepend.get_shape())
with self.session() as ss:
[x_np, xp_np, emb_np] = ss.run([x_BxLxE, x_prepend, emb_matrix_VxE])
# First element is special token.
self.assertAllEqual(np.tile(emb_np[2, :], (bs, 1)),
np.squeeze(xp_np[:, 0, :]))
# Rest are the same.
self.assertAllEqual(x_np, xp_np[:, 1:, :])
def test_gru_encoder_pooling_method(self):
es = 7
hs = 8
ls = 4
for bidirect_encode in [True, False]:
for pooling in ['mean', 'last']:
tf.reset_default_graph()
encoder = m.GruEncoder(
hidden_size=hs,
latent_size=ls,
scope='gru_encoder',
bidirect_encode=bidirect_encode)
x_BxLxE = tf.random.normal((2, 10, es)) # batch_size = 2
seq_lengths_B = tf.constant([3, 6])
encoded_BxH, _ = encoder.encode(x_BxLxE, seq_lengths_B, pooling, True)
with self.session() as ss:
ss.run(tf.initializers.global_variables())
encoded_BxH_np = ss.run(encoded_BxH)
self.assertEqual((2, ls), encoded_BxH_np.shape)
def test_transformer_encoder_pooling_method(self):
es = 8
nl = 2
nh = 4
fs = 7
hs = 8
ls = 4
for pooling in ['mean', 'first']:
tf.reset_default_graph()
encoder = m.TransformerEncoder(
num_layers=nl,
num_heads=nh,
hidden_size=hs,
filter_size=fs,
attention_dropout=0.1,
relu_dropout=0.1,
postprocess_dropout=0.1,
latent_size=4,
scope='trf_encoder')
x_BxLxE = tf.random.normal((2, 10, es)) # batch_size = 2
seq_lengths_B = tf.constant([3, 6])
encoded_BxH, _ = encoder.encode(x_BxLxE, seq_lengths_B, pooling, True)
with self.session() as ss:
ss.run(tf.initializers.global_variables())
encoded_BxH_np = ss.run(encoded_BxH)
self.assertEqual((2, ls), encoded_BxH_np.shape)
def test_gru_decoder_teacher_force(self):
vs = 9
h = 10
z = 2
bs = 5
es = 3
max_seq_length = 11
nl = 6
for cond_only_init in [True, False]:
if cond_only_init:
h = z
tf.reset_default_graph()
emb_matrix_VxE = tf.get_variable(
'emb',
shape=[vs, es],
initializer=tf.truncated_normal_initializer(stddev=0.01))
decoder = m.GruDecoder(
h, vs, emb_matrix_VxE, 10, nl, cond_only_init=cond_only_init)
# Test teacher_force shapes
seq_lengths = tf.constant([3, 2, 4, 1, 2])
dec_out = decoder.teacher_force(
tf.random.normal((bs, h)), # state
tf.random.normal((bs, max_seq_length, es)), # dec_inputs
seq_lengths) # lengths
with self.session() as ss:
ss.run(tf.initializers.global_variables())
dec_out_np = ss.run(dec_out)
self.assertEqual((bs, max_seq_length, vs), dec_out_np.shape)
# TODO(peterjliu): Check zeros beyond sequence length
def test_gru_decoder_decode_v(self):
vs = 9
h = 10
z = 2
bs = 5
es = 3
nl = 6
k = 3
alpha = 0.6
for cond_only_init in [True, False]:
if cond_only_init:
h = z
tf.reset_default_graph()
emb_matrix_VxE = tf.get_variable(
'emb',
shape=[vs, es],
initializer=tf.truncated_normal_initializer(stddev=0.01))
decoder = m.GruDecoder(
h, vs, emb_matrix_VxE, 10, nl, cond_only_init=cond_only_init)
symb = decoder.decode_v(tf.random.normal((bs, h)), method='argmax')
symbr = decoder.decode_v(tf.random.normal((bs, h)), method='random')
symbb = decoder.decode_v(tf.random.normal((bs, h)), method='beam',
first_token=0, beam_size=k, alpha=alpha)
with self.session() as ss:
ss.run(tf.initializers.global_variables())
symb_np, symbr_np, symbb_np = ss.run([symb, symbr, symbb])
self.assertEqual(bs, symb_np.shape[0])
self.assertEqual(bs, symbr_np.shape[0])
self.assertEqual(bs, symbb_np.shape[0])
    # Check decoded symbols are within the vocab range.
self.assertIntsInRange(symb_np, 0, vs)
self.assertIntsInRange(symbr_np, 0, vs)
self.assertIntsInRange(symbb_np, 0, vs)
# TODO(peterjliu): Test _not_finished separately
def test_gru_decoder_decode_v_gumbel(self):
vs = 9
h = 10
z = 2
bs = 5
es = 3
nl = 6
for cond_only_init in [True, False]:
if cond_only_init:
h = z
tf.reset_default_graph()
emb_matrix_VxE = tf.get_variable(
'emb',
shape=[vs, es],
initializer=tf.truncated_normal_initializer(stddev=0.01))
decoder = m.GruDecoder(
h, vs, emb_matrix_VxE, 10, nl, cond_only_init=cond_only_init)
symb_BxM, symb_emb_BxMxE = decoder.decode_v_gumbel(
tf.random.normal((bs, h)))
with self.session() as ss:
ss.run(tf.initializers.global_variables())
symb_np, symbe_np = ss.run([symb_BxM, symb_emb_BxMxE])
# pylint: disable=g-generic-assert
self.assertEqual(2, len(symb_np.shape))
self.assertEqual(3, len(symbe_np.shape))
self.assertEqual(bs, symb_np.shape[0])
self.assertEqual(bs, symbe_np.shape[0])
self.assertEqual(es, symbe_np.shape[2])
    # Check decoded symbols are within the vocab range.
self.assertIntsInRange(symb_np, 0, vs)
def test_transformer_decoder_teacher_force(self):
vs = 9
h = 4
f = 12
z = 8
es = 4
bs = 5
nl = 4
nh = 2
max_seq_length = 11
for cond_by_addition in [True, False]:
if cond_by_addition:
z = es
tf.reset_default_graph()
emb_matrix_VxE = tf.get_variable(
'emb',
shape=[vs, es],
initializer=tf.truncated_normal_initializer(stddev=0.01))
decoder = m.TransformerDecoder(num_layers=nl, num_heads=nh, hidden_size=h,
filter_size=f, attention_dropout=0.1,
relu_dropout=0.1, postprocess_dropout=0.1,
embed_VxE=emb_matrix_VxE, vocab_size=vs,
max_steps=10, latent_size=z,
tie_embeddings=False,
cond_by_addition=cond_by_addition)
# Test teacher_force shapes
dec_out = decoder.teacher_force(
cond_input_BxZ=tf.random.normal((bs, z)),
dec_inputs_BxSxE=tf.random.normal((bs, max_seq_length, es)))
with self.session() as ss:
ss.run(tf.initializers.global_variables())
dec_out_np = ss.run(dec_out)
self.assertEqual((bs, max_seq_length, vs), dec_out_np.shape)
def test_transformer_decoder_decode_v(self):
vs = 9
h = 4
f = 12
z = 8
es = 4
bs = 5
nl = 4
nh = 2
k = 3
alpha = 0.6
for cond_by_addition in [True, False]:
if cond_by_addition:
z = es
tf.reset_default_graph()
emb_matrix_VxE = tf.get_variable(
'emb',
shape=[vs, es],
initializer=tf.truncated_normal_initializer(stddev=0.01))
decoder = m.TransformerDecoder(num_layers=nl, num_heads=nh, hidden_size=h,
filter_size=f, attention_dropout=0.1,
relu_dropout=0.1, postprocess_dropout=0.1,
embed_VxE=emb_matrix_VxE, vocab_size=vs,
max_steps=10, latent_size=z,
tie_embeddings=False,
cond_by_addition=cond_by_addition)
symb = decoder.decode_v(tf.random.normal((bs, z)))
symbb = decoder.decode_v(tf.random.normal((bs, z)), method='beam',
first_token=0, beam_size=k, alpha=alpha)
with self.session() as ss:
ss.run(tf.initializers.global_variables())
symb_np, symbb_np = ss.run([symb, symbb])
# Check shape
self.assertEqual(bs, symb_np.shape[0])
self.assertEqual(bs, symbb_np.shape[0])
# Check decoded symbols are legit IDs
self.assertIntsInRange(symb_np, 0, vs)
self.assertIntsInRange(symbb_np, 0, vs)
def test_id_seq_length(self):
self.assertAllEqual([3, 2],
m.id_seq_length(tf.constant([[1, 2, 3, 0, 0, 0],
[2, 3, 0, 0, 0, 0]],
dtype=tf.int64)))
def test_create_perm_label_table(self):
N = 3
perm_label_table = m.create_perm_label_table(N)
perms_B = tf.constant(['012', '021', '102', '120', '201', '210'],
dtype=tf.string)
with self.session() as ss:
ss.run(tf.tables_initializer())
labels_B = ss.run(perm_label_table.lookup(perms_B))
      # Check that each permutation maps to a unique class ID.
self.assertAllEqual(np.sort(labels_B), np.array([0, 1, 2, 3, 4, 5]))
def test_convert_sents_to_paragraphs(self):
s_ids_BxNxL = tf.constant([
[[5, 7, 1, 0], [4, 3, 1, 0], [6, 1, 0, 0]],
[[2, 2, 3, 1], [3, 1, 0, 0], [4, 2, 1, 0]],
[[5, 5, 5, 1], [5, 5, 5, 1], [5, 5, 1, 0]]], dtype=tf.int64)
p_ids_BxS = m.convert_sents_to_paragraph(s_ids_BxNxL, 3)
self.assertAllEqual(
p_ids_BxS,
tf.constant([[5, 7, 4, 3, 6, 1, 0, 0, 0],
[2, 2, 3, 3, 4, 2, 1, 0, 0],
[5, 5, 5, 5, 5, 5, 5, 5, 1]], dtype=tf.int64))
def test_swap_sentences_with_scheme(self):
# (B, N, L) = (2, 3, 6)
s_ids_BxNxL = tf.constant([
[[3, 2, 3, 1, 0, 0], [2, 2, 6, 3, 1, 0], [2, 3, 4, 5, 6, 1]],
[[4, 3, 1, 0, 0, 0], [7, 6, 8, 2, 4, 1], [2, 5, 7, 7, 1, 0]]],
dtype=tf.int64)
p_swapped_ids_BxS = m.corrupt_paragraph_with_scheme(s_ids_BxNxL,
scheme='last_two')
self.assertAllEqual(
p_swapped_ids_BxS,
tf.constant([[3, 2, 3, 2, 3, 4, 5, 6, 2, 2, 6, 3, 1],
[4, 3, 2, 5, 7, 7, 7, 6, 8, 2, 4, 1, 0]],
dtype=tf.int64))
def test_add_eos_2d(self):
b = tf.constant([
[3, 2, 4, 0,],
[5, 9, 2, 4,],
[5, 0, 0, 0,]], dtype=tf.int64)
self.assertAllEqual([
[3, 2, 4, 1, 0],
[5, 9, 2, 4, 1],
[5, 1, 0, 0, 0]], m.add_eos_2d(b))
def test_random_mask_like(self):
b = tf.constant([[3, 2, 4, 1, 0], [5, 9, 2, 4, 1], [5, 1, 0, 0, 0]],
dtype=tf.int64)
l = tf.constant([4, 5, 2])
b_mask_all_but_eos = tf.constant([[1, 1, 1, 0, 0], [1, 1, 1, 1, 0],
[1, 0, 0, 0, 0]])
self.assertAllEqual(
tf.zeros_like(b), m.random_mask_like(b, l, 0.0, not_mask_eos=True))
self.assertAllEqual(b_mask_all_but_eos,
m.random_mask_like(b, l, 1.0, not_mask_eos=True))
def test_mask_ids(self):
b = tf.constant([[3, 2, 4, 1, 0], [5, 9, 2, 4, 1], [5, 1, 0, 0, 0]],
dtype=tf.int64)
l = tf.constant([4, 5, 2])
mask_id = 32583
mask_all_but_eos = tf.constant([[mask_id, mask_id, mask_id, 1, 0],
[mask_id, mask_id, mask_id, mask_id, 1],
[mask_id, 1, 0, 0, 0]])
self.assertAllEqual(b, m.mask_ids(b, l, 0.0, mask_id))
self.assertAllEqual(mask_all_but_eos, m.mask_ids(b, l, 1.0, mask_id))
def test_mask_embs(self):
# suppose [1,1,1], [0,0,0], [2,2,2] are embeddings for <eos>, <pad>, <mask>
emb_size = 3
b_BxSxE = tf.constant([[[3, 4, 5], [3, 1, 2], [1, 1, 1], [0, 0, 0]],
[[2, 3, 1], [0, 1, 2], [1, 1, 1], [0, 0, 0]],
[[0, 3, 1], [1, 1, 1], [0, 0, 0], [0, 0, 0]]],
dtype=tf.float32)
b_mask_all_but_eos = tf.constant(
[[[2, 2, 2], [2, 2, 2], [1, 1, 1], [0, 0, 0]],
[[2, 2, 2], [2, 2, 2], [1, 1, 1], [0, 0, 0]],
[[2, 2, 2], [1, 1, 1], [0, 0, 0], [0, 0, 0]]],
dtype=tf.float32)
l_B = tf.constant([3, 3, 2])
mask_emb_E = tf.ones([emb_size]) * 2.0
masked_BxSxE = m.mask_embs(b_BxSxE, l_B, 0.0, mask_emb_E)
self.assertAllEqual(b_BxSxE, masked_BxSxE)
masked_BxSxE = m.mask_embs(b_BxSxE, l_B, 1.0, mask_emb_E)
self.assertAllEqual(b_mask_all_but_eos, masked_BxSxE)
def test_apply_mask_to_embs(self):
emb_size = 3
mask_emb_E = tf.ones([emb_size]) * 2.0
b_BxSxE = tf.constant([[[3, 4, 5], [3, 1, 2], [1, 1, 1], [0, 0, 0]],
[[2, 3, 1], [0, 1, 2], [1, 1, 1], [0, 0, 0]],
[[0, 3, 1], [1, 1, 1], [0, 0, 0], [0, 0, 0]]],
dtype=tf.float32)
mask_BxSx1 = tf.expand_dims(
tf.constant([[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 0]],
dtype=tf.float32),
axis=2)
b_mask_true = tf.constant([[[3, 4, 5], [2, 2, 2], [1, 1, 1], [0, 0, 0]],
[[2, 2, 2], [0, 1, 2], [1, 1, 1], [0, 0, 0]],
[[0, 3, 1], [1, 1, 1], [0, 0, 0], [0, 0, 0]]],
dtype=tf.float32)
b_mask_BxSxE = m.apply_mask_to_embs(b_BxSxE, mask_BxSx1, mask_emb_E)
self.assertAllEqual(b_mask_true, b_mask_BxSxE)
def test_mask_random_pos(self):
b = tf.constant([[3, 2, 4, 0, 0], [5, 9, 2, 4, 5], [5, 0, 0, 0, 0]],
dtype=tf.int64)
keep_rate = 1.0
self.assertAllEqual(tf.ones_like(b), m.mask_random_pos(b, keep_rate))
def test_reduce_mean_weighted(self):
b_N = tf.constant([0.5, 0.5]) # assume sum(b_N) = 1
x_BxNxZ = tf.constant([[[3, 4, 5], [6, 7, 8]], [[1, 2, 3], [4, 5, 6]]],
tf.float32)
self.assertAllEqual(
tf.reduce_mean(x_BxNxZ, axis=1), m.reduce_mean_weighted(x_BxNxZ, b_N))
b_N = tf.constant([0.0, 1.0])
self.assertAllEqual(x_BxNxZ[:, 1, :], m.reduce_mean_weighted(x_BxNxZ, b_N))
def test_get_features_labels(self):
B = 3
Z = 5
pos_np_BxZ = np.random.random((B, Z))
neg_np_BxZ = np.random.random((B, Z))
pos_BxZ = tf.constant(pos_np_BxZ)
neg_BxZ = tf.constant(neg_np_BxZ)
features_2BxZ, labels_2B = m.get_features_labels(pos_BxZ, neg_BxZ)
with self.session() as ss:
ss.run(tf.initializers.global_variables())
ss.run(tf.initializers.local_variables())
f_np, l_np = ss.run([features_2BxZ, labels_2B])
self.assertAllEqual(f_np, np.concatenate([pos_np_BxZ, neg_np_BxZ], axis=0))
self.assertAllEqual(l_np, np.concatenate([np.ones(B), np.zeros(B)]))
def test_get_discriminator(self):
x = tf.random.normal((10, 4))
disc_fn = m.get_discriminator(3, 'd_')
logits_B = disc_fn(x)
self.assertNotEmpty(tf.global_variables('d_'))
with self.session() as ss:
ss.run(tf.initializers.global_variables())
ss.run(tf.initializers.local_variables())
l_np = ss.run(logits_B)
self.assertEqual((10,), l_np.shape)
def test_dense_layer_with_proj(self):
H = 5
O = 6
B = 4
S = 3
I = 2
x = tf.random.normal((B, S, I))
p = tf.random.normal((H, O))
_ = m.dense_layer_with_proj(x, p, scope='ts_')
tf.logging.info(tf.global_variables('ts_'))
num_vars = len(tf.global_variables('ts_'))
_ = m.dense_layer_with_proj(x, p, scope='ts_')
# Test that dense layer is re-used.
self.assertLen(tf.global_variables('ts_'), num_vars)
if __name__ == '__main__':
tf.test.main()
|
|
# -*- coding: utf-8 -*-
"""
gspread.client
~~~~~~~~~~~~~~
This module contains Client class responsible for communicating with
Google Data API.
"""
import re
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
from xml.etree import ElementTree
from . import __version__
from .ns import _ns
from .httpsession import HTTPSession, HTTPError
from .models import Spreadsheet
from .urls import construct_url
from .utils import finditem
from .exceptions import (AuthenticationError, SpreadsheetNotFound,
NoValidUrlKeyFound, UpdateCellError,
RequestError)
AUTH_SERVER = 'https://www.google.com'
SPREADSHEETS_SERVER = 'spreadsheets.google.com'
_url_key_re = re.compile(r'key=([^&#]+)')
class Client(object):
"""An instance of this class communicates with Google Data API.
:param auth: A tuple containing an *email* and a *password* used for ClientLogin
authentication.
:param http_session: (optional) A session object capable of making HTTP requests while persisting headers.
Defaults to :class:`~gspread.httpsession.HTTPSession`.
>>> c = gspread.Client(auth=('user@example.com', 'qwertypassword'))
>>>
"""
def __init__(self, auth, http_session=None):
self.auth = auth
        # Fall back to a default HTTPSession when none is supplied.
        self.session = http_session or HTTPSession()
def _get_auth_token(self, content):
for line in content.splitlines():
if line.startswith('Auth='):
return line[5:]
return None
def _add_xml_header(self, data):
return "<?xml version='1.0' encoding='UTF-8'?>%s" % data.decode()
def login(self):
"""Authorize client using ClientLogin protocol.
        The credentials provided in the `auth` parameter to the class
        constructor will be used.
        This method uses the API described at:
http://code.google.com/apis/accounts/docs/AuthForInstalledApps.html
:raises AuthenticationError: if login attempt fails.
"""
source = 'burnash-gspread-%s' % __version__
service = 'wise'
data = {'Email': self.auth[0],
'Passwd': self.auth[1],
'accountType': 'HOSTED_OR_GOOGLE',
'service': service,
'source': source}
url = AUTH_SERVER + '/accounts/ClientLogin'
try:
r = self.session.post(url, data)
content = r.read().decode()
token = self._get_auth_token(content)
auth_header = "GoogleLogin auth=%s" % token
self.session.add_header('Authorization', auth_header)
except HTTPError as ex:
if ex.code == 403:
content = ex.read().decode()
if content.strip() == 'Error=BadAuthentication':
raise AuthenticationError("Incorrect username or password")
else:
raise AuthenticationError(
"Unable to authenticate. %s code" % ex.code)
else:
raise AuthenticationError(
"Unable to authenticate. %s code" % ex.code)
def open(self, title):
"""Opens a spreadsheet, returning a :class:`~gspread.Spreadsheet` instance.
:param title: A title of a spreadsheet.
        If there's more than one spreadsheet with the same title, the first
        one will be opened.
:raises gspread.SpreadsheetNotFound: if no spreadsheet with
specified `title` is found.
>>> c = gspread.Client(auth=('user@example.com', 'qwertypassword'))
>>> c.login()
>>> c.open('My fancy spreadsheet')
"""
feed = self.get_spreadsheets_feed()
for elem in feed.findall(_ns('entry')):
elem_title = elem.find(_ns('title')).text
if elem_title.strip() == title:
return Spreadsheet(self, elem)
else:
raise SpreadsheetNotFound
def open_by_key(self, key):
"""Opens a spreadsheet specified by `key`, returning a :class:`~gspread.Spreadsheet` instance.
:param key: A key of a spreadsheet as it appears in a URL in a browser.
:raises gspread.SpreadsheetNotFound: if no spreadsheet with
specified `key` is found.
>>> c = gspread.Client(auth=('user@example.com', 'qwertypassword'))
>>> c.login()
>>> c.open_by_key('0BmgG6nO_6dprdS1MN3d3MkdPa142WFRrdnRRUWl1UFE')
"""
feed = self.get_spreadsheets_feed()
for elem in feed.findall(_ns('entry')):
alter_link = finditem(lambda x: x.get('rel') == 'alternate',
elem.findall(_ns('link')))
m = _url_key_re.search(alter_link.get('href'))
if m and m.group(1) == key:
return Spreadsheet(self, elem)
else:
raise SpreadsheetNotFound
def open_by_url(self, url):
"""Opens a spreadsheet specified by `url`,
returning a :class:`~gspread.Spreadsheet` instance.
:param url: URL of a spreadsheet as it appears in a browser.
:raises gspread.SpreadsheetNotFound: if no spreadsheet with
specified `url` is found.
>>> c = gspread.Client(auth=('user@example.com', 'qwertypassword'))
>>> c.login()
>>> c.open_by_url('https://docs.google.com/spreadsheet/ccc?key=0Bm...FE&hl')
"""
m = _url_key_re.search(url)
if m:
return self.open_by_key(m.group(1))
else:
raise NoValidUrlKeyFound
def openall(self, title=None):
"""Opens all available spreadsheets,
returning a list of a :class:`~gspread.Spreadsheet` instances.
:param title: (optional) If specified can be used to filter
spreadsheets by title.
"""
feed = self.get_spreadsheets_feed()
result = []
for elem in feed.findall(_ns('entry')):
if title is not None:
elem_title = elem.find(_ns('title')).text
if elem_title.strip() != title:
continue
result.append(Spreadsheet(self, elem))
return result
def get_spreadsheets_feed(self, visibility='private', projection='full'):
url = construct_url('spreadsheets',
visibility=visibility, projection=projection)
r = self.session.get(url)
return ElementTree.fromstring(r.read())
def get_worksheets_feed(self, spreadsheet,
visibility='private', projection='full'):
url = construct_url('worksheets', spreadsheet,
visibility=visibility, projection=projection)
r = self.session.get(url)
return ElementTree.fromstring(r.read())
def get_cells_feed(self, worksheet,
visibility='private', projection='full', params=None):
url = construct_url('cells', worksheet,
visibility=visibility, projection=projection)
if params:
params = urlencode(params)
url = '%s?%s' % (url, params)
r = self.session.get(url)
return ElementTree.fromstring(r.read())
def get_feed(self, url):
r = self.session.get(url)
return ElementTree.fromstring(r.read())
def del_worksheet(self, worksheet):
url = construct_url(
'worksheet', worksheet, 'private', 'full', worksheet_version=worksheet.version)
self.session.delete(url)
def get_cells_cell_id_feed(self, worksheet, cell_id,
visibility='private', projection='full'):
url = construct_url('cells_cell_id', worksheet, cell_id=cell_id,
visibility=visibility, projection=projection)
r = self.session.get(url)
return ElementTree.fromstring(r.read())
def put_feed(self, url, data):
headers = {'Content-Type': 'application/atom+xml'}
data = self._add_xml_header(data)
try:
r = self.session.put(url, data, headers=headers)
except HTTPError as ex:
if ex.code == 403:
message = ex.read().decode()
raise UpdateCellError(message)
else:
raise ex
return ElementTree.fromstring(r.read())
def post_feed(self, url, data):
headers = {'Content-Type': 'application/atom+xml'}
data = self._add_xml_header(data)
try:
r = self.session.post(url, data, headers=headers)
except HTTPError as ex:
message = ex.read().decode()
raise RequestError(message)
return ElementTree.fromstring(r.read())
def post_cells(self, worksheet, data):
headers = {'Content-Type': 'application/atom+xml'}
data = self._add_xml_header(data)
url = construct_url('cells_batch', worksheet)
r = self.session.post(url, data, headers=headers)
return ElementTree.fromstring(r.read())
def login(email, password):
"""Login to Google API using `email` and `password`.
    This is a shortcut function which instantiates :class:`Client`
    and performs login right away.
:returns: :class:`Client` instance.
"""
client = Client(auth=(email, password))
client.login()
return client
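# Illustrative usage (added sketch; the email, password and title below are
# placeholders):
#
#     import gspread
#     gc = gspread.login('user@example.com', 'qwertypassword')
#     sheet = gc.open('My fancy spreadsheet')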
|
|
# Copyright (c) 2019 Pieter Wuille
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test-only secp256k1 elliptic curve implementation
WARNING: This code is slow, uses bad randomness, does not properly protect
keys, and is trivially vulnerable to side channel attacks. Do not use for
anything but tests."""
import random
def modinv(a, n):
"""Compute the modular inverse of a modulo n
See https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm#Modular_integers.
"""
t1, t2 = 0, 1
r1, r2 = n, a
while r2 != 0:
q = r1 // r2
t1, t2 = t2, t1 - q * t2
r1, r2 = r2, r1 - q * r2
if r1 > 1:
return None
if t1 < 0:
t1 += n
return t1
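# Minimal sanity checks for modinv (illustrative addition, not part of the
# original module): a * modinv(a, n) is congruent to 1 mod n whenever the
# inverse exists, and None is returned when gcd(a, n) != 1.
assert (3 * modinv(3, 7)) % 7 == 1
assert modinv(6, 9) is None  # 6 and 9 share the factor 3, so no inverse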
def jacobi_symbol(n, k):
"""Compute the Jacobi symbol of n modulo k
See http://en.wikipedia.org/wiki/Jacobi_symbol
For our application k is always prime, so this is the same as the Legendre symbol."""
assert k > 0 and k & 1, "jacobi symbol is only defined for positive odd k"
n %= k
t = 0
while n != 0:
while n & 1 == 0:
n >>= 1
r = k & 7
t ^= (r == 3 or r == 5)
n, k = k, n
t ^= (n & k & 3 == 3)
n = n % k
if k == 1:
return -1 if t else 1
return 0
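# Quick illustration (added): for prime k the Jacobi symbol equals the
# Legendre symbol, so quadratic residues map to 1 and non-residues to -1.
assert jacobi_symbol(4, 7) == 1   # 4 = 2**2 is a residue mod 7
assert jacobi_symbol(3, 7) == -1  # 3 is a non-residue mod 7
assert jacobi_symbol(7, 7) == 0   # multiples of k yield 0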
def modsqrt(a, p):
"""Compute the square root of a modulo p when p % 4 = 3.
The Tonelli-Shanks algorithm can be used. See https://en.wikipedia.org/wiki/Tonelli-Shanks_algorithm
Limiting this function to only work for p % 4 = 3 means we don't need to
iterate through the loop. The highest n such that p - 1 = 2^n Q with Q odd
is n = 1. Therefore Q = (p-1)/2 and sqrt = a^((Q+1)/2) = a^((p+1)/4)
    secp256k1 is defined over a field of size 2**256 - 2**32 - 977, which is 3 mod 4.
"""
if p % 4 != 3:
raise NotImplementedError("modsqrt only implemented for p % 4 = 3")
sqrt = pow(a, (p + 1)//4, p)
if pow(sqrt, 2, p) == a % p:
return sqrt
return None
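# Quick illustration (added): 2 is a quadratic residue mod 7 (3*3 = 9 = 2 mod 7)
# and 7 % 4 == 3, so the single-exponentiation shortcut applies.
assert modsqrt(2, 7) in (3, 4)  # 3 and -3 (= 4 mod 7) both square to 2
assert modsqrt(3, 7) is None    # 3 has no square root mod 7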
class EllipticCurve:
def __init__(self, p, a, b):
"""Initialize elliptic curve y^2 = x^3 + a*x + b over GF(p)."""
self.p = p
self.a = a % p
self.b = b % p
def affine(self, p1):
"""Convert a Jacobian point tuple p1 to affine form, or None if at infinity.
An affine point is represented as the Jacobian (x, y, 1)"""
x1, y1, z1 = p1
if z1 == 0:
return None
inv = modinv(z1, self.p)
inv_2 = (inv**2) % self.p
inv_3 = (inv_2 * inv) % self.p
return ((inv_2 * x1) % self.p, (inv_3 * y1) % self.p, 1)
def negate(self, p1):
"""Negate a Jacobian point tuple p1."""
x1, y1, z1 = p1
return (x1, (self.p - y1) % self.p, z1)
def on_curve(self, p1):
"""Determine whether a Jacobian tuple p is on the curve (and not infinity)"""
x1, y1, z1 = p1
z2 = pow(z1, 2, self.p)
z4 = pow(z2, 2, self.p)
return z1 != 0 and (pow(x1, 3, self.p) + self.a * x1 * z4 + self.b * z2 * z4 - pow(y1, 2, self.p)) % self.p == 0
def is_x_coord(self, x):
"""Test whether x is a valid X coordinate on the curve."""
x_3 = pow(x, 3, self.p)
return jacobi_symbol(x_3 + self.a * x + self.b, self.p) != -1
def lift_x(self, x):
"""Given an X coordinate on the curve, return a corresponding affine point."""
x_3 = pow(x, 3, self.p)
v = x_3 + self.a * x + self.b
y = modsqrt(v, self.p)
if y is None:
return None
return (x, y, 1)
def double(self, p1):
"""Double a Jacobian tuple p1
See https://en.wikibooks.org/wiki/Cryptography/Prime_Curve/Jacobian_Coordinates - Point Doubling"""
x1, y1, z1 = p1
if z1 == 0:
return (0, 1, 0)
y1_2 = (y1**2) % self.p
y1_4 = (y1_2**2) % self.p
x1_2 = (x1**2) % self.p
s = (4*x1*y1_2) % self.p
m = 3*x1_2
if self.a:
m += self.a * pow(z1, 4, self.p)
m = m % self.p
x2 = (m**2 - 2*s) % self.p
y2 = (m*(s - x2) - 8*y1_4) % self.p
z2 = (2*y1*z1) % self.p
return (x2, y2, z2)
def add_mixed(self, p1, p2):
"""Add a Jacobian tuple p1 and an affine tuple p2
See https://en.wikibooks.org/wiki/Cryptography/Prime_Curve/Jacobian_Coordinates - Point Addition (with affine point)"""
x1, y1, z1 = p1
x2, y2, z2 = p2
assert(z2 == 1)
# Adding to the point at infinity is a no-op
if z1 == 0:
return p2
z1_2 = (z1**2) % self.p
z1_3 = (z1_2 * z1) % self.p
u2 = (x2 * z1_2) % self.p
s2 = (y2 * z1_3) % self.p
if x1 == u2:
if (y1 != s2):
# p1 and p2 are inverses. Return the point at infinity.
return (0, 1, 0)
# p1 == p2. The formulas below fail when the two points are equal.
return self.double(p1)
h = u2 - x1
r = s2 - y1
h_2 = (h**2) % self.p
h_3 = (h_2 * h) % self.p
u1_h_2 = (x1 * h_2) % self.p
x3 = (r**2 - h_3 - 2*u1_h_2) % self.p
y3 = (r*(u1_h_2 - x3) - y1*h_3) % self.p
z3 = (h*z1) % self.p
return (x3, y3, z3)
def add(self, p1, p2):
"""Add two Jacobian tuples p1 and p2
See https://en.wikibooks.org/wiki/Cryptography/Prime_Curve/Jacobian_Coordinates - Point Addition"""
x1, y1, z1 = p1
x2, y2, z2 = p2
# Adding the point at infinity is a no-op
if z1 == 0:
return p2
if z2 == 0:
return p1
        # Adding an affine point to a Jacobian point is more efficient, since we save field multiplications and squarings when z = 1
if z1 == 1:
return self.add_mixed(p2, p1)
if z2 == 1:
return self.add_mixed(p1, p2)
z1_2 = (z1**2) % self.p
z1_3 = (z1_2 * z1) % self.p
z2_2 = (z2**2) % self.p
z2_3 = (z2_2 * z2) % self.p
u1 = (x1 * z2_2) % self.p
u2 = (x2 * z1_2) % self.p
s1 = (y1 * z2_3) % self.p
s2 = (y2 * z1_3) % self.p
if u1 == u2:
if (s1 != s2):
# p1 and p2 are inverses. Return the point at infinity.
return (0, 1, 0)
# p1 == p2. The formulas below fail when the two points are equal.
return self.double(p1)
h = u2 - u1
r = s2 - s1
h_2 = (h**2) % self.p
h_3 = (h_2 * h) % self.p
u1_h_2 = (u1 * h_2) % self.p
x3 = (r**2 - h_3 - 2*u1_h_2) % self.p
y3 = (r*(u1_h_2 - x3) - s1*h_3) % self.p
z3 = (h*z1*z2) % self.p
return (x3, y3, z3)
def mul(self, ps):
"""Compute a (multi) point multiplication
ps is a list of (Jacobian tuple, scalar) pairs.
"""
r = (0, 1, 0)
for i in range(255, -1, -1):
r = self.double(r)
for (p, n) in ps:
if ((n >> i) & 1):
r = self.add(r, p)
return r
SECP256K1 = EllipticCurve(2**256 - 2**32 - 977, 0, 7)
SECP256K1_G = (0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798, 0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8, 1)
SECP256K1_ORDER = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
SECP256K1_ORDER_HALF = SECP256K1_ORDER // 2
class ECPubKey():
"""A secp256k1 public key"""
def __init__(self):
"""Construct an uninitialized public key"""
self.valid = False
def set(self, data):
"""Construct a public key from a serialization in compressed or uncompressed format"""
if (len(data) == 65 and data[0] == 0x04):
p = (int.from_bytes(data[1:33], 'big'), int.from_bytes(data[33:65], 'big'), 1)
self.valid = SECP256K1.on_curve(p)
if self.valid:
self.p = p
self.compressed = False
elif (len(data) == 33 and (data[0] == 0x02 or data[0] == 0x03)):
x = int.from_bytes(data[1:33], 'big')
if SECP256K1.is_x_coord(x):
p = SECP256K1.lift_x(x)
# if the oddness of the y co-ord isn't correct, find the other
# valid y
if (p[1] & 1) != (data[0] & 1):
p = SECP256K1.negate(p)
self.p = p
self.valid = True
self.compressed = True
else:
self.valid = False
else:
self.valid = False
@property
def is_compressed(self):
return self.compressed
@property
def is_valid(self):
return self.valid
def get_bytes(self):
assert(self.valid)
p = SECP256K1.affine(self.p)
if p is None:
return None
if self.compressed:
return bytes([0x02 + (p[1] & 1)]) + p[0].to_bytes(32, 'big')
else:
return bytes([0x04]) + p[0].to_bytes(32, 'big') + p[1].to_bytes(32, 'big')
def verify_ecdsa(self, sig, msg, low_s=True):
"""Verify a strictly DER-encoded ECDSA signature against this pubkey.
See https://en.wikipedia.org/wiki/Elliptic_Curve_Digital_Signature_Algorithm for the
ECDSA verifier algorithm"""
assert(self.valid)
# Extract r and s from the DER formatted signature. Return false for
# any DER encoding errors.
if (sig[1] + 2 != len(sig)):
return False
if (len(sig) < 4):
return False
if (sig[0] != 0x30):
return False
if (sig[2] != 0x02):
return False
rlen = sig[3]
if (len(sig) < 6 + rlen):
return False
if rlen < 1 or rlen > 33:
return False
if sig[4] >= 0x80:
return False
if (rlen > 1 and (sig[4] == 0) and not (sig[5] & 0x80)):
return False
r = int.from_bytes(sig[4:4+rlen], 'big')
if (sig[4+rlen] != 0x02):
return False
slen = sig[5+rlen]
if slen < 1 or slen > 33:
return False
if (len(sig) != 6 + rlen + slen):
return False
if sig[6+rlen] >= 0x80:
return False
if (slen > 1 and (sig[6+rlen] == 0) and not (sig[7+rlen] & 0x80)):
return False
s = int.from_bytes(sig[6+rlen:6+rlen+slen], 'big')
# Verify that r and s are within the group order
if r < 1 or s < 1 or r >= SECP256K1_ORDER or s >= SECP256K1_ORDER:
return False
if low_s and s >= SECP256K1_ORDER_HALF:
return False
z = int.from_bytes(msg, 'big')
# Run verifier algorithm on r, s
w = modinv(s, SECP256K1_ORDER)
u1 = z*w % SECP256K1_ORDER
u2 = r*w % SECP256K1_ORDER
R = SECP256K1.affine(SECP256K1.mul([(SECP256K1_G, u1), (self.p, u2)]))
if R is None or (R[0] % SECP256K1_ORDER) != r:
return False
return True
class ECKey():
"""A secp256k1 private key"""
def __init__(self):
self.valid = False
def set(self, secret, compressed):
"""Construct a private key object with given 32-byte secret and compressed flag."""
assert(len(secret) == 32)
secret = int.from_bytes(secret, 'big')
self.valid = (secret > 0 and secret < SECP256K1_ORDER)
if self.valid:
self.secret = secret
self.compressed = compressed
def generate(self, compressed=True):
"""Generate a random private key (compressed or uncompressed)."""
self.set(random.randrange(1, SECP256K1_ORDER).to_bytes(32, 'big'), compressed)
def get_bytes(self):
"""Retrieve the 32-byte representation of this key."""
assert(self.valid)
return self.secret.to_bytes(32, 'big')
@property
def is_valid(self):
return self.valid
@property
def is_compressed(self):
return self.compressed
def get_pubkey(self):
"""Compute an ECPubKey object for this secret key."""
assert(self.valid)
ret = ECPubKey()
p = SECP256K1.mul([(SECP256K1_G, self.secret)])
ret.p = p
ret.valid = True
ret.compressed = self.compressed
return ret
def sign_ecdsa(self, msg, low_s=True):
"""Construct a DER-encoded ECDSA signature with this key.
See https://en.wikipedia.org/wiki/Elliptic_Curve_Digital_Signature_Algorithm for the
ECDSA signer algorithm."""
assert(self.valid)
z = int.from_bytes(msg, 'big')
# Note: no RFC6979, but a simple random nonce (some tests rely on distinct transactions for the same operation)
k = random.randrange(1, SECP256K1_ORDER)
R = SECP256K1.affine(SECP256K1.mul([(SECP256K1_G, k)]))
r = R[0] % SECP256K1_ORDER
s = (modinv(k, SECP256K1_ORDER) * (z + self.secret * r)) % SECP256K1_ORDER
if low_s and s > SECP256K1_ORDER_HALF:
s = SECP256K1_ORDER - s
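        # (Note added) Both s and SECP256K1_ORDER - s verify against the same
        # message, so preferring the smaller value yields a canonical,
        # non-malleable encoding.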
# Represent in DER format. The byte representations of r and s have
# length rounded up (255 bits becomes 32 bytes and 256 bits becomes 33
# bytes).
rb = r.to_bytes((r.bit_length() + 8) // 8, 'big')
sb = s.to_bytes((s.bit_length() + 8) // 8, 'big')
return b'\x30' + bytes([4 + len(rb) + len(sb), 2, len(rb)]) + rb + bytes([2, len(sb)]) + sb
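if __name__ == '__main__':
    # Smoke test (illustrative addition, not in the original file): sign a
    # 32-byte message hash and verify it with the derived public key.
    import hashlib
    _msg = hashlib.sha256(b'example message').digest()
    _key = ECKey()
    _key.generate()
    _sig = _key.sign_ecdsa(_msg)
    assert _key.get_pubkey().verify_ecdsa(_sig, _msg)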
|
|
from flask_sqlalchemy import BaseQuery
from sqlalchemy.dialects.postgresql import INET, MACADDR
from sqlalchemy import distinct, desc
from sqlalchemy.sql import func
from . import db, id_column, get_db_binding
DB_BINDING = get_db_binding(__name__)
class Account(db.Model):
"""OpenStack Account"""
__bind_key__ = DB_BINDING
id = id_column()
openstack_id = db.Column(db.String(128), unique=True, nullable=False)
instances = db.relationship("Instance", backref="account")
def json(self):
"""Jsonify"""
return {"id": self.id, "openstack_id": self.openstack_id}
class Tenant(db.Model):
"""OpenStack Tenant"""
__bind_key__ = DB_BINDING
id = id_column()
openstack_id = db.Column(db.String(128), unique=True, nullable=False)
instances = db.relationship("Instance", backref="tenant")
def json(self):
"""Jsonify"""
return {"id": self.id, "openstack_id": self.openstack_id}
class AvailabilityZone(db.Model):
"""OpenStack AZ/Cell"""
__bind_key__ = DB_BINDING
id = id_column()
name = db.Column(db.String(64), unique=True, nullable=False)
hypervisors = db.relationship("Hypervisor", backref="availability_zone")
instances = db.relationship("Instance", backref="availability_zone")
def json(self):
"""Jsonify"""
return {"id": self.id, "name": self.name}
class Instance(db.Model):
"""OpenStack VM/Instance"""
__bind_key__ = DB_BINDING
id = id_column()
openstack_id = db.Column(db.String(128), unique=True, nullable=False)
availability_zone_id = db.Column(None,
db.ForeignKey("availability_zone.id"),
index=True,
nullable=False)
account_id = db.Column(None,
db.ForeignKey("account.id"),
index=True,
nullable=False)
tenant_id = db.Column(None,
db.ForeignKey("tenant.id"),
index=True,
nullable=False)
flavor_id = db.Column(None,
db.ForeignKey("flavor.id"),
index=True,
nullable=False)
instance_states = db.relationship("InstanceState", backref="instance")
ip_addresses = db.relationship("IPAddressMapping", backref="instance")
mac_addresses = db.relationship("MACAddressMapping", backref="instance")
def latest_state(self, start_ts, end_ts):
""" Get full information of the latest state of an instance in the query date range
This includes its internal instance_id, Instnace.openstack_id (server_id),
InstanceState.name(server), Hypervisor.name (hypervisor),
AvailabilityZone.name (az), Flavor.openstack_id (flavor),
Image.openstack_id (image), life span (span), linked
Account.openstack_id (account), Tenant.openstack_id (tenant).
"""
state = InstanceState.latest(self.id, start_ts, end_ts)
state["instance_id"] = self.id
state["server_id"] = self.openstack_id
state['account'] = Account.query.get(self.account_id).openstack_id
state['tenant'] = Tenant.query.get(self.tenant_id).openstack_id
state['flavor'] = Flavor.query.get(self.flavor_id).openstack_id
state['az'] = AvailabilityZone.query.get(self.availability_zone_id).name
return state
def json(self):
"""Jsonify"""
return {
"id": self.id,
"openstack_id": self.openstack_id,
"account": self.account_id,
"tenant": self.tenant_id,
"flavor": self.flavor_id
}
class Snapshot(db.Model):
"""A snapshot of the world."""
__bind_key__ = DB_BINDING
id = id_column()
ts = db.Column(db.Integer, unique=True, nullable=False)
instance_states = db.relationship("InstanceState", backref="snapshot")
ip_address_mappings = db.relationship("IPAddressMapping",
backref="snapshot")
mac_address_mappings = db.relationship("MACAddressMapping",
backref="snapshot")
def json(self):
"""Jsonify"""
return {"id": self.id, "ts": self.ts}
class IPAddress(db.Model):
"""IP Address (v4 or v6)"""
__bind_key__ = DB_BINDING
id = id_column()
address = db.Column(INET, unique=True, nullable=False)
family = db.Column(db.Integer, index=True, nullable=False)
mappings = db.relationship("IPAddressMapping", backref="address")
def json(self):
"""Jsonify"""
return {"id": self.id, "address": self.address, "family": self.family}
class MACAddress(db.Model):
"""MAC Address"""
__bind_key__ = DB_BINDING
id = id_column()
address = db.Column(MACADDR, unique=True, nullable=False)
mappings = db.relationship("MACAddressMapping", backref="address")
def json(self):
"""Jsonify"""
return {"id": self.id, "address": self.address}
class Flavor(db.Model):
"""OpenStack Flavor"""
__bind_key__ = DB_BINDING
id = id_column()
openstack_id = db.Column(db.String(128), unique=True, nullable=False)
name = db.Column(db.String(64), nullable=False)
vcpus = db.Column(db.Integer)
ram = db.Column(db.Integer)
disk = db.Column(db.Integer)
ephemeral = db.Column(db.Integer)
public = db.Column(db.Boolean)
instances = db.relationship("Instance", backref="flavor")
def json(self):
"""Jsonify"""
return {
"id": self.id,
"openstack_id": self.openstack_id,
"name": self.name,
"vcpus": self.vcpus,
"ram": self.ram,
"disk": self.disk,
"ephemeral": self.ephemeral,
"public": self.public
}
class Hypervisor(db.Model):
"""OpenStack Hypervisor"""
__bind_key__ = DB_BINDING
id = id_column()
name = db.Column(db.String(128), nullable=False)
availability_zone_id = db.Column(None,
db.ForeignKey("availability_zone.id"),
nullable=False)
instance_states = db.relationship("InstanceState", backref="hypervisor")
def json(self):
"""Jsonify"""
return {
"id": self.id,
"name": self.name,
"availability_zone": self.availability_zone_id
}
class InstanceStatus(db.Model):
"""Instance States (e.g. active, error, etc.)"""
__bind_key__ = DB_BINDING
id = id_column()
name = db.Column(db.String(64), unique=True, nullable=False)
instance_state = db.relationship("InstanceState", backref="status")
def json(self):
"""Jsonify"""
return {"id": self.id, "name": self.name}
class Image(db.Model):
"""OpenStack Images"""
__bind_key__ = DB_BINDING
id = id_column()
openstack_id = db.Column(db.String(64), unique=True, nullable=False)
instance_state = db.relationship("InstanceState", backref="image")
def json(self):
"""Jsonify"""
return {"id": self.id, "openstack_id": self.openstack_id}
class InstanceState(db.Model):
"""Point-in-time OpenStack Instance State"""
__bind_key__ = DB_BINDING
id = id_column()
snapshot_id = db.Column(None,
db.ForeignKey("snapshot.id"),
index=True,
nullable=False)
instance_id = db.Column(None,
db.ForeignKey("instance.id"),
index=True,
nullable=False)
image_id = db.Column(None, db.ForeignKey("image.id"), nullable=False)
status_id = db.Column(None,
db.ForeignKey("instance_status.id"),
nullable=False)
hypervisor_id = db.Column(None,
db.ForeignKey("hypervisor.id"),
nullable=False)
name = db.Column(db.String(128), index=True, nullable=False)
def json(self):
"""Jsonify"""
return {
"id": self.id,
"name": self.name,
"snapshot": self.snapshot_id,
"instance": self.instance_id,
"image": self.image_id,
"status": self.status_id,
"hypervisor": self.hypervisor_id
}
@classmethod
def latest(cls, instance_id, start_ts=0, end_ts=0):
"""" Get the latest information of an instance in a given time range.
During the life time of an instance, image or hypervisor can be
changed, only report back the latest one.
"""
# This retrieves all InstanceState.id and Snapshot.ts in order
# to get the latest state and calculate span at once.
# This sacrifices memory usage to avoid multiple database hit.
query = db.session.query(InstanceState).join(Snapshot).\
filter(InstanceState.snapshot_id == Snapshot.id).\
filter(InstanceState.instance_id == instance_id).\
order_by(desc(Snapshot.ts))
if start_ts > 0:
query = query.filter(Snapshot.ts >= start_ts)
if end_ts > 0:
query = query.filter(Snapshot.ts < end_ts)
timely_states = query.with_entities(InstanceState.id, Snapshot.ts)
latest_state = InstanceState.query.get(timely_states[0][0])
state = {"server": latest_state.name}
# state["span"] is the difference between mapped snapshots
# The accuracy depends on snapshot resolution
state["span"] = timely_states[0][1] - timely_states[-1][1]
state["image"] = latest_state.image.openstack_id
state["hypervisor"] = Hypervisor.query.filter_by(id=latest_state.hypervisor_id).value("name")
return state
class IPAddressMapping(db.Model):
"""Point-in-time IP-Instance Mapping"""
__bind_key__ = DB_BINDING
id = id_column()
instance_id = db.Column(None,
db.ForeignKey("instance.id"),
index=True,
nullable=False)
address_id = db.Column(None,
db.ForeignKey("ip_address.id"),
index=True,
nullable=False)
snapshot_id = db.Column(None,
db.ForeignKey("snapshot.id"),
index=True,
nullable=False)
def json(self):
"""Jsonify"""
return {
"id": self.id,
"instance": self.instance_id,
"address": self.address_id,
"snapshot": self.snapshot_id
}
class MACAddressMapping(db.Model):
"""Point-in-time MAC-Instance Mapping"""
__bind_key__ = DB_BINDING
id = id_column()
instance_id = db.Column(None,
db.ForeignKey("instance.id"),
index=True,
nullable=False)
address_id = db.Column(None,
db.ForeignKey("mac_address.id"),
index=True,
nullable=False)
snapshot_id = db.Column(None,
db.ForeignKey("snapshot.id"),
index=True,
nullable=False)
def json(self):
"""Jsonify"""
return {
"id": self.id,
"instance": self.instance_id,
"address": self.address_id,
"snapshot": self.snapshot_id
}
class Summary():
"""list distinct instance on sa node between start_ts and end_ts """
query = None
def __init__(self, start_ts, end_ts):
"""Build a query to get a distinct list of instance_id bewteen sart and end ts."""
az_query = db.session.query(Hypervisor).join(AvailabilityZone).\
filter(Hypervisor.availability_zone_id == AvailabilityZone.id).\
filter(AvailabilityZone.name.like("sa%")).\
with_entities(Hypervisor.id).subquery()
self.query = BaseQuery([Snapshot, InstanceState], db.session()).\
filter(Snapshot.ts >= start_ts, Snapshot.ts < end_ts).\
filter(InstanceState.snapshot_id == Snapshot.id).\
with_entities(InstanceState.instance_id).\
distinct(InstanceState.instance_id).\
filter(InstanceState.hypervisor_id.in_(az_query))
def value(self):
return [item[0] for item in self.query.all()]
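# Illustrative usage (added sketch; assumes an active Flask application
# context, and the timestamps below are placeholders):
#
#     instance_ids = Summary(start_ts=1500000000, end_ts=1500086400).value()
#     states = [Instance.query.get(i).latest_state(1500000000, 1500086400)
#               for i in instance_ids]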
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.language_v1beta2.types import language_service
from .base import LanguageServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import LanguageServiceGrpcTransport
class LanguageServiceGrpcAsyncIOTransport(LanguageServiceTransport):
"""gRPC AsyncIO backend transport for LanguageService.
Provides text analysis operations such as sentiment analysis
and entity recognition.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "language.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "language.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
            always_use_jwt_access (Optional[bool]): Whether self-signed JWTs should
                be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def analyze_sentiment(
self,
) -> Callable[
[language_service.AnalyzeSentimentRequest],
Awaitable[language_service.AnalyzeSentimentResponse],
]:
r"""Return a callable for the analyze sentiment method over gRPC.
Analyzes the sentiment of the provided text.
Returns:
Callable[[~.AnalyzeSentimentRequest],
Awaitable[~.AnalyzeSentimentResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "analyze_sentiment" not in self._stubs:
self._stubs["analyze_sentiment"] = self.grpc_channel.unary_unary(
"/google.cloud.language.v1beta2.LanguageService/AnalyzeSentiment",
request_serializer=language_service.AnalyzeSentimentRequest.serialize,
response_deserializer=language_service.AnalyzeSentimentResponse.deserialize,
)
return self._stubs["analyze_sentiment"]
@property
def analyze_entities(
self,
) -> Callable[
[language_service.AnalyzeEntitiesRequest],
Awaitable[language_service.AnalyzeEntitiesResponse],
]:
r"""Return a callable for the analyze entities method over gRPC.
Finds named entities (currently proper names and
common nouns) in the text along with entity types,
salience, mentions for each entity, and other
properties.
Returns:
Callable[[~.AnalyzeEntitiesRequest],
Awaitable[~.AnalyzeEntitiesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "analyze_entities" not in self._stubs:
self._stubs["analyze_entities"] = self.grpc_channel.unary_unary(
"/google.cloud.language.v1beta2.LanguageService/AnalyzeEntities",
request_serializer=language_service.AnalyzeEntitiesRequest.serialize,
response_deserializer=language_service.AnalyzeEntitiesResponse.deserialize,
)
return self._stubs["analyze_entities"]
@property
def analyze_entity_sentiment(
self,
) -> Callable[
[language_service.AnalyzeEntitySentimentRequest],
Awaitable[language_service.AnalyzeEntitySentimentResponse],
]:
r"""Return a callable for the analyze entity sentiment method over gRPC.
Finds entities, similar to
[AnalyzeEntities][google.cloud.language.v1beta2.LanguageService.AnalyzeEntities]
in the text and analyzes sentiment associated with each entity
and its mentions.
Returns:
Callable[[~.AnalyzeEntitySentimentRequest],
Awaitable[~.AnalyzeEntitySentimentResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "analyze_entity_sentiment" not in self._stubs:
self._stubs["analyze_entity_sentiment"] = self.grpc_channel.unary_unary(
"/google.cloud.language.v1beta2.LanguageService/AnalyzeEntitySentiment",
request_serializer=language_service.AnalyzeEntitySentimentRequest.serialize,
response_deserializer=language_service.AnalyzeEntitySentimentResponse.deserialize,
)
return self._stubs["analyze_entity_sentiment"]
@property
def analyze_syntax(
self,
) -> Callable[
[language_service.AnalyzeSyntaxRequest],
Awaitable[language_service.AnalyzeSyntaxResponse],
]:
r"""Return a callable for the analyze syntax method over gRPC.
Analyzes the syntax of the text and provides sentence
boundaries and tokenization along with part-of-speech
tags, dependency trees, and other properties.
Returns:
Callable[[~.AnalyzeSyntaxRequest],
Awaitable[~.AnalyzeSyntaxResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "analyze_syntax" not in self._stubs:
self._stubs["analyze_syntax"] = self.grpc_channel.unary_unary(
"/google.cloud.language.v1beta2.LanguageService/AnalyzeSyntax",
request_serializer=language_service.AnalyzeSyntaxRequest.serialize,
response_deserializer=language_service.AnalyzeSyntaxResponse.deserialize,
)
return self._stubs["analyze_syntax"]
@property
def classify_text(
self,
) -> Callable[
[language_service.ClassifyTextRequest],
Awaitable[language_service.ClassifyTextResponse],
]:
r"""Return a callable for the classify text method over gRPC.
Classifies a document into categories.
Returns:
Callable[[~.ClassifyTextRequest],
Awaitable[~.ClassifyTextResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "classify_text" not in self._stubs:
self._stubs["classify_text"] = self.grpc_channel.unary_unary(
"/google.cloud.language.v1beta2.LanguageService/ClassifyText",
request_serializer=language_service.ClassifyTextRequest.serialize,
response_deserializer=language_service.ClassifyTextResponse.deserialize,
)
return self._stubs["classify_text"]
@property
def annotate_text(
self,
) -> Callable[
[language_service.AnnotateTextRequest],
Awaitable[language_service.AnnotateTextResponse],
]:
r"""Return a callable for the annotate text method over gRPC.
A convenience method that provides all syntax,
sentiment, entity, and classification features in one
call.
Returns:
Callable[[~.AnnotateTextRequest],
Awaitable[~.AnnotateTextResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "annotate_text" not in self._stubs:
self._stubs["annotate_text"] = self.grpc_channel.unary_unary(
"/google.cloud.language.v1beta2.LanguageService/AnnotateText",
request_serializer=language_service.AnnotateTextRequest.serialize,
response_deserializer=language_service.AnnotateTextResponse.deserialize,
)
return self._stubs["annotate_text"]
def close(self):
return self.grpc_channel.close()
__all__ = ("LanguageServiceGrpcAsyncIOTransport",)
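# Illustrative usage (added sketch; must run inside an asyncio event loop, and
# the request construction below is schematic):
#
#     transport = LanguageServiceGrpcAsyncIOTransport()
#     rpc = transport.analyze_sentiment
#     response = await rpc(language_service.AnalyzeSentimentRequest(...))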
|
|
from JumpScale import j
from stat import *
import brotli
import hashlib
import functools
import subprocess
import pwd
import grp
import os
import sys
import re
class FListFactory(object):
def __init__(self):
self.__jslocation__ = "j.tools.flist"
def get_flist(self):
"""
Return a Flist object
"""
return FList()
def get_archiver(self):
"""
Return a FListArchiver object
This is used to push flist to IPFS
"""
return FListArchiver()
class FList(object):
"""
    FList (sometimes "plist") files contain a plain-text representation of
    a complete file system tree.
    FList stands for "file list" (plist for "path list"). The format maps
    each file to its md5 hash, which makes it possible to retrieve a file
    remotely and fetch its metadata separately.
    FList is formatted to support POSIX ACLs, file type representation and
    extra data (which can be of any type, but is used internally to describe
    some file types).
    A flist file contains one entry per file; fields are separated by "|".
    A filename must not contain the pipe character, otherwise it is not
    supported at all.
This is a flist file format supported by this library:
filepath|hash|filesize|uname|gname|permissions|filetype|ctime|mtime|extended
- filepath: the complete file path on the filesystem
- hash: md5 checksum of the file
      - if the file is a special file (block, symlink, ...), use this hash:
md5("flist:" + filename (fullpath) + ":" + mtime)
- filesize: size in bytes
- uname: username owner of the file (used for permissions)
- note: if username doesn't match any userid, userid will be used
- gname: groupname owner of the file (used for permissions)
- note: if groupname doesn't match any groupid, groupid will be used
- permissions: octal representation of the posix permissions
- filetype: integer representing the file type:
- 0: socket (S_IFSOCK)
- 1: symlink (S_IFLNK)
- 2: regular file (S_IFREG)
- 3: block device (S_IFBLK)
- 4: directory (S_IFDIR) (used for empty directory)
- 5: char. device (S_IFCHR)
- 6: fifo pipe (S_IFIFO)
- ctime: unix timestamp of the creation time
    - mtime: unix timestamp of the last modification time
    - extended: optional field which may contain extra data related to
      the file type:
- symlink : contains the target of the link
- block device: ...
- char. device: ...
"""
def __init__(self):
self._data = []
self._hash = {}
self._path = {}
def parse(self, filename):
del self._data[:]
self._hash.clear()
self._path.clear()
index = 0
with open(filename) as flist:
for line in flist:
f = line.strip().split('|')
                # index entries by their path (field 0), per the format above
                index = self._indexForPath(f[0])
self._data[index] = [
f[0], # path
f[1], # hash
int(f[2]), # size
f[3], # uname
f[4], # gname
f[5], # permission
int(f[6]), # filetype
int(f[7]), # ctime
int(f[8]), # mtime
f[9] # extended
]
return index
"""
Getters
"""
def _indexsFromHash(self, hash):
if hash not in self._hash:
return None
return self._hash[hash]
def getHashList(self):
hashes = []
for x in self._data:
hashes.append(x[1])
return hashes
def filesFromHash(self, hash):
paths = []
ids = self._indexsFromHash(hash)
# adding paths from ids list
for x in ids:
paths.append(self._data[x][0])
return paths
def _getItem(self, filename, index):
id = self._path[filename]
if id is not None:
return self._data[id][index]
return None
def getHash(self, filename):
return self._getItem(filename, 1)
def getType(self, filename):
type = self._getItem(filename, 0)
if type is None:
return None
# FIXME
return None
def isRegular(self, filename):
return self._getItem(filename, 6) == 2
def getSize(self, filename):
return self._getItem(filename, 2)
def getMode(self, filename):
return self._getItem(filename, 5)
def getOwner(self, filename):
return self._getItem(filename, 3)
def getGroup(self, filename):
return self._getItem(filename, 4)
    def getExtended(self, filename):
        return self._getItem(filename, 9)
def getCreationTime(self, filename):
return self._getItem(filename, 7)
def getModificationTime(self, filename):
return self._getItem(filename, 8)
"""
Setters
"""
def _indexForPath(self, filename):
if filename not in self._path:
# creating new entry
self._data.append([None] * 10)
id = len(self._data) - 1
self._data[id][0] = filename
self._path[filename] = id
return self._path[filename]
def _setItem(self, filename, value, index):
id = self._indexForPath(filename)
if id is None:
return None
self._data[id][index] = value
return value
def setHash(self, filename, value):
self._setItem(filename, value, 1)
# updating hash list
id = self._indexForPath(filename)
if value in self._hash:
self._hash[value].append(id)
else:
self._hash[value] = [id]
return value
def setType(self, filename, value):
# testing regular first, it will probably be
# the most often used type
if S_ISREG(value):
return self._setItem(filename, 2, 6)
# testing special files type
if S_ISSOCK(value):
return self._setItem(filename, 0, 6)
if S_ISLNK(value):
return self._setItem(filename, 1, 6)
if S_ISBLK(value):
return self._setItem(filename, 3, 6)
if S_ISCHR(value):
return self._setItem(filename, 5, 6)
if S_ISFIFO(value):
return self._setItem(filename, 6, 6)
# keep track of empty directories
if S_ISDIR(value):
return self._setItem(filename, 4, 6)
return None
def setSize(self, filename, value):
return self._setItem(filename, value, 2)
def setMode(self, filename, value):
return self._setItem(filename, value, 5)
def setOwner(self, filename, value):
return self._setItem(filename, value, 3)
def setGroup(self, filename, value):
return self._setItem(filename, value, 4)
def setExtended(self, filename, value):
"""
value: need to be a stat struct
"""
path = self._getItem(filename, 0)
# symlink
if S_ISLNK(value.st_mode):
xtd = os.readlink(path)
return self._setItem(filename, xtd, 9)
# block device
if S_ISBLK(value.st_mode) or S_ISCHR(value.st_mode):
id = '%d,%d' % (os.major(value.st_rdev), os.minor(value.st_rdev))
return self._setItem(filename, id, 9)
return self._setItem(filename, "", 9)
    def setModificationTime(self, filename, value):
        return self._setItem(filename, int(value), 8)
    def setCreationTime(self, filename, value):
        return self._setItem(filename, int(value), 7)
"""
Builder
"""
def _build(self, filename):
stat = os.stat(filename, follow_symlinks=False)
mode = oct(stat.st_mode)[4:]
        # grab the username from the userid; if not found, use the raw userid
        try:
            uname = pwd.getpwuid(stat.st_uid).pw_name
        except KeyError:
            uname = stat.st_uid
        # grab the groupname from the groupid; if not found, use the raw groupid
        try:
            gname = grp.getgrgid(stat.st_gid).gr_name
        except KeyError:
            gname = stat.st_gid
        # compute the hash of the contents only for regular files; otherwise,
        # hash a synthetic string, since the hash is used as the file "id"
        # in the list and cannot be empty
if not S_ISREG(stat.st_mode):
hashstr = "flist:%s:%d" % (filename, stat.st_mtime)
hash = j.data.hash.md5_string(hashstr)
else:
hash = j.data.hash.md5(filename)
self.setHash(filename, hash)
self.setType(filename, stat.st_mode)
self.setSize(filename, stat.st_size)
self.setMode(filename, mode)
self.setOwner(filename, uname)
self.setGroup(filename, gname)
self.setExtended(filename, stat)
self.setModificationTime(filename, stat.st_mtime)
self.setCreationTime(filename, stat.st_ctime)
def __valid(self, fname, excludes):
for ex in excludes:
if ex.match(fname):
return False
return True
    def build(self, path, excludes=None):
        excludes = excludes or []
        if len(self._data) > 0:
            # building can only be done on an empty list
            return None
# compiling regex for exclusion
__excludes = []
for ex in excludes:
__excludes.append(re.compile(ex))
for dirpath, dirs, files in os.walk(path, followlinks=True):
for dirname in dirs:
fname = os.path.join(dirpath, dirname)
# exclusion checking
if not self.__valid(fname, __excludes):
continue
if j.sal.fs.isEmptyDir(fname):
self._build(fname)
for filename in files:
fname = os.path.join(dirpath, filename)
# exclusion checking
if not self.__valid(fname, __excludes):
continue
self._build(fname)
return len(self._data)
"""
Exporting
"""
def dumps(self, trim=''):
data = []
for f in self._data:
p = f[0]
if p.startswith(trim):
p = p[len(trim):]
line = "%s|%s|%d|%s|%s|%s|%d|%d|%d|%s" % (
p, f[1], f[2], f[3], f[4], f[5], f[6], f[7], f[8], f[9]
)
data.append(line)
return "\n".join(data) + "\n"
def _debug(self):
tableMain = sys.getsizeof(self._data)
tableHash = sys.getsizeof(self._hash)
tablePath = sys.getsizeof(self._path)
print("Main table: %.2f ko" % (float(tableMain) / 1024))
print("Hash table: %.2f ko" % (float(tableHash) / 1024))
print("Path table: %.2f ko" % (float(tablePath) / 1024))
class FListArchiver:
    # This is not an efficient way to do it; the only other possibility
    # would be to call the brotli binary to compress big files if needed.
    # Currently, this in-memory approach is used.
def __init__(self, ipfs_cfgdir=None):
cl = j.tools.cuisine.local
self._ipfs = cl.core.command_location('ipfs')
if not ipfs_cfgdir:
self._env = 'IPFS_PATH=%s' % cl.core.args_replace('$cfgDir/ipfs/main')
else:
self._env = 'IPFS_PATH=%s' % ipfs_cfgdir
def _compress(self, source, destination):
with open(source, 'rb') as content_file:
content = content_file.read()
compressed = brotli.compress(content, quality=6)
with open(destination, "wb") as output:
output.write(compressed)
def push_to_ipfs(self, source):
cmd = "%s %s add '%s'" % (self._env, self._ipfs, source)
out = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
m = re.match(r'^added (.+) (.+)$', out.stdout.decode())
if m is None:
raise RuntimeError('invalid output from ipfs add: %s' % out)
return m.group(1)
def build(self, flist, backend):
hashes = flist.getHashList()
if not os.path.exists(backend):
os.makedirs(backend)
for hash in hashes:
files = flist.filesFromHash(hash)
# skipping non regular files
if not flist.isRegular(files[0]):
continue
print("Processing: %s" % hash)
root = "%s/%s/%s" % (backend, hash[0:2], hash[2:4])
file = hash
target = "%s/%s" % (root, file)
if not os.path.exists(root):
os.makedirs(root)
# compressing the file
self._compress(files[0], target)
# adding it to ipfs network
hash = self.push_to_ipfs(target)
print("Network hash: %s" % hash)
# updating flist hash with ipfs hash
for f in files:
flist.setHash(f, hash)
print("Files compressed and shared")
|
|
from django.conf import settings
from collections import defaultdict
from django.db import models
from django.db.models.query import (QuerySet, ValuesQuerySet, DateQuerySet,
CHUNK_SIZE)
from django.db.models.query_utils import Q
from django.utils.translation import get_language
from nani.fieldtranslator import translate
from nani.utils import combine
import django
import logging
logger = logging.getLogger(__name__)
# maybe there should be an extra setting for this
FALLBACK_LANGUAGES = [code for code, name in settings.LANGUAGES]
class FieldTranslator(dict):
"""
Translates *shared* field names from '<shared_field>' to
'master__<shared_field>' and caches those names.
"""
def __init__(self, manager):
self.manager = manager
self.shared_fields = tuple(self.manager.shared_model._meta.get_all_field_names()) + ('pk',)
self.translated_fields = tuple(self.manager.model._meta.get_all_field_names())
super(FieldTranslator, self).__init__()
def get(self, key):
        if key not in self:
self[key] = self.build(key)
return self[key]
def build(self, key):
"""
Checks if the selected field is a shared field
and in that case, prefixes it with master___
It also handles - and ? in case its called by
order_by()
"""
if key == "?":
return key
if key.startswith("-"):
prefix = "-"
key = key[1:]
else:
prefix = ""
if key.startswith(self.shared_fields):
return '%smaster__%s' % (prefix, key)
else:
return '%s%s' % (prefix, key)
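    # Example (hypothetical field names): if 'title' is a shared field and
    # 'name' a translated one, build('-title') returns '-master__title'
    # while build('name') returns 'name' unchanged.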
class ValuesMixin(object):
def _strip_master(self, key):
if key.startswith('master__'):
return key[8:]
return key
def iterator(self):
for row in super(ValuesMixin, self).iterator():
if isinstance(row, dict):
yield dict([(self._strip_master(k), v) for k,v in row.items()])
else:
yield row
class DatesMixin(object):
pass
#===============================================================================
# Default
#===============================================================================
class TranslationQueryset(QuerySet):
"""
This is where things happen.
To fully understand this project, you have to understand this class.
Go through each method individually, maybe start with 'get', 'create' and
'iterator'.
IMPORTANT: the `model` attribute on this class is the *translated* Model,
despite this being used as the queryset for the *shared* Model!
"""
override_classes = {
ValuesQuerySet: ValuesMixin,
DateQuerySet: DatesMixin,
}
def __init__(self, model=None, query=None, using=None, real=None):
self._local_field_names = None
self._field_translator = None
self._real_manager = real
self._fallback_manager = None
self._language_code = None
super(TranslationQueryset, self).__init__(model=model, query=query, using=using)
#===========================================================================
# Helpers and properties (INTERNAL!)
#===========================================================================
@property
def shared_model(self):
"""
Get the shared model class
"""
return self._real_manager.model
@property
def field_translator(self):
"""
Field translator for this manager
"""
if self._field_translator is None:
self._field_translator = FieldTranslator(self)
return self._field_translator
@property
def shared_local_field_names(self):
if self._local_field_names is None:
self._local_field_names = self.shared_model._meta.get_all_field_names()
return self._local_field_names
def _translate_args_kwargs(self, *args, **kwargs):
# Translated kwargs from '<shared_field>' to 'master__<shared_field>'
# where necessary.
newkwargs = {}
for key, value in kwargs.items():
newkwargs[self.field_translator.get(key)] = value
# Translate args (Q objects) from '<shared_field>' to
# 'master__<shared_field>' where necessary.
newargs = []
for q in args:
newargs.append(self._recurse_q(q))
return newargs, newkwargs
def _translate_fieldnames(self, fieldnames):
newnames = []
for name in fieldnames:
newnames.append(self.field_translator.get(name))
return newnames
def _reverse_translate_fieldnames_dict(self, fieldname_dict):
"""
        Helper function to make sure the user doesn't get "bothered"
with the construction of shared/translated model
Translates e.g.
{'master__number_avg': 10} to {'number__avg': 10}
"""
newdict = {}
for key, value in fieldname_dict.items():
if key.startswith("master__"):
key = key.replace("master__", "")
newdict[key] = value
return newdict
def _recurse_q(self, q):
"""
Recursively translate fieldnames in a Q object.
TODO: What happens if we span multiple relations?
"""
newchildren = []
for child in q.children:
            if isinstance(child, Q):
                newchildren.append(self._recurse_q(child))
else:
key, value = child
newchildren.append((self.field_translator.get(key), value))
q.children = newchildren
return q
def _find_language_code(self, q):
"""
        Checks if it finds a language code in a Q object (and its children).
"""
language_code = None
for child in q.children:
if isinstance(child, Q):
language_code = self._find_language_code(child)
elif isinstance(child, tuple):
key, value = child
if key == 'language_code':
language_code = value
if language_code:
break
return language_code
def _split_kwargs(self, **kwargs):
"""
Split kwargs into shared and translated fields
"""
shared = {}
translated = {}
for key, value in kwargs.items():
if key in self.shared_local_field_names:
shared[key] = value
else:
translated[key] = value
return shared, translated
def _get_class(self, klass):
for key, value in self.override_classes.items():
if issubclass(klass, key):
return type(value.__name__, (value, klass, TranslationQueryset,), {})
return klass
def _get_shared_query_set(self):
qs = super(TranslationQueryset, self)._clone()
qs.__class__ = QuerySet
# un-select-related the 'master' relation
del qs.query.select_related['master']
accessor = self.shared_model._meta.translations_accessor
# update using the real manager
return self._real_manager.filter(**{'%s__in' % accessor:qs})
#===========================================================================
# Queryset/Manager API
#===========================================================================
def language(self, language_code=None):
if not language_code:
language_code = get_language()
self._language_code = language_code
return self.filter(language_code=language_code)
def __getitem__(self, k):
"""
        Handle getitem specially since self.iterator is called *after* the
        slicing happens, when it's no longer possible to filter a queryset.
        Therefore the check for _language_code must be done here.
"""
if not self._language_code:
return self.language().__getitem__(k)
return super(TranslationQueryset, self).__getitem__(k)
def create(self, **kwargs):
if 'language_code' not in kwargs:
if self._language_code:
kwargs['language_code'] = self._language_code
else:
kwargs['language_code'] = get_language()
obj = self.shared_model(**kwargs)
self._for_write = True
obj.save(force_insert=True, using=self.db)
return obj
def get(self, *args, **kwargs):
"""
Get an object by querying the translations model and returning a
combined instance.
"""
# Enforce a language_code to be used
newargs, newkwargs = self._translate_args_kwargs(*args, **kwargs)
# Enforce 'select related' onto 'master'
# Get the translated instance
found = False
qs = self._clone()
if 'language_code' in newkwargs:
language_code = newkwargs.pop('language_code')
qs = qs.language(language_code)
found = True
elif args:
language_code = None
for arg in args:
if not isinstance(arg, Q):
continue
language_code = self._find_language_code(arg)
if language_code:
break
if language_code:
qs = qs.language(language_code)
found = True
else:
for where in qs.query.where.children:
if where.children:
for child in where.children:
if child[0].field.name == 'language_code':
found = True
break
if found:
break
if not found:
qs = qs.language()
# self.iterator already combines! Isn't that nice?
return QuerySet.get(qs, *newargs, **newkwargs)
def filter(self, *args, **kwargs):
newargs, newkwargs = self._translate_args_kwargs(*args, **kwargs)
return super(TranslationQueryset, self).filter(*newargs, **newkwargs)
def aggregate(self, *args, **kwargs):
"""
Loops over all the passed aggregates and translates the fieldnames
"""
newargs, newkwargs = [], {}
for arg in args:
arg.lookup = self._translate_fieldnames([arg.lookup])[0]
newargs.append(arg)
for key in kwargs:
value = kwargs[key]
value.lookup = self._translate_fieldnames([value.lookup])[0]
newkwargs[key] = value
response = super(TranslationQueryset, self).aggregate(*newargs, **newkwargs)
return self._reverse_translate_fieldnames_dict(response)
def latest(self, field_name=None):
if field_name:
field_name = self.field_translator.get(field_name)
return super(TranslationQueryset, self).latest(field_name)
def in_bulk(self, id_list):
raise NotImplementedError()
def delete(self):
qs = self._get_shared_query_set()
qs.delete()
delete.alters_data = True
def delete_translations(self):
self.update(master=None)
super(TranslationQueryset, self).delete()
delete_translations.alters_data = True
def update(self, **kwargs):
shared, translated = self._split_kwargs(**kwargs)
count = 0
if translated:
count += super(TranslationQueryset, self).update(**translated)
if shared:
shared_qs = self._get_shared_query_set()
count += shared_qs.update(**shared)
return count
update.alters_data = True
def values(self, *fields):
fields = self._translate_fieldnames(fields)
return super(TranslationQueryset, self).values(*fields)
def values_list(self, *fields, **kwargs):
fields = self._translate_fieldnames(fields)
return super(TranslationQueryset, self).values_list(*fields, **kwargs)
def dates(self, field_name, kind=None, order='ASC'):
field_name = self.field_translator.get(field_name)
if int(django.get_version().split('.')[1][0]) <= 2:
from nani.compat.date import DateQuerySet
return self._clone(klass=DateQuerySet, setup=True,
_field_name=field_name, _kind=kind, _order=order)
return super(TranslationQueryset, self).dates(field_name, kind=kind, order=order)
def exclude(self, *args, **kwargs):
newargs, newkwargs = self._translate_args_kwargs(*args, **kwargs)
return super(TranslationQueryset, self).exclude(*newargs, **newkwargs)
def complex_filter(self, filter_obj):
# Don't know how to handle Q object yet, but it is probably doable...
# An unknown type object that supports 'add_to_query' is a different story :)
if isinstance(filter_obj, models.Q) or hasattr(filter_obj, 'add_to_query'):
raise NotImplementedError()
newargs, newkwargs = self._translate_args_kwargs(**filter_obj)
return super(TranslationQueryset, self)._filter_or_exclude(None, *newargs, **newkwargs)
def annotate(self, *args, **kwargs):
raise NotImplementedError()
def order_by(self, *field_names):
"""
Returns a new QuerySet instance with the ordering changed.
"""
fieldnames = self._translate_fieldnames(field_names)
return super(TranslationQueryset, self).order_by(*fieldnames)
def count(self):
if not self._language_code:
return self.language().count()
return super(TranslationQueryset, self).count()
def reverse(self):
return super(TranslationQueryset, self).reverse()
def defer(self, *fields):
raise NotImplementedError()
def only(self, *fields):
raise NotImplementedError()
def _clone(self, klass=None, setup=False, **kwargs):
kwargs.update({
'_local_field_names': self._local_field_names,
'_field_translator': self._field_translator,
'_language_code': self._language_code,
'_real_manager': self._real_manager,
'_fallback_manager': self._fallback_manager,
})
if klass:
klass = self._get_class(klass)
else:
klass = self.__class__
return super(TranslationQueryset, self)._clone(klass, setup, **kwargs)
def iterator(self):
"""
If this queryset is not filtered by a language code yet, it should be
filtered first by calling self.language.
If someone doesn't want a queryset filtered by language, they should use
Model.objects.untranslated()
"""
if not self._language_code:
for obj in self.language().iterator():
yield obj
else:
for obj in super(TranslationQueryset, self).iterator():
# non-cascade-deletion hack:
if not obj.master:
yield obj
else:
yield combine(obj)
class TranslationManager(models.Manager):
"""
Manager class for models with translated fields
"""
#===========================================================================
# API
#===========================================================================
def using_translations(self):
if not hasattr(self, '_real_manager'):
self.contribute_real_manager()
qs = TranslationQueryset(self.translations_model, using=self.db, real=self._real_manager)
return qs.select_related('master')
def language(self, language_code=None):
return self.using_translations().language(language_code)
def untranslated(self):
return self._fallback_manager.get_query_set()
#===========================================================================
# Internals
#===========================================================================
@property
def translations_model(self):
"""
Get the translations model class
"""
return self.model._meta.translations_model
#def get_query_set(self):
# """
# Make sure that querysets inherit the methods on this manager (chaining)
# """
# return self.untranslated()
def contribute_to_class(self, model, name):
super(TranslationManager, self).contribute_to_class(model, name)
self.name = name
self.contribute_real_manager()
self.contribute_fallback_manager()
def contribute_real_manager(self):
self._real_manager = models.Manager()
self._real_manager.contribute_to_class(self.model, '_%s' % getattr(self, 'name', 'objects'))
def contribute_fallback_manager(self):
self._fallback_manager = TranslationFallbackManager()
self._fallback_manager.contribute_to_class(self.model, '_%s_fallback' % getattr(self, 'name', 'objects'))
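# Usage sketch (hypothetical model: a translated model 'Book' with a
# translated field 'title'):
#
#     Book.objects.language('en').filter(title__contains='django')
#     Book.objects.language('de').create(title='...')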
#===============================================================================
# Fallbacks
#===============================================================================
class FallbackQueryset(QuerySet):
'''
Queryset that tries to load a translated version using fallbacks on a per
instance basis.
BEWARE: creates a lot of queries!
'''
def __init__(self, *args, **kwargs):
self._translation_fallbacks = None
super(FallbackQueryset, self).__init__(*args, **kwargs)
def _get_real_instances(self, base_results):
"""
The logic for this method was taken from django-polymorphic by Bert
Constantin (https://github.com/bconstantin/django_polymorphic) and was
slightly altered to fit the needs of django-nani.
"""
# get the primary keys of the shared model results
base_ids = [obj.pk for obj in base_results]
fallbacks = list(self._translation_fallbacks)
# get all translations for the fallbacks chosen for those shared models,
# note that this query is *BIG* and might return a lot of data, but it's
# arguably faster than running one query for each result or even worse
# one query per result per language until we find something
translations_manager = self.model._meta.translations_model.objects
baseqs = translations_manager.select_related('master')
translations = baseqs.filter(language_code__in=fallbacks,
master__pk__in=base_ids)
fallback_objects = defaultdict(dict)
# turn the results into a dict of dicts with shared model primary key as
# keys for the first dict and language codes for the second dict
for obj in translations:
fallback_objects[obj.master.pk][obj.language_code] = obj
        # iterate over the shared model results
for instance in base_results:
translation = None
# find the translation
for fallback in fallbacks:
translation = fallback_objects[instance.pk].get(fallback, None)
if translation is not None:
break
# if we found a translation, yield the combined result
if translation:
yield combine(translation)
else:
                # otherwise yield the shared instance only
                logger.error("no translation for %s, type %s" % (instance, type(instance)))
                yield instance
def iterator(self):
"""
The logic for this method was taken from django-polymorphic by Bert
Constantin (https://github.com/bconstantin/django_polymorphic) and was
slightly altered to fit the needs of django-nani.
"""
base_iter = super(FallbackQueryset, self).iterator()
# only do special stuff when we actually want fallbacks
if self._translation_fallbacks:
while True:
base_result_objects = []
reached_end = False
# get the next "chunk" of results
for i in range(CHUNK_SIZE):
try:
                        instance = next(base_iter)
base_result_objects.append(instance)
except StopIteration:
reached_end = True
break
# "combine" the results with their fallbacks
real_results = self._get_real_instances(base_result_objects)
# yield em!
for instance in real_results:
yield instance
                # get out of the while loop if we're at the end; inside a
                # generator, returning is the correct way to stop iteration
                if reached_end:
                    return
else:
# just iterate over it
for instance in base_iter:
yield instance
def use_fallbacks(self, *fallbacks):
if fallbacks:
self._translation_fallbacks = fallbacks
else:
self._translation_fallbacks = FALLBACK_LANGUAGES
return self
def _clone(self, klass=None, setup=False, **kwargs):
kwargs.update({
'_translation_fallbacks': self._translation_fallbacks,
})
return super(FallbackQueryset, self)._clone(klass, setup, **kwargs)
class TranslationFallbackManager(models.Manager):
"""
Manager class for the shared model, without specific translations. Allows
using `use_fallbacks()` to enable per object language fallback.
"""
def use_fallbacks(self, *fallbacks):
return self.get_query_set().use_fallbacks(*fallbacks)
def get_query_set(self):
qs = FallbackQueryset(self.model, using=self.db)
return qs
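# Usage sketch (hypothetical model): iterate over shared instances, combining
# each with the first translation found in the given fallback order:
#
#     for book in Book.objects.untranslated().use_fallbacks('en', 'de'):
#         print(book.title)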
#===============================================================================
# TranslationAware
#===============================================================================
class TranslationAwareQueryset(QuerySet):
def __init__(self, *args, **kwargs):
super(TranslationAwareQueryset, self).__init__(*args, **kwargs)
self._language_code = None
def _translate_args_kwargs(self, *args, **kwargs):
self.language(self._language_code)
language_joins = []
newkwargs = {}
extra_filters = Q()
for key, value in kwargs.items():
newkey, langjoins = translate(key, self.model)
for langjoin in langjoins:
if langjoin not in language_joins:
language_joins.append(langjoin)
newkwargs[newkey] = value
newargs = []
for q in args:
new_q, langjoins = self._recurse_q(q)
newargs.append(new_q)
for langjoin in langjoins:
if langjoin not in language_joins:
language_joins.append(langjoin)
for langjoin in language_joins:
extra_filters &= Q(**{langjoin: self._language_code})
return newargs, newkwargs, extra_filters
    def _recurse_q(self, q):
        newchildren = []
        language_joins = []
        for child in q.children:
            if isinstance(child, Q):
                newq, langjoins = self._recurse_q(child)
                newchildren.append(newq)
            else:
                key, value = child
                newkey, langjoins = translate(key, self.model)
                newchildren.append((newkey, value))
            for langjoin in langjoins:
                if langjoin not in language_joins:
                    language_joins.append(langjoin)
        q.children = newchildren
        return q, language_joins
def _translate_fieldnames(self, fields):
self.language(self._language_code)
newfields = []
extra_filters = Q()
language_joins = []
for field in fields:
newfield, langjoins = translate(field, self.model)
newfields.append(newfield)
for langjoin in langjoins:
if langjoin not in language_joins:
language_joins.append(langjoin)
for langjoin in language_joins:
extra_filters &= Q(**{langjoin: self._language_code})
return newfields, extra_filters
#===========================================================================
# Queryset/Manager API
#===========================================================================
def language(self, language_code=None):
if not language_code:
language_code = get_language()
self._language_code = language_code
return self
def get(self, *args, **kwargs):
newargs, newkwargs, extra_filters = self._translate_args_kwargs(*args, **kwargs)
return self._filter_extra(extra_filters).get(*newargs, **newkwargs)
def filter(self, *args, **kwargs):
newargs, newkwargs, extra_filters = self._translate_args_kwargs(*args, **kwargs)
return self._filter_extra(extra_filters).filter(*newargs, **newkwargs)
def aggregate(self, *args, **kwargs):
raise NotImplementedError()
def latest(self, field_name=None):
extra_filters = Q()
if field_name:
            field_name, language_joins = translate(field_name, self.model)
            for langjoin in language_joins:
                extra_filters &= Q(**{langjoin: self._language_code})
return self._filter_extra(extra_filters).latest(field_name)
def in_bulk(self, id_list):
raise NotImplementedError()
def values(self, *fields):
fields, extra_filters = self._translate_fieldnames(fields)
return self._filter_extra(extra_filters).values(*fields)
def values_list(self, *fields, **kwargs):
fields, extra_filters = self._translate_fieldnames(fields)
return self._filter_extra(extra_filters).values_list(*fields, **kwargs)
def dates(self, field_name, kind, order='ASC'):
raise NotImplementedError()
def exclude(self, *args, **kwargs):
newargs, newkwargs, extra_filters = self._translate_args_kwargs(*args, **kwargs)
return self._exclude_extra(extra_filters).exclude(*newargs, **newkwargs)
def complex_filter(self, filter_obj):
        # admin calls this with an empty filter_obj sometimes
if filter_obj == {}:
return self
raise NotImplementedError()
def annotate(self, *args, **kwargs):
raise NotImplementedError()
def order_by(self, *field_names):
"""
Returns a new QuerySet instance with the ordering changed.
"""
fieldnames, extra_filters = self._translate_fieldnames(field_names)
return self._filter_extra(extra_filters).order_by(*fieldnames)
def reverse(self):
raise NotImplementedError()
def defer(self, *fields):
raise NotImplementedError()
def only(self, *fields):
raise NotImplementedError()
def _clone(self, klass=None, setup=False, **kwargs):
kwargs.update({
'_language_code': self._language_code,
})
return super(TranslationAwareQueryset, self)._clone(klass, setup, **kwargs)
def _filter_extra(self, extra_filters):
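        # NB: returning a super() proxy makes the *next* chained call (e.g.
        # .get() or .latest()) dispatch to the plain QuerySet, so the already
        # translated names are not translated a second time.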
qs = super(TranslationAwareQueryset, self).filter(extra_filters)
return super(TranslationAwareQueryset, qs)
def _exclude_extra(self, extra_filters):
qs = super(TranslationAwareQueryset, self).exclude(extra_filters)
return super(TranslationAwareQueryset, qs)
class TranslationAwareManager(models.Manager):
def language(self, language_code=None):
return self.get_query_set().language(language_code)
def get_query_set(self):
qs = TranslationAwareQueryset(self.model, using=self.db)
return qs
#===============================================================================
# Translations Model Manager
#===============================================================================
class TranslationsModelManager(models.Manager):
def get_language(self, language):
return self.get(language_code=language)
|
|
#!/usr/bin/env python
import argparse
import csv
import itertools
import math
import matplotlib
import os
import subprocess
import sys
class WorkingDirectory:
"Scoped context for changing working directory"
def __init__(self, working_dir):
self.original_dir = os.getcwd()
self.working_dir = working_dir
def __enter__(self):
sys.stderr.write("Entering directory `%s'\n" % self.working_dir)
os.chdir(self.working_dir)
return self
def __exit__(self, type, value, traceback):
sys.stderr.write("Leaving directory `%s'\n" % self.working_dir)
os.chdir(self.original_dir)
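# Example (hypothetical directory): the original working directory is
# restored even if the body raises:
#
#     with WorkingDirectory("build"):
#         open("notes.txt", "w").close()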
def filename(n):
"Filename for a generated file with n statements"
return "gen%d.ttl" % n
def gen(sp2b_dir, n_min, n_max, step):
"Generate files with n_min ... n_max statements if they are not present"
with WorkingDirectory(sp2b_dir) as dir:
for n in range(n_min, n_max + step, step):
out_path = os.path.join(dir.original_dir, "build", filename(n))
if not os.path.exists(out_path):
subprocess.call(["./sp2b_gen", "-t", str(n), out_path])
def write_header(results, progs):
"Write the header line for TSV output"
results.write("n")
for prog in progs:
results.write("\t" + os.path.basename(prog.split()[0]))
results.write("\n")
def parse_time(report):
"Return user time and max RSS from a /usr/bin/time -v report"
time = memory = None
for line in report.split("\n"):
if line.startswith("\tUser time"):
time = float(line[line.find(":") + 1 :])
elif line.startswith("\tMaximum resident set"):
memory = float(line[line.find(":") + 1 :]) * 1024
return (time, memory)
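# Example (hypothetical values): a report containing the lines
#     "\tUser time (seconds): 1.23"
#     "\tMaximum resident set size (kbytes): 2048"
# yields (1.23, 2097152.0), i.e. seconds and bytes.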
def get_dashes():
"Generator for plot line dash patterns"
dash = 2.0
space = dot = 0.75
yield [] # Solid
yield [dash, space] # Dashed
yield [dot, space] # Dotted
# Dash-dots, with increasing number of dots for each line
for i in itertools.count(2):
yield [dash, space] + [dot, space] * (i - 1)
def plot(in_file, out_filename, x_label, y_label, y_max=None):
"Plot a TSV file as SVG"
matplotlib.use("agg")
import matplotlib.pyplot as plt
fig_height = 4.0
dashes = get_dashes()
markers = itertools.cycle(["o", "s", "v", "D", "*", "p", "P", "h", "X"])
reader = csv.reader(in_file, delimiter="\t")
header = next(reader)
cols = [x for x in zip(*list(reader))]
plt.clf()
fig = plt.figure(figsize=(fig_height * math.sqrt(2), fig_height))
ax = fig.add_subplot(111)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
if y_max is not None:
ax.set_ylim([0.0, y_max])
ax.grid(linewidth=0.25, linestyle=":", color="0", dashes=[0.2, 1.6])
ax.ticklabel_format(style="sci", scilimits=(4, 0), useMathText=True)
ax.tick_params(axis="both", width=0.75)
x = list(map(float, cols[0]))
for i, y in enumerate(cols[1::]):
ax.plot(
x,
list(map(float, y)),
label=header[i + 1],
marker=next(markers),
dashes=next(dashes),
markersize=3.0,
linewidth=1.0,
)
plt.legend()
plt.savefig(out_filename, bbox_inches="tight", pad_inches=0.025)
plt.close()
sys.stderr.write("wrote {}\n".format(out_filename))
def run(progs, n_min, n_max, step):
"Benchmark each program with n_min ... n_max statements"
with WorkingDirectory("build"):
results = {
"time": open("serdi-time.txt", "w"),
"throughput": open("serdi-throughput.txt", "w"),
"memory": open("serdi-memory.txt", "w"),
}
# Write TSV header for all output files
for name, f in results.items():
write_header(f, progs)
for n in range(n_min, n_max + step, step):
# Add first column (n) to rows
rows = {}
for name, _ in results.items():
rows[name] = [str(n)]
# Run each program and fill rows with measurements
for prog in progs:
cmd = "/usr/bin/time -v " + prog + " " + filename(n)
with open(filename(n) + ".out", "w") as out:
sys.stderr.write(cmd + "\n")
proc = subprocess.Popen(
cmd.split(), stdout=out, stderr=subprocess.PIPE
)
time, memory = parse_time(proc.communicate()[1].decode())
rows["time"] += ["%.07f" % time]
rows["throughput"] += ["%d" % (n / time)]
rows["memory"] += [str(memory)]
# Write rows to output files
for name, f in results.items():
f.write("\t".join(rows[name]) + "\n")
        # Close the output files and report what was written
        for name, f in results.items():
            f.close()
            sys.stderr.write("wrote serdi-%s.txt\n" % name)
def plot_results():
"Plot all benchmark results"
with WorkingDirectory("build"):
plot(
open("serdi-time.txt", "r"),
"serdi-time.svg",
"Statements",
"Time (s)",
)
plot(
open("serdi-throughput.txt", "r"),
"serdi-throughput.svg",
"Statements",
"Statements / s",
)
plot(
open("serdi-memory.txt", "r"),
"serdi-memory.svg",
"Statements",
"Bytes",
)
if __name__ == "__main__":
ap = argparse.ArgumentParser(
usage="%(prog)s [OPTION]... SP2B_DIR",
description="Benchmark RDF reading and writing commands\n",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""
example:
%(prog)s --max 100000 \\
--run 'rapper -i turtle -o turtle' \\
--run 'riot --output=ttl' \\
--run 'rdfpipe -i turtle -o turtle' /path/to/sp2b/src/
""",
)
ap.add_argument(
"--max", type=int, default=1000000, help="maximum triple count"
)
ap.add_argument(
"--run",
type=str,
action="append",
default=[],
help="additional command to run (input file is appended)",
)
ap.add_argument(
"--no-generate", action="store_true", help="do not generate data"
)
ap.add_argument(
"--no-execute", action="store_true", help="do not run benchmarks"
)
ap.add_argument(
"--no-plot", action="store_true", help="do not plot benchmarks"
)
ap.add_argument("sp2b_dir", help="path to sp2b test data generator")
args = ap.parse_args(sys.argv[1:])
progs = ["serdi -b -f -i turtle -o turtle"] + args.run
min_n = int(args.max / 10)
max_n = args.max
step = min_n
if not args.no_generate:
gen(args.sp2b_dir, min_n, max_n, step)
if not args.no_execute:
run(progs, min_n, max_n, step)
if not args.no_plot:
plot_results()
|
|
import asyncio
import binascii
import cgi
import collections
import datetime
import enum
import io
import json
import math
import re
import time
import warnings
from email.utils import parsedate
from types import MappingProxyType
from multidict import CIMultiDict, CIMultiDictProxy, MultiDict, MultiDictProxy
from yarl import URL
from . import hdrs, multipart
from .helpers import HeadersMixin, SimpleCookie, reify, sentinel
from .protocol import WebResponse as ResponseImpl
from .protocol import HttpVersion10, HttpVersion11
__all__ = (
'ContentCoding', 'BaseRequest', 'Request', 'StreamResponse', 'Response',
'json_response'
)
FileField = collections.namedtuple('Field', 'name filename file content_type')
class ContentCoding(enum.Enum):
# The content codings that we have support for.
#
# Additional registered codings are listed at:
# https://www.iana.org/assignments/http-parameters/http-parameters.xhtml#content-coding
deflate = 'deflate'
gzip = 'gzip'
identity = 'identity'
############################################################
# HTTP Request
############################################################
class BaseRequest(collections.MutableMapping, HeadersMixin):
POST_METHODS = {hdrs.METH_PATCH, hdrs.METH_POST, hdrs.METH_PUT,
hdrs.METH_TRACE, hdrs.METH_DELETE}
def __init__(self, message, payload, transport, reader, writer,
time_service, task, *,
loop=None, secure_proxy_ssl_header=None,
client_max_size=1024**2):
self._loop = loop
self._message = message
self._transport = transport
self._reader = reader
self._writer = writer
self._post = None
self._post_files_cache = None
self._payload = payload
self._read_bytes = None
self._has_body = not payload.at_eof()
self._secure_proxy_ssl_header = secure_proxy_ssl_header
self._time_service = time_service
self._state = {}
self._cache = {}
self._task = task
self._client_max_size = client_max_size
def clone(self, *, method=sentinel, rel_url=sentinel,
headers=sentinel):
"""Clone itself with replacement some attributes.
Creates and returns a new instance of Request object. If no parameters
are given, an exact copy is returned. If a parameter is not passed, it
will reuse the one from the current request object.
"""
if self._read_bytes:
raise RuntimeError("Cannot clone request "
"after reading it's content")
dct = {}
if method is not sentinel:
dct['method'] = method
if rel_url is not sentinel:
dct['path'] = str(URL(rel_url))
if headers is not sentinel:
dct['headers'] = CIMultiDict(headers)
dct['raw_headers'] = [(k.encode('utf-8'), v.encode('utf-8'))
for k, v in headers.items()]
message = self._message._replace(**dct)
return self.__class__(
message,
self._payload,
self._transport,
self._reader,
self._writer,
self._time_service,
self._task,
secure_proxy_ssl_header=self._secure_proxy_ssl_header)
@property
def task(self):
return self._task
# MutableMapping API
def __getitem__(self, key):
return self._state[key]
def __setitem__(self, key, value):
self._state[key] = value
def __delitem__(self, key):
del self._state[key]
def __len__(self):
return len(self._state)
def __iter__(self):
return iter(self._state)
########
@reify
def scheme(self):
"""A string representing the scheme of the request.
'http' or 'https'.
"""
warnings.warn("scheme is deprecated, "
"use .url.scheme instead",
DeprecationWarning)
return self.url.scheme
@reify
def _scheme(self):
if self._transport.get_extra_info('sslcontext'):
return 'https'
secure_proxy_ssl_header = self._secure_proxy_ssl_header
if secure_proxy_ssl_header is not None:
header, value = secure_proxy_ssl_header
if self.headers.get(header) == value:
return 'https'
return 'http'
@reify
def method(self):
"""Read only property for getting HTTP method.
The value is upper-cased str like 'GET', 'POST', 'PUT' etc.
"""
return self._message.method
@reify
def version(self):
"""Read only property for getting HTTP version of request.
Returns aiohttp.protocol.HttpVersion instance.
"""
return self._message.version
@reify
def host(self):
"""Read only property for getting *HOST* header of request.
Returns str or None if HTTP request has no HOST header.
"""
warnings.warn("host property is deprecated, "
"use .url.host instead",
DeprecationWarning)
return self._message.headers.get(hdrs.HOST)
@reify
def rel_url(self):
        # special case for paths starting with `//`:
        # such a path is a valid host form, but for a web server the
        # likelihood of it being a malformed path is much higher
url = URL(self._message.path)
if self._message.path.startswith('//'):
return url.with_path(self._message.path.split('?')[0])
return URL(self._message.path)
@reify
def path_qs(self):
"""The URL including PATH_INFO and the query string.
E.g, /app/blog?id=10
"""
warnings.warn("path_qs property is deprecated, "
"use str(request.rel_url) instead",
DeprecationWarning)
return str(self.rel_url)
@reify
def url(self):
return URL('{}://{}{}'.format(self._scheme,
self._message.headers.get(hdrs.HOST),
str(self.rel_url)))
@reify
def raw_path(self):
""" The URL including raw *PATH INFO* without the host or scheme.
        Warning: the path is unquoted and may contain invalid URL characters.
E.g., ``/my%2Fpath%7Cwith%21some%25strange%24characters``
"""
warnings.warn("raw_path property is deprecated, "
"use .rel_url.raw_path instead",
DeprecationWarning)
return self.rel_url.raw_path
@reify
def path(self):
"""The URL including *PATH INFO* without the host or scheme.
E.g., ``/app/blog``
"""
warnings.warn("path property is deprecated, use .rel_url.path instead",
DeprecationWarning)
return self.rel_url.path
@reify
def query_string(self):
"""The query string in the URL.
E.g., id=10
"""
warnings.warn("query_string property is deprecated, "
"use .rel_url.query_string instead",
DeprecationWarning)
return self.rel_url.query_string
@reify
def GET(self):
"""A multidict with all the variables in the query string.
Lazy property.
"""
warnings.warn("GET property is deprecated, use .rel_url.query instead",
DeprecationWarning)
return self.rel_url.query
@reify
def POST(self):
"""A multidict with all the variables in the POST parameters.
        The post() method has to be called before using this attribute.
"""
warnings.warn("POST property is deprecated, use .post() instead",
DeprecationWarning)
if self._post is None:
raise RuntimeError("POST is not available before post()")
return self._post
@reify
def headers(self):
"""A case-insensitive multidict proxy with all headers."""
return CIMultiDictProxy(self._message.headers)
@reify
def raw_headers(self):
"""A sequence of pars for all headers."""
return tuple(self._message.raw_headers)
@reify
def if_modified_since(self, _IF_MODIFIED_SINCE=hdrs.IF_MODIFIED_SINCE):
"""The value of If-Modified-Since HTTP header, or None.
This header is represented as a `datetime` object.
"""
httpdate = self.headers.get(_IF_MODIFIED_SINCE)
if httpdate is not None:
timetuple = parsedate(httpdate)
if timetuple is not None:
return datetime.datetime(*timetuple[:6],
tzinfo=datetime.timezone.utc)
return None
@property
def keep_alive(self):
"""Is keepalive enabled by client?"""
return not self._message.should_close
@property
def time_service(self):
"""Time service"""
return self._time_service
@property
def transport(self):
"""Transport used for request processing."""
return self._transport
@property
def transport_pair(self):
"""Reader and writer used for request processing."""
return (self._reader, self._writer)
@reify
def cookies(self):
"""Return request cookies.
A read-only dictionary-like object.
"""
raw = self.headers.get(hdrs.COOKIE, '')
parsed = SimpleCookie(raw)
return MappingProxyType(
{key: val.value for key, val in parsed.items()})
@property
def http_range(self, *, _RANGE=hdrs.RANGE):
"""The content of Range HTTP header.
Return a slice instance.
"""
rng = self.headers.get(_RANGE)
start, end = None, None
if rng is not None:
try:
pattern = r'^bytes=(\d*)-(\d*)$'
start, end = re.findall(pattern, rng)[0]
except IndexError: # pattern was not found in header
raise ValueError("range not in acceptible format")
end = int(end) if end else None
start = int(start) if start else None
if start is None and end is not None:
# end with no start is to return tail of content
end = -end
if start is not None and end is not None:
# end is inclusive in range header, exclusive for slice
end += 1
if start >= end:
raise ValueError('start cannot be after end')
if start is end is None: # No valid range supplied
raise ValueError('No start or end of range specified')
return slice(start, end, 1)
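    # Examples derived from the parsing above (the Range header uses an
    # inclusive end, the returned slice an exclusive one):
    #     'bytes=0-499' -> slice(0, 500, 1)
    #     'bytes=500-'  -> slice(500, None, 1)
    #     'bytes=-500'  -> slice(None, -500, 1)  (the last 500 bytes)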
@property
def content(self):
"""Return raw payload stream."""
return self._payload
@property
def has_body(self):
"""Return True if request has HTTP BODY, False otherwise."""
return self._has_body
@asyncio.coroutine
def release(self):
"""Release request.
Eat unread part of HTTP BODY if present.
"""
while not self._payload.at_eof():
yield from self._payload.readany()
@asyncio.coroutine
def read(self):
"""Read request body if present.
Returns bytes object with full request content.
"""
if self._read_bytes is None:
body = bytearray()
while True:
chunk = yield from self._payload.readany()
body.extend(chunk)
if self._client_max_size \
and len(body) >= self._client_max_size:
# local import to avoid circular imports
from aiohttp import web_exceptions
raise web_exceptions.HTTPRequestEntityTooLarge
if not chunk:
break
self._read_bytes = bytes(body)
return self._read_bytes
@asyncio.coroutine
def text(self):
"""Return BODY as text using encoding from .charset."""
bytes_body = yield from self.read()
encoding = self.charset or 'utf-8'
return bytes_body.decode(encoding)
@asyncio.coroutine
def json(self, *, loads=json.loads, loader=None):
"""Return BODY as JSON."""
if loader is not None:
warnings.warn(
"Using loader argument is deprecated, use loads instead",
DeprecationWarning)
loads = loader
body = yield from self.text()
return loads(body)
@asyncio.coroutine
def multipart(self, *, reader=multipart.MultipartReader):
"""Return async iterator to process BODY as multipart."""
return reader(self.headers, self.content)
@asyncio.coroutine
def post(self):
"""Return POST parameters."""
if self._post is not None:
return self._post
if self.method not in self.POST_METHODS:
self._post = MultiDictProxy(MultiDict())
return self._post
content_type = self.content_type
if (content_type not in ('',
'application/x-www-form-urlencoded',
'multipart/form-data')):
self._post = MultiDictProxy(MultiDict())
return self._post
if self.content_type.startswith('multipart/'):
warnings.warn('To process multipart requests use .multipart'
' coroutine instead.', DeprecationWarning)
body = yield from self.read()
content_charset = self.charset or 'utf-8'
environ = {'REQUEST_METHOD': self.method,
'CONTENT_LENGTH': str(len(body)),
'QUERY_STRING': '',
'CONTENT_TYPE': self.headers.get(hdrs.CONTENT_TYPE)}
fs = cgi.FieldStorage(fp=io.BytesIO(body),
environ=environ,
keep_blank_values=True,
encoding=content_charset)
supported_transfer_encoding = {
'base64': binascii.a2b_base64,
'quoted-printable': binascii.a2b_qp
}
out = MultiDict()
_count = 1
for field in fs.list or ():
transfer_encoding = field.headers.get(
hdrs.CONTENT_TRANSFER_ENCODING, None)
if field.filename:
ff = FileField(field.name,
field.filename,
field.file, # N.B. file closed error
field.type)
if self._post_files_cache is None:
self._post_files_cache = {}
self._post_files_cache[field.name+str(_count)] = field
_count += 1
out.add(field.name, ff)
else:
value = field.value
if transfer_encoding in supported_transfer_encoding:
# binascii accepts bytes
value = value.encode('utf-8')
value = supported_transfer_encoding[
transfer_encoding](value)
out.add(field.name, value)
self._post = MultiDictProxy(out)
return self._post
def __repr__(self):
ascii_encodable_path = self.path.encode('ascii', 'backslashreplace') \
.decode('ascii')
return "<{} {} {} >".format(self.__class__.__name__,
self.method, ascii_encodable_path)
@asyncio.coroutine
def _prepare_hook(self, response):
return
yield # pragma: no cover
class Request(BaseRequest):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# matchdict, route_name, handler
# or information about traversal lookup
self._match_info = None # initialized after route resolving
@property
def match_info(self):
"""Result of route resolving."""
return self._match_info
@reify
def app(self):
"""Application instance."""
return self._match_info.apps[-1]
@asyncio.coroutine
def _prepare_hook(self, response):
match_info = self._match_info
if match_info is None:
return
for app in match_info.apps:
yield from app.on_response_prepare.send(self, response)
############################################################
# HTTP Response classes
############################################################
class StreamResponse(HeadersMixin):
def __init__(self, *, status=200, reason=None, headers=None):
self._body = None
self._keep_alive = None
self._chunked = False
self._chunk_size = None
self._compression = False
self._compression_force = False
self._headers = CIMultiDict()
self._cookies = SimpleCookie()
self._req = None
self._resp_impl = None
self._eof_sent = False
self._body_length = 0
if headers is not None:
# TODO: optimize CIMultiDict extending
self._headers.extend(headers)
self._headers.setdefault(hdrs.CONTENT_TYPE, 'application/octet-stream')
self.set_status(status, reason)
@property
def prepared(self):
return self._resp_impl is not None
@property
def started(self):
warnings.warn('use Response.prepared instead', DeprecationWarning)
return self.prepared
@property
def task(self):
return getattr(self._req, 'task', None)
@property
def status(self):
return self._status
@property
def chunked(self):
return self._chunked
@property
def compression(self):
return self._compression
@property
def reason(self):
return self._reason
def set_status(self, status, reason=None):
if self.prepared:
raise RuntimeError("Cannot change the response status code after "
"the headers have been sent")
self._status = int(status)
if reason is None:
reason = ResponseImpl.calc_reason(status)
self._reason = reason
@property
def keep_alive(self):
return self._keep_alive
def force_close(self):
self._keep_alive = False
@property
def body_length(self):
return self._body_length
@property
def output_length(self):
return self._resp_impl.output_length
def enable_chunked_encoding(self, chunk_size=None):
"""Enables automatic chunked transfer encoding."""
self._chunked = True
self._chunk_size = chunk_size
def enable_compression(self, force=None):
"""Enables response compression encoding."""
# Backwards compatibility for when force was a bool <0.17.
if type(force) == bool:
force = ContentCoding.deflate if force else ContentCoding.identity
elif force is not None:
assert isinstance(force, ContentCoding), ("force should one of "
"None, bool or "
"ContentEncoding")
self._compression = True
self._compression_force = force
@property
def headers(self):
return self._headers
@property
def cookies(self):
return self._cookies
def set_cookie(self, name, value, *, expires=None,
domain=None, max_age=None, path='/',
secure=None, httponly=None, version=None):
"""Set or update response cookie.
        Sets a new cookie or updates an existing one with a new value.
        Only params that are not None are updated.
"""
old = self._cookies.get(name)
if old is not None and old.coded_value == '':
# deleted cookie
self._cookies.pop(name, None)
self._cookies[name] = value
c = self._cookies[name]
if expires is not None:
c['expires'] = expires
elif c.get('expires') == 'Thu, 01 Jan 1970 00:00:00 GMT':
del c['expires']
if domain is not None:
c['domain'] = domain
if max_age is not None:
c['max-age'] = max_age
elif 'max-age' in c:
del c['max-age']
c['path'] = path
if secure is not None:
c['secure'] = secure
if httponly is not None:
c['httponly'] = httponly
if version is not None:
c['version'] = version
def del_cookie(self, name, *, domain=None, path='/'):
"""Delete cookie.
Creates new empty expired cookie.
"""
# TODO: do we need domain/path here?
self._cookies.pop(name, None)
self.set_cookie(name, '', max_age=0,
expires="Thu, 01 Jan 1970 00:00:00 GMT",
domain=domain, path=path)
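    # Example: set_cookie('session', 'abc', max_age=3600) sets a cookie that
    # expires in an hour; del_cookie('session') replaces it with an empty,
    # already-expired cookie so that clients drop it.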
@property
def content_length(self):
# Just a placeholder for adding setter
return super().content_length
@content_length.setter
def content_length(self, value):
if value is not None:
value = int(value)
# TODO: raise error if chunked enabled
self.headers[hdrs.CONTENT_LENGTH] = str(value)
else:
self.headers.pop(hdrs.CONTENT_LENGTH, None)
@property
def content_type(self):
# Just a placeholder for adding setter
return super().content_type
@content_type.setter
def content_type(self, value):
self.content_type # read header values if needed
self._content_type = str(value)
self._generate_content_type_header()
@property
def charset(self):
# Just a placeholder for adding setter
return super().charset
@charset.setter
def charset(self, value):
ctype = self.content_type # read header values if needed
if ctype == 'application/octet-stream':
raise RuntimeError("Setting charset for application/octet-stream "
"doesn't make sense, setup content_type first")
if value is None:
self._content_dict.pop('charset', None)
else:
self._content_dict['charset'] = str(value).lower()
self._generate_content_type_header()
@property
def last_modified(self, _LAST_MODIFIED=hdrs.LAST_MODIFIED):
"""The value of Last-Modified HTTP header, or None.
This header is represented as a `datetime` object.
"""
httpdate = self.headers.get(_LAST_MODIFIED)
if httpdate is not None:
timetuple = parsedate(httpdate)
if timetuple is not None:
return datetime.datetime(*timetuple[:6],
tzinfo=datetime.timezone.utc)
return None
@last_modified.setter
def last_modified(self, value):
if value is None:
self.headers.pop(hdrs.LAST_MODIFIED, None)
elif isinstance(value, (int, float)):
self.headers[hdrs.LAST_MODIFIED] = time.strftime(
"%a, %d %b %Y %H:%M:%S GMT", time.gmtime(math.ceil(value)))
elif isinstance(value, datetime.datetime):
self.headers[hdrs.LAST_MODIFIED] = time.strftime(
"%a, %d %b %Y %H:%M:%S GMT", value.utctimetuple())
elif isinstance(value, str):
self.headers[hdrs.LAST_MODIFIED] = value
@property
def tcp_nodelay(self):
resp_impl = self._resp_impl
if resp_impl is None:
raise RuntimeError("Cannot get tcp_nodelay for "
"not prepared response")
return resp_impl.transport.tcp_nodelay
def set_tcp_nodelay(self, value):
resp_impl = self._resp_impl
if resp_impl is None:
raise RuntimeError("Cannot set tcp_nodelay for "
"not prepared response")
resp_impl.transport.set_tcp_nodelay(value)
@property
def tcp_cork(self):
resp_impl = self._resp_impl
if resp_impl is None:
raise RuntimeError("Cannot get tcp_cork for "
"not prepared response")
return resp_impl.transport.tcp_cork
def set_tcp_cork(self, value):
resp_impl = self._resp_impl
if resp_impl is None:
raise RuntimeError("Cannot set tcp_cork for "
"not prepared response")
resp_impl.transport.set_tcp_cork(value)
def _generate_content_type_header(self, CONTENT_TYPE=hdrs.CONTENT_TYPE):
params = '; '.join("%s=%s" % i for i in self._content_dict.items())
if params:
ctype = self._content_type + '; ' + params
else:
ctype = self._content_type
self.headers[CONTENT_TYPE] = ctype
def _start_pre_check(self, request):
if self._resp_impl is not None:
if self._req is not request:
raise RuntimeError(
"Response has been started with different request.")
else:
return self._resp_impl
else:
return None
def _do_start_compression(self, coding):
if coding != ContentCoding.identity:
self.headers[hdrs.CONTENT_ENCODING] = coding.value
self._resp_impl.add_compression_filter(coding.value)
self.content_length = None
def _start_compression(self, request):
if self._compression_force:
self._do_start_compression(self._compression_force)
else:
accept_encoding = request.headers.get(
hdrs.ACCEPT_ENCODING, '').lower()
for coding in ContentCoding:
if coding.value in accept_encoding:
self._do_start_compression(coding)
return
def start(self, request):
warnings.warn('use .prepare(request) instead', DeprecationWarning)
resp_impl = self._start_pre_check(request)
if resp_impl is not None:
return resp_impl
return self._start(request)
@asyncio.coroutine
def prepare(self, request):
resp_impl = self._start_pre_check(request)
if resp_impl is not None:
return resp_impl
yield from request._prepare_hook(self)
return self._start(request)
def _start(self, request,
HttpVersion10=HttpVersion10,
HttpVersion11=HttpVersion11,
CONNECTION=hdrs.CONNECTION,
DATE=hdrs.DATE,
SERVER=hdrs.SERVER,
SET_COOKIE=hdrs.SET_COOKIE,
TRANSFER_ENCODING=hdrs.TRANSFER_ENCODING):
self._req = request
keep_alive = self._keep_alive
if keep_alive is None:
keep_alive = request.keep_alive
self._keep_alive = keep_alive
version = request.version
resp_impl = self._resp_impl = ResponseImpl(
request._writer,
self._status,
version,
not keep_alive,
self._reason)
headers = self.headers
for cookie in self._cookies.values():
value = cookie.output(header='')[1:]
headers.add(SET_COOKIE, value)
if self._compression:
self._start_compression(request)
if self._chunked:
if request.version != HttpVersion11:
raise RuntimeError("Using chunked encoding is forbidden "
"for HTTP/{0.major}.{0.minor}".format(
request.version))
resp_impl.chunked = True
if self._chunk_size:
resp_impl.add_chunking_filter(self._chunk_size)
headers[TRANSFER_ENCODING] = 'chunked'
else:
resp_impl.length = self.content_length
headers.setdefault(DATE, request.time_service.strtime())
headers.setdefault(SERVER, resp_impl.SERVER_SOFTWARE)
if CONNECTION not in headers:
if keep_alive:
if version == HttpVersion10:
headers[CONNECTION] = 'keep-alive'
else:
if version == HttpVersion11:
headers[CONNECTION] = 'close'
resp_impl.headers = headers
self._send_headers(resp_impl)
return resp_impl
def _send_headers(self, resp_impl):
        # Dirty hack required for
# https://github.com/KeepSafe/aiohttp/issues/1093
# File sender may override it
resp_impl.send_headers()
def write(self, data):
assert isinstance(data, (bytes, bytearray, memoryview)), \
"data argument must be byte-ish (%r)" % type(data)
if self._eof_sent:
raise RuntimeError("Cannot call write() after write_eof()")
if self._resp_impl is None:
raise RuntimeError("Cannot call write() before start()")
if data:
return self._resp_impl.write(data)
else:
return ()
@asyncio.coroutine
def drain(self):
if self._resp_impl is None:
raise RuntimeError("Response has not been started")
yield from self._resp_impl.transport.drain()
@asyncio.coroutine
def write_eof(self):
if self._eof_sent:
return
if self._resp_impl is None:
raise RuntimeError("Response has not been started")
yield from self._resp_impl.write_eof()
self._eof_sent = True
self._body_length = self._resp_impl.body_length
self._req = None
self._resp_impl = None
def __repr__(self):
if self._eof_sent:
info = "eof"
elif self.started:
info = "{} {} ".format(self._req.method, self._req.path)
else:
info = "not started"
return "<{} {} {}>".format(self.__class__.__name__,
self.reason, info)
class Response(StreamResponse):
def __init__(self, *, body=None, status=200,
reason=None, text=None, headers=None, content_type=None,
charset=None):
if body is not None and text is not None:
raise ValueError("body and text are not allowed together")
if headers is None:
headers = CIMultiDict()
elif not isinstance(headers, (CIMultiDict, CIMultiDictProxy)):
headers = CIMultiDict(headers)
if content_type is not None and ";" in content_type:
raise ValueError("charset must not be in content_type "
"argument")
if text is not None:
if hdrs.CONTENT_TYPE in headers:
if content_type or charset:
raise ValueError("passing both Content-Type header and "
"content_type or charset params "
"is forbidden")
else:
# fast path for filling headers
if not isinstance(text, str):
raise TypeError("text argument must be str (%r)" %
type(text))
if content_type is None:
content_type = 'text/plain'
if charset is None:
charset = 'utf-8'
headers[hdrs.CONTENT_TYPE] = (
content_type + '; charset=' + charset)
body = text.encode(charset)
text = None
else:
if hdrs.CONTENT_TYPE in headers:
if content_type is not None or charset is not None:
raise ValueError("passing both Content-Type header and "
"content_type or charset params "
"is forbidden")
else:
if content_type is not None:
if charset is not None:
content_type += '; charset=' + charset
headers[hdrs.CONTENT_TYPE] = content_type
super().__init__(status=status, reason=reason, headers=headers)
if text is not None:
self.text = text
elif body is None and hdrs.CONTENT_LENGTH in headers:
self._body = None
else:
self.body = body
@property
def body(self):
return self._body
@body.setter
def body(self, body):
if body is not None and not isinstance(body, bytes):
raise TypeError("body argument must be bytes (%r)" % type(body))
self._body = body
if body is not None:
self.content_length = len(body)
else:
self.content_length = 0
@property
def text(self):
if self._body is None:
return None
return self._body.decode(self.charset or 'utf-8')
@text.setter
def text(self, text):
if text is not None and not isinstance(text, str):
raise TypeError("text argument must be str (%r)" % type(text))
if self.content_type == 'application/octet-stream':
self.content_type = 'text/plain'
if self.charset is None:
self.charset = 'utf-8'
self.body = text.encode(self.charset)
@asyncio.coroutine
def write_eof(self):
body = self._body
if (body is not None and
self._req.method != hdrs.METH_HEAD and
self._status not in [204, 304]):
self.write(body)
yield from super().write_eof()
def json_response(data=sentinel, *, text=None, body=None, status=200,
reason=None, headers=None, content_type='application/json',
dumps=json.dumps):
if data is not sentinel:
if text or body:
raise ValueError(
"only one of data, text, or body should be specified"
)
else:
text = dumps(data)
return Response(text=text, body=body, status=status, reason=reason,
headers=headers, content_type=content_type)
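# Illustrative usage (not part of the original module): a minimal handler
# built on the helpers above, written in the pre-async/await coroutine
# style this file uses.
#
#     @asyncio.coroutine
#     def handle(request):
#         return json_response({'status': 'ok'})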
|
|
from django.shortcuts import render, redirect
from django.urls import reverse
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.auth import authenticate, login as auth_login, logout as auth_logout
from .models import *
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
# Create your views here.
# 2.5MB - 2621440
# 5MB - 5242880
# 10MB - 10485760
# 20MB - 20971520
# 50MB - 52428800
# 100MB - 104857600
# 250MB - 262144000
# 500MB - 524288000
MAX_UPLOAD_SIZE = 2 * 1024 * 1024
# File size handler
def size_checker(file):
    """Return True when there is no file or its size is within the limit."""
    if file and file.size > MAX_UPLOAD_SIZE:
        return False
    return True
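# Illustrative behaviour of size_checker (sizes assumed for the example):
#     size_checker(None)        -> True  (no file uploaded)
#     a 1 MB uploaded file      -> True  (within MAX_UPLOAD_SIZE)
#     a 3 MB uploaded file      -> False (exceeds the 2 MB limit)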
# Admin View
def login_admin(request):
template = 'oktan/login-admin.html'
if request.user.is_authenticated and request.user.is_staff:
return redirect('oktansite:admin_dashboard')
if request.method == 'GET':
return render(request, template)
else:
email = request.POST['email']
password = request.POST['password']
acc = authenticate(email=email, password=password)
if acc is not None and acc.is_staff:
auth_login(request, acc)
return redirect('oktansite:admin_dashboard')
else:
return render(request, template)
def admin_dashboard(request, success=None, deleted=None):
template = 'oktan/admin-dashboard.html'
if request.user.is_staff:
list_peserta = Team.objects.all()
list_news = News.objects.all()
timeline = Timeline.objects.all()
return render(request, template,{
'list_peserta': list_peserta,
'list_news': list_news,
'timeline': timeline,
'success': success,
'deleted': deleted,
})
else:
return redirect('oktansite:index')
def search_peserta(request):
template = 'oktan/daftarpendaftar.html'
if request.user.is_staff:
        if request.method == "POST":
if 'keyword' in request.POST:
keyword = request.POST['keyword']
else:
keyword = None
if 'opt' in request.POST:
opt = request.POST['opt']
else:
opt = None
else:
if 'keyword' in request.GET:
keyword = request.GET['keyword']
else:
keyword = None
if 'opt' in request.GET:
opt = request.GET['opt']
else:
opt = None
        list_peserta = Team.objects.order_by('id')
peserta_found = []
if keyword:
for m in list_peserta:
if (keyword.lower() in m.team_name.lower()) or (keyword.lower() in m.school_name.lower()):
peserta_found.append(m)
else:
peserta_found = list_peserta
target = []
if opt:
for m in peserta_found:
if opt == 'bayar' and m.proof_of_payment:
target.append(m)
elif opt == 'belumbayar' and not m.proof_of_payment:
target.append(m)
else:
target = peserta_found
paginator = Paginator(target, 25)
page = request.GET.get('page')
try:
peserta = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
peserta = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
peserta = paginator.page(paginator.num_pages)
return render(request, template,{
'list_peserta': peserta,
'keyword': keyword,
'opt': opt,
})
else:
return redirect('oktansite:index')
def list_peserta(request, success=None, deleted=None):
template = 'oktan/daftarpendaftar.html'
if request.user.is_staff:
        list_peserta = Team.objects.order_by('id')
paginator = Paginator(list_peserta, 25)
page = request.GET.get('page')
try:
peserta = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
peserta = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
peserta = paginator.page(paginator.num_pages)
return render(request, template,{
'list_peserta': peserta,
'success': success,
'deleted': deleted,
'keyword': None,
'opt': None,
})
else:
return redirect('oktansite:index')
def generate_payment_proof(instance):
return "OKTAN-ITB-2017-PROVE-"+instance.team_name.upper()+"-"+'{0:05}'.format(instance.id)
def generate_code(request, id):
if request.user.is_staff:
peserta = Team.objects.get(id=id)
peserta.proof_code = generate_payment_proof(peserta)
peserta.save()
return redirect('oktansite:view_peserta', id)
else:
return redirect('oktansite:index')
def delete_code(request, id):
if request.user.is_staff:
peserta = Team.objects.get(id=id)
peserta.proof_code = ""
peserta.save()
return redirect('oktansite:view_peserta', id)
else:
return redirect('oktansite:index')
def delete_peserta(request, id):
    if request.user.is_staff:
        peserta = Team.objects.get(id=id)
        peserta.delete()
        return redirect('oktansite:admin_dashboard')
    else:
        return redirect('oktansite:index')
def view_peserta(request, id):
template = 'oktan/view_peserta.html'
if request.user.is_staff:
peserta = Team.objects.get(pk=id)
try:
account_peserta = Account.objects.get(team=peserta)
except ObjectDoesNotExist:
account_peserta = None
return render(request, template, {
'peserta': peserta,
'account_peserta': account_peserta,
})
else:
return redirect('oktansite:index')
def add_news(request):
template = 'oktan/addnews.html'
if request.user.is_staff:
if request.method == "POST":
title = request.POST['title']
body = request.POST['body']
article = News()
article.title = title
article.text = body
attachment = None
image = None
if 'attachment' in request.FILES:
attachment = request.FILES['attachment']
if 'image' in request.FILES:
image = request.FILES['image']
valid = True
if attachment or image:
if attachment and valid:
if size_checker(attachment):
article.attachment = attachment
else:
valid = False
if image and valid:
                    if size_checker(image):
article.image = image
else:
valid = False
if valid:
article.save()
return redirect('oktansite:admin_dashboard')
else:
return render(request, template,{
"msg" : "File Size too Big",
})
else:
return render(request, template)
else:
return redirect('oktansite:index')
def edit_news(request, id):
template = 'oktan/edit_news.html'
news = News.objects.get(pk=id)
if request.user.is_staff:
if request.method == "POST":
news.title = request.POST['title']
news.text = request.POST['body']
attachment = None
image = None
if 'attachment' in request.FILES:
attachment = request.FILES['attachment']
if 'image' in request.FILES:
image = request.FILES['image']
valid = True
if attachment or image:
if attachment and valid:
if size_checker(attachment):
news.attachment = attachment
else:
valid = False
if image and valid:
                    if size_checker(image):
news.image = image
else:
valid = False
if valid:
news.save()
return redirect('oktansite:admin_dashboard')
else:
return render(request, template,{
"msg" : "File Size too Big",
})
return redirect('oktansite:admin_dashboard')
else:
return render(request, template, {'news': news})
else:
return redirect('oktansite:index')
def delete_news(request, id):
if request.user.is_staff:
news = News.objects.get(pk=id)
news.delete()
return redirect('oktansite:admin_dashboard')
else:
return redirect('oktansite:index')
def add_sponsor(request):
template = 'oktan/addsponsor.html'
sponsor = Sponsor.objects.first()
if request.user.is_staff:
if request.method == "POST":
if sponsor:
sponsor.src = request.FILES['sponsor']
sponsor.save()
else:
sponsor = Sponsor()
sponsor.src = request.FILES['sponsor']
sponsor.save()
return redirect('oktansite:addsponsor')
else:
return render(request, template, {
'sponsor': sponsor,
})
else:
return redirect('oktansite:index')
def add_media(request):
template = 'oktan/addmedia.html'
media = MediaPartner.objects.first()
if request.user.is_staff:
if request.method == "POST":
if media:
media.src = request.FILES['media']
media.save()
else:
                media = MediaPartner()
media.src = request.FILES['media']
media.save()
return redirect('oktansite:add_media')
else:
return render(request, template, {
'media': media,
})
else:
return redirect('oktansite:index')
def edit_about(request):
template = 'oktan/editabout.html'
if request.user.is_staff:
about = About.objects.first()
if request.method == "POST":
            if about:
about.text = request.POST['text']
about.save()
else:
about = About()
about.text = request.POST['text']
about.save()
return redirect('oktansite:editabout')
else:
return render(request, template, {
'about': about
})
else:
return redirect('oktansite:index')
def add_timeline(request):
template = 'oktan/add_timeline.html'
if request.user.is_staff:
if request.method == "POST":
timeline = Timeline()
timeline.tanggal = request.POST['tanggal']
timeline.text = request.POST['body']
timeline.save()
return redirect('oktansite:admin_dashboard')
else:
return render(request, template)
else:
return redirect('oktansite:index')
def edit_timeline(request, id):
template = 'oktan/edit_timeline.html'
timeline = Timeline.objects.get(pk=id)
if request.user.is_staff:
if request.method == "POST":
timeline.tanggal = request.POST['tanggal']
timeline.text = request.POST['body']
timeline.save()
return redirect('oktansite:admin_dashboard')
else:
return render(request, template, {
'timeline': timeline,
})
else:
return redirect('oktansite:index')
def delete_timeline(request, id):
if request.user.is_staff:
timeline = Timeline.objects.get(pk=id)
timeline.delete()
return redirect('oktansite:admin_dashboard')
else:
return redirect('oktansite:index')
def admin_logout(request):
if request.user.is_staff:
auth_logout(request)
return redirect(reverse('oktansite:index'))
else:
return redirect('oktansite:index')
# Site View
def index(request):
timeline = Timeline.objects.all()
sponsor = Sponsor.objects.first()
media_partner = MediaPartner.objects.first()
template = 'oktan/index.html'
return render(request, template, {
'timeline': timeline,
'sponsor': sponsor,
'media_partner': media_partner
})
def about(request):
sponsor = Sponsor.objects.first()
media_partner = MediaPartner.objects.first()
about = About.objects.first()
template = 'oktan/about.html'
return render(request, template, {
'sponsor': sponsor,
'media_partner': media_partner,
'about': about,
})
def news(request):
template = 'oktan/news.html'
    sponsor = Sponsor.objects.first()
    media_partner = MediaPartner.objects.first()
    # Only the five most recent articles are shown
    articles = News.objects.order_by("-pub_date")[:5]
return render(request, template, {
'news': articles,
'sponsor': sponsor,
'media_partner': media_partner,
})
def blog(request, id):
template = 'oktan/post.html'
sponsor = Sponsor.objects.first()
media_partner = MediaPartner.objects.first()
article = News.objects.get(pk = id)
return render(request, template, {
'article' : article,
'sponsor': sponsor,
'media_partner': media_partner,
})
def post(request):
template = 'oktan/post.html'
return render(request, template)
def gallery(request):
sponsor = Sponsor.objects.first()
media_partner = MediaPartner.objects.first()
template = 'oktan/gallery.html'
return render(request, template, {
'sponsor': sponsor,
'media_partner': media_partner,
})
def contact(request):
sponsor = Sponsor.objects.first()
media_partner = MediaPartner.objects.first()
template = 'oktan/contact.html'
return render(request, template,{
'sponsor': sponsor,
'media_partner': media_partner,
})
def login_page(request):
template = 'oktan/login.html'
return render(request, template)
@login_required
def administration(request):
template = 'oktan/administrasi.html'
team = request.user.team
if request.method == 'GET':
return render(request, template, {
'team': team
})
else:
try:
obj = Team.objects.get(pk=team.id)
obj.school_name = request.POST["school_name"]
obj.supervisor_name = request.POST["supervisor_name"]
obj.team_name = request.POST["team_name"]
            RAYON_ENUM = {
                'bali': 'Bali',
                'banyuwangi': 'Banyuwangi - Banyuwangi, Jember, Bondowoso, Situbondo',
'bandung': 'Bandung - Bandung Raya, Sumedang, Subang',
'bogor': 'Bogor - Bogor, Sukabumi, Cianjur',
'cirebon': 'Cirebon - Cirebon, Majalengka, Indramayu, Tegal, Kuningan',
'karawang': 'Karawang - Karawang, Bekasi, Purwakarta',
'jakarta': 'Jakarta - Jakarta Raya',
'lampung': 'Lampung',
'makassar': 'Makassar - Sulawesi dan Indonesia Timur',
'malang': 'Malang - Malang, Lumajang, Mojokerto',
'medan': 'Medan - Banda Aceh, Medan',
'padang': 'Padang - Padang, Jambi, Riau',
'palembang': 'Palembang - Palembang, Bangka, Bengkulu',
'samarinda': 'Samarinda - Kalimantan',
'semarang': 'Semarang - Semarang, Demak, Salatiga, Kudus, Jepara',
'serang': 'Serang - Serang, Pandeglang, Cilegon, Lebak, Merak',
'surakarta': 'Surakarta - Surakarta, Boyolali, Karanganyar, Wonogiri',
'surabaya': 'Surabaya - Surabaya, Gresik, Lamongan, Mojokerto',
'tangerang': 'Tangerang',
'tasikmalaya': 'Tasikmalaya - Tasikmalaya, Ciamis, Garut, Banjar',
'yogyakarta': 'Yogyakarta - Yogyakarta Raya',
}
rayon = request.POST["rayon"]
if rayon:
obj.rayon = RAYON_ENUM[rayon]
modified = True
message = ''
if 'payment_proof' in request.FILES:
proof_of_payment = request.FILES['payment_proof']
if size_checker(proof_of_payment):
obj.proof_of_payment = proof_of_payment
else:
message = "File Size is too big!"
modified = False
if modified:
message = "Data Modified!"
obj.save()
return render(request, template, {
'message': message,
'team': obj
})
else:
return render(request, template, {
'message': message,
'team': team
})
except ValidationError as e:
message = ';'.join(e.messages)
return render(request, template, {
'message': message,
'team': team
})
@login_required
def user(request):
template = 'oktan/user.html'
return render(request, template)
@login_required
def member(request):
template = 'oktan/member.html'
team = request.user.team
if request.method == 'GET':
return render(request, template, {
'team': team
})
else:
obj = Team.objects.get(pk=team.id)
obj.student_name_1 = request.POST["student_name_1"]
obj.student_id_number_1 = request.POST["student_id_number_1"]
obj.student_phone_number_1 = request.POST["student_phone_number_1"]
obj.student_id_line_1 = request.POST["student_id_line_1"]
obj.student_name_2 = request.POST["student_name_2"]
obj.student_id_number_2 = request.POST["student_id_number_2"]
obj.student_phone_number_2 = request.POST["student_phone_number_2"]
obj.student_id_line_2 = request.POST["student_id_line_2"]
if 'student_card_image_1' in request.FILES:
obj.student_card_image_1 = request.FILES['student_card_image_1']
if 'student_card_image_2' in request.FILES:
obj.student_card_image_2 = request.FILES['student_card_image_2']
if (size_checker(obj.student_card_image_2) and size_checker(obj.student_card_image_1)):
obj.save()
message = "Data Modified!"
return render(request, template, {
'message': message,
'team': obj
})
else:
message = "File Size is too Big"
return render(request, template, {
'message': message,
'team': team
})
def login(request):
if request.user.is_authenticated:
return redirect('oktansite:administrasi')
template = 'oktan/login.html'
if request.method == 'GET':
return render(request, template)
else:
email = request.POST['email']
password = request.POST['password']
acc = authenticate(email=email, password=password)
if acc is not None:
auth_login(request, acc)
return redirect('oktansite:user')
else:
return render(request, template)
@login_required
def logout(request):
auth_logout(request)
return redirect(reverse('oktansite:index'))
def register(request):
template = 'oktan/login.html'
if request.method == 'POST':
        email = request.POST['email']
        password = request.POST['password']
        team_name = request.POST['team_name']
        supervisor = request.POST['supervisor']
        school_name = request.POST['school_name']
# Register account
try:
new_team = Team()
new_team.school_name = school_name
new_team.supervisor_name = supervisor
new_team.team_name = team_name
new_team.save()
new_account = Account()
new_account.email = email
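            # NOTE: assumes the custom Account model hashes this value in
            # save(); if it stores the raw string, the password would be
            # persisted in plaintext.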
new_account.password = password
new_account.team = new_team
new_account.save()
acc = authenticate(email=email, password=password)
if acc is not None:
auth_login(request, acc)
return redirect('oktansite:administrasi')
else:
return render(request, template)
except ValidationError as e:
message = ';'.join(e.messages)
return render(request, template, {
'error_message': message
})
return render(request, template)
|
|
from collections import defaultdict
import os
import yaml
from flask import Markup, escape
from lxml.html import document_fromstring
class SearchSummary(object):
"""Provides a paragraph summarising the search performed and results"""
WRAP_PRE_TAG = '<p class="app-search-summary govuk-body-s">'
WRAP_POST_TAG = '</p>'
COUNT_PRE_TAG = '<span class="app-search-summary__count">'
COUNT_POST_TAG = '</span>'
KEYWORDS_PRE_TAG = '<strong>'
KEYWORDS_POST_TAG = '</strong>'
LOT_PRE_TAG = '<strong>'
LOT_POST_TAG = '</strong>'
@staticmethod
def write_parts_as_sentence(parts):
sentence = [part for part in parts if part is not None]
if len(sentence) > 0:
return u" ".join(sentence)
@staticmethod
def write_list_as_sentence(input_list, final_conjunction):
if len(input_list) == 1:
return u"{}".format(input_list[0])
else:
start = input_list[0:-1]
end = input_list[-1]
formatted_conjunction = " {} ".format(final_conjunction)
return formatted_conjunction.join([u', '.join(start), end])
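    # Illustrative behaviour:
    #     write_list_as_sentence([u'apps'], u'and')             -> u'apps'
    #     write_list_as_sentence([u'apps', u'hosting'], u'and') -> u'apps and hosting'
    #     write_list_as_sentence([u'a', u'b', u'c'], u'or')     -> u'a, b or c'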
def __init__(self, results_total, request_args, filter_groups, lots_by_slug):
self._lots_by_slug = lots_by_slug
self._set_initial_sentence(results_total, request_args)
self.filter_groups = self._group_request_filters(
request_args,
filter_groups
)
self.filters_fragments = []
SummaryRules.load_rules()
for group in self.filter_groups:
group_id = group[0]
filters = group[1]
group_rules = SummaryRules(group_id)
if group_rules.exist is True:
self.filters_fragments.append(
SummaryFragment(
group_id=group_id,
filters=filters,
rules=group_rules)
)
def _set_initial_sentence(self, results_total, request_args):
keywords = escape(request_args.get('q', ''))
lot_label = (self._lots_by_slug.get(request_args.get('lot'), {}).get('name')
or 'All categories')
lot = u"{}{}{}".format(
SearchSummary.LOT_PRE_TAG,
lot_label,
SearchSummary.LOT_POST_TAG
)
if int(results_total) == 1:
self.count = '1'
count_string = 'result found'
else:
self.count = str(results_total)
count_string = u"results found"
if keywords != '':
self.sentence = u"{} containing {}{}{} in {}".format(
count_string,
SearchSummary.KEYWORDS_PRE_TAG,
keywords,
SearchSummary.KEYWORDS_POST_TAG,
lot)
else:
self.sentence = u"{} in {}".format(count_string, lot)
def markup(self, wrap=False):
def _get_fragment_string(fragment):
return fragment.str()
pre_wrap = SearchSummary.WRAP_PRE_TAG if wrap else ''
post_wrap = SearchSummary.WRAP_POST_TAG if wrap else ''
parts = [self.get_starting_sentence()]
if len(self.filters_fragments) > 0:
fragment_strings = list(
map(_get_fragment_string, self.filters_fragments))
parts.append(SearchSummary.write_list_as_sentence(
fragment_strings, u"and"))
return Markup(pre_wrap + u" ".join(parts) + post_wrap)
def text_content(self):
return document_fromstring(self.markup()).text_content()
def get_starting_sentence(self):
return u"{}{}{} {}".format(
self.COUNT_PRE_TAG,
self.count,
self.COUNT_POST_TAG,
self.sentence
)
def _group_request_filters(self, request_args, filter_groups):
"""arranges the filters from the request into filter groups"""
def _insert_filters(target_dict, filters, label):
for f in filters:
if f.get('children'):
_insert_filters(target_dict, f.get('children'), label)
target_dict[(f['name'], f['value'])] = (f, label)
def _sort_groups(groups):
sorted_groups = []
filter_group_order = [group['label'] for group in filter_groups]
for group in filter_group_order:
if group in groups:
sorted_groups.append((group, groups[group]))
return sorted_groups
# build map from key/value pair to the relevant filter
# ideally this would be in the filter_groups data structure already, but it isn't
all_filters_by_kv = dict()
for filter_group in filter_groups:
_insert_filters(all_filters_by_kv, filter_group['filters'], filter_group['label'])
groups = defaultdict(list)
for filter_name, filter_values in request_args.lists():
if filter_name in ('lot', 'q', 'page'):
continue
for filter_value in filter_values:
filter_instance, filter_group_label = all_filters_by_kv[(filter_name, filter_value)]
groups[filter_group_label].append(filter_instance['label'])
return _sort_groups(groups)
class SummaryRules(object):
"""Provides access to the rules for a search summary fragment"""
_rules = {}
loaded = False
@staticmethod
def load_rules(manifest=os.path.join(
os.path.dirname(__file__),
'..',
'helpers',
'search_summary_manifest.yml'
)):
        with open(manifest, 'r') as f:
            summary_rules = yaml.safe_load(f)
SummaryRules._rules = {rule['id']: rule for rule in summary_rules}
SummaryRules.loaded = True
def __init__(self, group_id):
self.exist = group_id in SummaryRules._rules
if self.exist is True:
self._rules = SummaryRules._rules[group_id]
if self.get('filterRules') is not None:
self.filter_rules_ids = [
rule['id'] for rule in self._rules['filterRules']]
def get(self, key):
if key in self._rules:
return self._rules[key]
def add_filter_preposition(self, filter_id=None, filter_string=None):
preposition = self._get_filter_preposition(filter_id)
return SearchSummary.write_parts_as_sentence(
[preposition, filter_string])
def _get_filter_preposition(self, filter):
if hasattr(self, 'filter_rules_ids'):
try:
index = self.filter_rules_ids.index(filter)
except (AttributeError, TypeError, ValueError):
return None
return self._rules['filterRules'][index]['preposition']
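# Illustrative shape of one entry in search_summary_manifest.yml, inferred
# from the keys SummaryRules reads above (the real manifest may differ):
#
#     - id: categories
#       labelPreposition: in
#       label:
#           singular: category
#           plural: categories
#       filtersPreposition: of
#       conjunction: and
#       filterRules:
#           - id: accessibility
#             preposition: with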
class SummaryFragment(object):
"""Provides access to a search summary fragment"""
PRE_TAG = u'<strong>'
POST_TAG = u'</strong>'
FINAL_CONJUNCTION = u'and'
def __init__(self, group_id, filters, rules):
self.id = group_id
self.rules = rules
self.form = 'singular'
if len(filters) > 1:
self.form = 'plural'
self.filters = self._get_filters(filters)
self.filters_preposition = self.rules.get('filtersPreposition')
self.label_string = self._get_label()
def str(self):
filters_string = SearchSummary.write_list_as_sentence(
self.filters, self.rules.get('conjunction')
)
return SearchSummary.write_parts_as_sentence([
self.label_string,
self.rules.get('filtersPreposition'),
filters_string
])
def _get_label(self):
preposition = self.rules.get('labelPreposition')
label = self.rules.get('label')
if label is not None:
return SearchSummary.write_parts_as_sentence(
[preposition, label[self.form]])
else:
return SearchSummary.write_parts_as_sentence(
[preposition, label])
def _get_filters(self, filters):
def _mark_up_filter(filter):
return u"{}{}{}".format(
SummaryFragment.PRE_TAG,
filter,
SummaryFragment.POST_TAG,
)
processed_filters = []
if self.rules.get('filterRules') is None:
processed_filters = [_mark_up_filter(filter) for filter in filters]
return processed_filters
else:
for filter in filters:
filter_string = _mark_up_filter(filter)
if filter in self.rules.filter_rules_ids:
filter_string = self.rules.add_filter_preposition(
filter_id=filter, filter_string=filter_string)
processed_filters.append(filter_string)
return processed_filters
|
|
# Copyright 2014 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import excutils
from neutron.common import exceptions as exception
from neutron.i18n import _LE, _LI, _LW
from neutron.openstack.common import log
from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware.common import exceptions as nsx_exc
from neutron.plugins.vmware.common import utils
from neutron.plugins.vmware import nsxlib
from neutron.plugins.vmware.nsxlib import switch
from neutron.plugins.vmware.nsxlib import versioning
# @versioning.versioned decorator makes the apparent function body
# totally unrelated to the real function. This confuses pylint :(
# pylint: disable=assignment-from-no-return
HTTP_GET = "GET"
HTTP_POST = "POST"
HTTP_DELETE = "DELETE"
HTTP_PUT = "PUT"
LROUTER_RESOURCE = "lrouter"
LROUTERPORT_RESOURCE = "lport/%s" % LROUTER_RESOURCE
LROUTERRIB_RESOURCE = "rib/%s" % LROUTER_RESOURCE
LROUTERNAT_RESOURCE = "nat/lrouter"
# Constants for NAT rules
MATCH_KEYS = ["destination_ip_addresses", "destination_port_max",
"destination_port_min", "source_ip_addresses",
"source_port_max", "source_port_min", "protocol"]
LOG = log.getLogger(__name__)
def _prepare_lrouter_body(name, neutron_router_id, tenant_id,
router_type, distributed=None, **kwargs):
body = {
"display_name": utils.check_and_truncate(name),
"tags": utils.get_tags(os_tid=tenant_id,
q_router_id=neutron_router_id),
"routing_config": {
"type": router_type
},
"type": "LogicalRouterConfig",
"replication_mode": cfg.CONF.NSX.replication_mode,
}
# add the distributed key only if not None (ie: True or False)
if distributed is not None:
body['distributed'] = distributed
if kwargs:
body["routing_config"].update(kwargs)
return body
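# Illustrative result of _prepare_lrouter_body (values abbreviated):
#     _prepare_lrouter_body('r1', 'neutron-id', 'tenant-id',
#                           'RoutingTableRoutingConfig')
#     -> {'display_name': 'r1',
#         'tags': [...os_tid and q_router_id tags...],
#         'routing_config': {'type': 'RoutingTableRoutingConfig'},
#         'type': 'LogicalRouterConfig',
#         'replication_mode': <cfg.CONF.NSX.replication_mode>}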
def _create_implicit_routing_lrouter(cluster, neutron_router_id, tenant_id,
display_name, nexthop, distributed=None):
implicit_routing_config = {
"default_route_next_hop": {
"gateway_ip_address": nexthop,
"type": "RouterNextHop"
},
}
lrouter_obj = _prepare_lrouter_body(
display_name, neutron_router_id, tenant_id,
"SingleDefaultRouteImplicitRoutingConfig",
distributed=distributed,
**implicit_routing_config)
return nsxlib.do_request(HTTP_POST,
nsxlib._build_uri_path(LROUTER_RESOURCE),
jsonutils.dumps(lrouter_obj), cluster=cluster)
def create_implicit_routing_lrouter(cluster, neutron_router_id, tenant_id,
display_name, nexthop):
"""Create a NSX logical router on the specified cluster.
:param cluster: The target NSX cluster
:param tenant_id: Identifier of the Openstack tenant for which
the logical router is being created
:param display_name: Descriptive name of this logical router
:param nexthop: External gateway IP address for the logical router
:raise NsxApiException: if there is a problem while communicating
with the NSX controller
"""
return _create_implicit_routing_lrouter(
cluster, neutron_router_id, tenant_id, display_name, nexthop)
def create_implicit_routing_lrouter_with_distribution(
cluster, neutron_router_id, tenant_id, display_name,
nexthop, distributed=None):
"""Create a NSX logical router on the specified cluster.
This function also allows for creating distributed lrouters
:param cluster: The target NSX cluster
:param tenant_id: Identifier of the Openstack tenant for which
the logical router is being created
:param display_name: Descriptive name of this logical router
:param nexthop: External gateway IP address for the logical router
:param distributed: True for distributed logical routers
:raise NsxApiException: if there is a problem while communicating
with the NSX controller
"""
return _create_implicit_routing_lrouter(
cluster, neutron_router_id, tenant_id,
display_name, nexthop, distributed)
def create_explicit_routing_lrouter(cluster, neutron_router_id, tenant_id,
display_name, nexthop, distributed=None):
lrouter_obj = _prepare_lrouter_body(
display_name, neutron_router_id, tenant_id,
"RoutingTableRoutingConfig", distributed=distributed)
router = nsxlib.do_request(HTTP_POST,
nsxlib._build_uri_path(LROUTER_RESOURCE),
jsonutils.dumps(lrouter_obj), cluster=cluster)
default_gw = {'prefix': '0.0.0.0/0', 'next_hop_ip': nexthop}
create_explicit_route_lrouter(cluster, router['uuid'], default_gw)
return router
def delete_lrouter(cluster, lrouter_id):
nsxlib.do_request(HTTP_DELETE,
nsxlib._build_uri_path(LROUTER_RESOURCE,
resource_id=lrouter_id),
cluster=cluster)
def get_lrouter(cluster, lrouter_id):
return nsxlib.do_request(HTTP_GET,
nsxlib._build_uri_path(
LROUTER_RESOURCE,
resource_id=lrouter_id,
relations='LogicalRouterStatus'),
cluster=cluster)
def query_lrouters(cluster, fields=None, filters=None):
return nsxlib.get_all_query_pages(
nsxlib._build_uri_path(LROUTER_RESOURCE,
fields=fields,
relations='LogicalRouterStatus',
filters=filters),
cluster)
def get_lrouters(cluster, tenant_id, fields=None, filters=None):
# FIXME(salv-orlando): Fields parameter is ignored in this routine
actual_filters = {}
if filters:
actual_filters.update(filters)
if tenant_id:
actual_filters['tag'] = tenant_id
actual_filters['tag_scope'] = 'os_tid'
lrouter_fields = "uuid,display_name,fabric_status,tags"
return query_lrouters(cluster, lrouter_fields, actual_filters)
def update_implicit_routing_lrouter(cluster, r_id, display_name, nexthop):
lrouter_obj = get_lrouter(cluster, r_id)
if not display_name and not nexthop:
# Nothing to update
return lrouter_obj
# It seems that this is faster than the doing an if on display_name
lrouter_obj["display_name"] = (utils.check_and_truncate(display_name) or
lrouter_obj["display_name"])
if nexthop:
nh_element = lrouter_obj["routing_config"].get(
"default_route_next_hop")
if nh_element:
nh_element["gateway_ip_address"] = nexthop
return nsxlib.do_request(HTTP_PUT,
nsxlib._build_uri_path(LROUTER_RESOURCE,
resource_id=r_id),
jsonutils.dumps(lrouter_obj),
cluster=cluster)
def get_explicit_routes_lrouter(cluster, router_id, protocol_type='static'):
static_filter = {'protocol': protocol_type}
existing_routes = nsxlib.do_request(
HTTP_GET,
nsxlib._build_uri_path(LROUTERRIB_RESOURCE,
filters=static_filter,
fields="*",
parent_resource_id=router_id),
cluster=cluster)['results']
return existing_routes
def delete_explicit_route_lrouter(cluster, router_id, route_id):
nsxlib.do_request(HTTP_DELETE,
nsxlib._build_uri_path(LROUTERRIB_RESOURCE,
resource_id=route_id,
parent_resource_id=router_id),
cluster=cluster)
def create_explicit_route_lrouter(cluster, router_id, route):
next_hop_ip = route.get("nexthop") or route.get("next_hop_ip")
prefix = route.get("destination") or route.get("prefix")
uuid = nsxlib.do_request(
HTTP_POST,
nsxlib._build_uri_path(LROUTERRIB_RESOURCE,
parent_resource_id=router_id),
jsonutils.dumps({
"action": "accept",
"next_hop_ip": next_hop_ip,
"prefix": prefix,
"protocol": "static"
}),
cluster=cluster)['uuid']
return uuid
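# create_explicit_route_lrouter accepts both Neutron-style and NSX-style
# route dicts, e.g. (illustrative values):
#     {'destination': '192.168.1.0/24', 'nexthop': '10.0.0.1'}
#     {'prefix': '192.168.1.0/24', 'next_hop_ip': '10.0.0.1'}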
def update_explicit_routes_lrouter(cluster, router_id, routes):
# Update in bulk: delete them all, and add the ones specified
# but keep track of what is been modified to allow roll-backs
# in case of failures
nsx_routes = get_explicit_routes_lrouter(cluster, router_id)
try:
deleted_routes = []
added_routes = []
# omit the default route (0.0.0.0/0) from the processing;
# this must be handled through the nexthop for the router
for route in nsx_routes:
prefix = route.get("destination") or route.get("prefix")
if prefix != '0.0.0.0/0':
delete_explicit_route_lrouter(cluster,
router_id,
route['uuid'])
deleted_routes.append(route)
for route in routes:
prefix = route.get("destination") or route.get("prefix")
if prefix != '0.0.0.0/0':
uuid = create_explicit_route_lrouter(cluster,
router_id, route)
added_routes.append(uuid)
except api_exc.NsxApiException:
LOG.exception(_LE('Cannot update NSX routes %(routes)s for '
'router %(router_id)s'),
{'routes': routes, 'router_id': router_id})
# Roll back to keep NSX in consistent state
with excutils.save_and_reraise_exception():
if nsx_routes:
if deleted_routes:
for route in deleted_routes:
create_explicit_route_lrouter(cluster,
router_id, route)
if added_routes:
for route_id in added_routes:
delete_explicit_route_lrouter(cluster,
router_id, route_id)
return nsx_routes
def get_default_route_explicit_routing_lrouter_v33(cluster, router_id):
static_filter = {"protocol": "static",
"prefix": "0.0.0.0/0"}
default_route = nsxlib.do_request(
HTTP_GET,
nsxlib._build_uri_path(LROUTERRIB_RESOURCE,
filters=static_filter,
fields="*",
parent_resource_id=router_id),
cluster=cluster)["results"][0]
return default_route
def get_default_route_explicit_routing_lrouter_v32(cluster, router_id):
# Scan all routes because 3.2 does not support query by prefix
all_routes = get_explicit_routes_lrouter(cluster, router_id)
for route in all_routes:
if route['prefix'] == '0.0.0.0/0':
return route
def update_default_gw_explicit_routing_lrouter(cluster, router_id, next_hop):
default_route = get_default_route_explicit_routing_lrouter(cluster,
router_id)
if next_hop != default_route["next_hop_ip"]:
new_default_route = {"action": "accept",
"next_hop_ip": next_hop,
"prefix": "0.0.0.0/0",
"protocol": "static"}
nsxlib.do_request(HTTP_PUT,
nsxlib._build_uri_path(
LROUTERRIB_RESOURCE,
resource_id=default_route['uuid'],
parent_resource_id=router_id),
jsonutils.dumps(new_default_route),
cluster=cluster)
def update_explicit_routing_lrouter(cluster, router_id,
display_name, next_hop, routes=None):
update_implicit_routing_lrouter(cluster, router_id, display_name, next_hop)
if next_hop:
update_default_gw_explicit_routing_lrouter(cluster,
router_id, next_hop)
if routes is not None:
return update_explicit_routes_lrouter(cluster, router_id, routes)
def query_lrouter_lports(cluster, lr_uuid, fields="*",
filters=None, relations=None):
uri = nsxlib._build_uri_path(LROUTERPORT_RESOURCE,
parent_resource_id=lr_uuid,
fields=fields, filters=filters,
relations=relations)
return nsxlib.do_request(HTTP_GET, uri, cluster=cluster)['results']
def create_router_lport(cluster, lrouter_uuid, tenant_id, neutron_port_id,
display_name, admin_status_enabled, ip_addresses,
mac_address=None):
"""Creates a logical port on the assigned logical router."""
lport_obj = dict(
admin_status_enabled=admin_status_enabled,
display_name=display_name,
tags=utils.get_tags(os_tid=tenant_id, q_port_id=neutron_port_id),
ip_addresses=ip_addresses,
type="LogicalRouterPortConfig"
)
# Only add the mac_address to lport_obj if present. This is because
# when creating the fake_ext_gw there is no mac_address present.
if mac_address:
lport_obj['mac_address'] = mac_address
path = nsxlib._build_uri_path(LROUTERPORT_RESOURCE,
parent_resource_id=lrouter_uuid)
result = nsxlib.do_request(HTTP_POST, path, jsonutils.dumps(lport_obj),
cluster=cluster)
LOG.debug("Created logical port %(lport_uuid)s on "
"logical router %(lrouter_uuid)s",
{'lport_uuid': result['uuid'],
'lrouter_uuid': lrouter_uuid})
return result
def update_router_lport(cluster, lrouter_uuid, lrouter_port_uuid,
tenant_id, neutron_port_id, display_name,
admin_status_enabled, ip_addresses):
"""Updates a logical port on the assigned logical router."""
lport_obj = dict(
admin_status_enabled=admin_status_enabled,
display_name=display_name,
tags=utils.get_tags(os_tid=tenant_id, q_port_id=neutron_port_id),
ip_addresses=ip_addresses,
type="LogicalRouterPortConfig"
)
# Do not pass null items to NSX
    for key in list(lport_obj.keys()):
if lport_obj[key] is None:
del lport_obj[key]
path = nsxlib._build_uri_path(LROUTERPORT_RESOURCE,
lrouter_port_uuid,
parent_resource_id=lrouter_uuid)
result = nsxlib.do_request(HTTP_PUT, path,
jsonutils.dumps(lport_obj),
cluster=cluster)
LOG.debug("Updated logical port %(lport_uuid)s on "
"logical router %(lrouter_uuid)s",
{'lport_uuid': lrouter_port_uuid, 'lrouter_uuid': lrouter_uuid})
return result
def delete_router_lport(cluster, lrouter_uuid, lport_uuid):
"""Creates a logical port on the assigned logical router."""
path = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, lport_uuid,
lrouter_uuid)
nsxlib.do_request(HTTP_DELETE, path, cluster=cluster)
LOG.debug("Delete logical router port %(lport_uuid)s on "
"logical router %(lrouter_uuid)s",
{'lport_uuid': lport_uuid,
'lrouter_uuid': lrouter_uuid})
def delete_peer_router_lport(cluster, lr_uuid, ls_uuid, lp_uuid):
nsx_port = switch.get_port(cluster, ls_uuid, lp_uuid,
relations="LogicalPortAttachment")
relations = nsx_port.get('_relations')
if relations:
att_data = relations.get('LogicalPortAttachment')
if att_data:
lrp_uuid = att_data.get('peer_port_uuid')
if lrp_uuid:
delete_router_lport(cluster, lr_uuid, lrp_uuid)
def find_router_gw_port(context, cluster, router_id):
"""Retrieves the external gateway port for a NSX logical router."""
# Find the uuid of nsx ext gw logical router port
# TODO(salvatore-orlando): Consider storing it in Neutron DB
results = query_lrouter_lports(
cluster, router_id,
relations="LogicalPortAttachment")
for lport in results:
if '_relations' in lport:
attachment = lport['_relations'].get('LogicalPortAttachment')
if attachment and attachment.get('type') == 'L3GatewayAttachment':
return lport
def plug_router_port_attachment(cluster, router_id, port_id,
attachment_uuid, nsx_attachment_type,
attachment_vlan=None):
"""Attach a router port to the given attachment.
Current attachment types:
- PatchAttachment [-> logical switch port uuid]
- L3GatewayAttachment [-> L3GatewayService uuid]
For the latter attachment type a VLAN ID can be specified as well.
"""
uri = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, port_id, router_id,
is_attachment=True)
attach_obj = {}
attach_obj["type"] = nsx_attachment_type
if nsx_attachment_type == "PatchAttachment":
attach_obj["peer_port_uuid"] = attachment_uuid
elif nsx_attachment_type == "L3GatewayAttachment":
attach_obj["l3_gateway_service_uuid"] = attachment_uuid
if attachment_vlan:
attach_obj['vlan_id'] = attachment_vlan
else:
raise nsx_exc.InvalidAttachmentType(
attachment_type=nsx_attachment_type)
return nsxlib.do_request(
HTTP_PUT, uri, jsonutils.dumps(attach_obj), cluster=cluster)
def _create_nat_match_obj(**kwargs):
nat_match_obj = {'ethertype': 'IPv4'}
delta = set(kwargs.keys()) - set(MATCH_KEYS)
if delta:
raise Exception(_("Invalid keys for NAT match: %s"), delta)
nat_match_obj.update(kwargs)
return nat_match_obj
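# Illustrative behaviour of _create_nat_match_obj:
#     _create_nat_match_obj(destination_ip_addresses='10.0.0.1')
#     -> {'ethertype': 'IPv4', 'destination_ip_addresses': '10.0.0.1'}
# Any keyword outside MATCH_KEYS raises an exception.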
def _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj):
LOG.debug("Creating NAT rule: %s", nat_rule_obj)
uri = nsxlib._build_uri_path(LROUTERNAT_RESOURCE,
parent_resource_id=router_id)
return nsxlib.do_request(HTTP_POST, uri, jsonutils.dumps(nat_rule_obj),
cluster=cluster)
def _build_snat_rule_obj(min_src_ip, max_src_ip, nat_match_obj):
return {"to_source_ip_address_min": min_src_ip,
"to_source_ip_address_max": max_src_ip,
"type": "SourceNatRule",
"match": nat_match_obj}
def create_lrouter_nosnat_rule_v2(cluster, _router_id, _match_criteria=None):
LOG.info(_LI("No SNAT rules cannot be applied as they are not available "
"in this version of the NSX platform"))
def create_lrouter_nodnat_rule_v2(cluster, _router_id, _match_criteria=None):
LOG.info(_LI("No DNAT rules cannot be applied as they are not available "
"in this version of the NSX platform"))
def create_lrouter_snat_rule_v2(cluster, router_id,
min_src_ip, max_src_ip, match_criteria=None):
nat_match_obj = _create_nat_match_obj(**match_criteria)
nat_rule_obj = _build_snat_rule_obj(min_src_ip, max_src_ip, nat_match_obj)
return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj)
def create_lrouter_dnat_rule_v2(cluster, router_id, dst_ip,
to_dst_port=None, match_criteria=None):
nat_match_obj = _create_nat_match_obj(**match_criteria)
nat_rule_obj = {
"to_destination_ip_address_min": dst_ip,
"to_destination_ip_address_max": dst_ip,
"type": "DestinationNatRule",
"match": nat_match_obj
}
if to_dst_port:
nat_rule_obj['to_destination_port'] = to_dst_port
return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj)
def create_lrouter_nosnat_rule_v3(cluster, router_id, order=None,
match_criteria=None):
nat_match_obj = _create_nat_match_obj(**match_criteria)
nat_rule_obj = {
"type": "NoSourceNatRule",
"match": nat_match_obj
}
if order:
nat_rule_obj['order'] = order
return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj)
def create_lrouter_nodnat_rule_v3(cluster, router_id, order=None,
match_criteria=None):
nat_match_obj = _create_nat_match_obj(**match_criteria)
nat_rule_obj = {
"type": "NoDestinationNatRule",
"match": nat_match_obj
}
if order:
nat_rule_obj['order'] = order
return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj)
def create_lrouter_snat_rule_v3(cluster, router_id, min_src_ip, max_src_ip,
order=None, match_criteria=None):
nat_match_obj = _create_nat_match_obj(**match_criteria)
nat_rule_obj = _build_snat_rule_obj(min_src_ip, max_src_ip, nat_match_obj)
if order:
nat_rule_obj['order'] = order
return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj)
def create_lrouter_dnat_rule_v3(cluster, router_id, dst_ip, to_dst_port=None,
order=None, match_criteria=None):
nat_match_obj = _create_nat_match_obj(**match_criteria)
nat_rule_obj = {
"to_destination_ip_address": dst_ip,
"type": "DestinationNatRule",
"match": nat_match_obj
}
if to_dst_port:
nat_rule_obj['to_destination_port'] = to_dst_port
if order:
nat_rule_obj['order'] = order
return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj)
def delete_nat_rules_by_match(cluster, router_id, rule_type,
max_num_expected,
min_num_expected=0,
raise_on_len_mismatch=True,
**kwargs):
# remove nat rules
nat_rules = query_nat_rules(cluster, router_id)
to_delete_ids = []
for r in nat_rules:
if (r['type'] != rule_type):
continue
for key, value in kwargs.iteritems():
if not (key in r['match'] and r['match'][key] == value):
break
else:
to_delete_ids.append(r['uuid'])
num_rules_to_delete = len(to_delete_ids)
if (num_rules_to_delete < min_num_expected or
num_rules_to_delete > max_num_expected):
if raise_on_len_mismatch:
raise nsx_exc.NatRuleMismatch(actual_rules=num_rules_to_delete,
min_rules=min_num_expected,
max_rules=max_num_expected)
else:
LOG.warn(_LW("Found %(actual_rule_num)d matching NAT rules, which "
"is not in the expected range (%(min_exp_rule_num)d,"
"%(max_exp_rule_num)d)"),
{'actual_rule_num': num_rules_to_delete,
'min_exp_rule_num': min_num_expected,
'max_exp_rule_num': max_num_expected})
for rule_id in to_delete_ids:
delete_router_nat_rule(cluster, router_id, rule_id)
# Return number of deleted rules - useful at least for
# testing purposes
return num_rules_to_delete
def delete_router_nat_rule(cluster, router_id, rule_id):
uri = nsxlib._build_uri_path(LROUTERNAT_RESOURCE, rule_id, router_id)
nsxlib.do_request(HTTP_DELETE, uri, cluster=cluster)
def query_nat_rules(cluster, router_id, fields="*", filters=None):
uri = nsxlib._build_uri_path(LROUTERNAT_RESOURCE,
parent_resource_id=router_id,
fields=fields, filters=filters)
return nsxlib.get_all_query_pages(uri, cluster)
# NOTE(salvatore-orlando): The following FIXME applies in general to
# each operation on list attributes.
# FIXME(salvatore-orlando): need a lock around the list of IPs on an iface
def update_lrouter_port_ips(cluster, lrouter_id, lport_id,
ips_to_add, ips_to_remove):
uri = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, lport_id, lrouter_id)
try:
port = nsxlib.do_request(HTTP_GET, uri, cluster=cluster)
# TODO(salvatore-orlando): Enforce ips_to_add intersection with
# ips_to_remove is empty
ip_address_set = set(port['ip_addresses'])
ip_address_set = ip_address_set - set(ips_to_remove)
ip_address_set = ip_address_set | set(ips_to_add)
# Set is not JSON serializable - convert to list
port['ip_addresses'] = list(ip_address_set)
nsxlib.do_request(HTTP_PUT, uri, jsonutils.dumps(port),
cluster=cluster)
except exception.NotFound:
# FIXME(salv-orlando):avoid raising different exception
data = {'lport_id': lport_id, 'lrouter_id': lrouter_id}
msg = (_("Router Port %(lport_id)s not found on router "
"%(lrouter_id)s") % data)
LOG.exception(msg)
raise nsx_exc.NsxPluginException(err_msg=msg)
except api_exc.NsxApiException as e:
msg = _("An exception occurred while updating IP addresses on a "
"router logical port:%s") % e
LOG.exception(msg)
raise nsx_exc.NsxPluginException(err_msg=msg)
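# Illustrative set arithmetic performed by update_lrouter_port_ips:
#     current ips {'10.0.0.1', '10.0.0.2'}, ips_to_remove=['10.0.0.1'],
#     ips_to_add=['10.0.0.3'] -> port updated with ['10.0.0.2', '10.0.0.3']
#     (element order is not guaranteed, since it comes from a set)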
ROUTER_FUNC_DICT = {
'create_lrouter': {
2: {versioning.DEFAULT_VERSION: create_implicit_routing_lrouter, },
3: {versioning.DEFAULT_VERSION: create_implicit_routing_lrouter,
1: create_implicit_routing_lrouter_with_distribution,
2: create_explicit_routing_lrouter, }, },
'update_lrouter': {
2: {versioning.DEFAULT_VERSION: update_implicit_routing_lrouter, },
3: {versioning.DEFAULT_VERSION: update_implicit_routing_lrouter,
2: update_explicit_routing_lrouter, }, },
'create_lrouter_dnat_rule': {
2: {versioning.DEFAULT_VERSION: create_lrouter_dnat_rule_v2, },
3: {versioning.DEFAULT_VERSION: create_lrouter_dnat_rule_v3, }, },
'create_lrouter_snat_rule': {
2: {versioning.DEFAULT_VERSION: create_lrouter_snat_rule_v2, },
3: {versioning.DEFAULT_VERSION: create_lrouter_snat_rule_v3, }, },
'create_lrouter_nosnat_rule': {
2: {versioning.DEFAULT_VERSION: create_lrouter_nosnat_rule_v2, },
3: {versioning.DEFAULT_VERSION: create_lrouter_nosnat_rule_v3, }, },
'create_lrouter_nodnat_rule': {
2: {versioning.DEFAULT_VERSION: create_lrouter_nodnat_rule_v2, },
3: {versioning.DEFAULT_VERSION: create_lrouter_nodnat_rule_v3, }, },
'get_default_route_explicit_routing_lrouter': {
3: {versioning.DEFAULT_VERSION:
            get_default_route_explicit_routing_lrouter_v33,
2: get_default_route_explicit_routing_lrouter_v32, }, },
}
@versioning.versioned(ROUTER_FUNC_DICT)
def create_lrouter(cluster, *args, **kwargs):
if kwargs.get('distributed', None):
v = cluster.api_client.get_version()
if (v.major, v.minor) < (3, 1):
raise nsx_exc.InvalidVersion(version=v)
return v
@versioning.versioned(ROUTER_FUNC_DICT)
def get_default_route_explicit_routing_lrouter(cluster, *args, **kwargs):
pass
@versioning.versioned(ROUTER_FUNC_DICT)
def update_lrouter(cluster, *args, **kwargs):
if kwargs.get('routes', None):
v = cluster.api_client.get_version()
if (v.major, v.minor) < (3, 2):
raise nsx_exc.InvalidVersion(version=v)
return v
@versioning.versioned(ROUTER_FUNC_DICT)
def create_lrouter_dnat_rule(cluster, *args, **kwargs):
pass
@versioning.versioned(ROUTER_FUNC_DICT)
def create_lrouter_snat_rule(cluster, *args, **kwargs):
pass
@versioning.versioned(ROUTER_FUNC_DICT)
def create_lrouter_nosnat_rule(cluster, *args, **kwargs):
pass
@versioning.versioned(ROUTER_FUNC_DICT)
def create_lrouter_nodnat_rule(cluster, *args, **kwargs):
pass
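# Note on dispatch: the bodies of the @versioning.versioned functions above
# only validate arguments (optionally returning a version override); the
# real work is routed through ROUTER_FUNC_DICT by the cluster's NSX
# major/minor version. For example, against NSX 3.2 create_lrouter resolves
# to create_explicit_routing_lrouter, while against NSX 3.0 it falls back
# to the default create_implicit_routing_lrouter.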
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.services.baremetal import base
class BaremetalClientV1(base.BaremetalClient):
"""
Base Tempest REST client for Ironic API v1.
Specific implementations must implement serialize and deserialize
methods in order to send requests to Ironic.
"""
def __init__(self, auth_provider):
super(BaremetalClientV1, self).__init__(auth_provider)
self.version = '1'
self.uri_prefix = 'v%s' % self.version
@base.handle_errors
def list_nodes(self):
"""List all existing nodes."""
return self._list_request('nodes')
@base.handle_errors
def list_chassis(self):
"""List all existing chassis."""
return self._list_request('chassis')
@base.handle_errors
def list_ports(self, **kwargs):
"""List all existing ports."""
return self._list_request('ports', **kwargs)
@base.handle_errors
def list_nodestates(self, uuid):
"""List all existing states."""
return self._list_request('/nodes/%s/states' % uuid)
@base.handle_errors
def list_ports_detail(self, **kwargs):
"""Details list all existing ports."""
return self._list_request('/ports/detail', **kwargs)
@base.handle_errors
def list_drivers(self):
"""List all existing drivers."""
return self._list_request('drivers')
@base.handle_errors
def show_node(self, uuid):
"""
Gets a specific node.
:param uuid: Unique identifier of the node in UUID format.
:return: Serialized node as a dictionary.
"""
return self._show_request('nodes', uuid)
@base.handle_errors
def show_chassis(self, uuid):
"""
Gets a specific chassis.
:param uuid: Unique identifier of the chassis in UUID format.
:return: Serialized chassis as a dictionary.
"""
return self._show_request('chassis', uuid)
@base.handle_errors
def show_port(self, uuid):
"""
Gets a specific port.
:param uuid: Unique identifier of the port in UUID format.
:return: Serialized port as a dictionary.
"""
return self._show_request('ports', uuid)
    @base.handle_errors
    def show_driver(self, driver_name):
"""
Gets a specific driver.
:param driver_name: Name of driver.
:return: Serialized driver as a dictionary.
"""
return self._show_request('drivers', driver_name)
@base.handle_errors
def create_node(self, chassis_id, **kwargs):
"""
Create a baremetal node with the specified parameters.
:param cpu_arch: CPU architecture of the node. Default: x86_64.
:param cpu_num: Number of CPUs. Default: 8.
:param storage: Disk size. Default: 1024.
:param memory: Available RAM. Default: 4096.
:param driver: Driver name. Default: "fake"
:return: A tuple with the server response and the created node.
"""
node = {'chassis_uuid': chassis_id,
'properties': {'cpu_arch': kwargs.get('cpu_arch', 'x86_64'),
'cpu_num': kwargs.get('cpu_num', 8),
'storage': kwargs.get('storage', 1024),
'memory': kwargs.get('memory', 4096)},
'driver': kwargs.get('driver', 'fake')}
return self._create_request('nodes', 'node', node)
@base.handle_errors
def create_chassis(self, **kwargs):
"""
Create a chassis with the specified parameters.
:param description: The description of the chassis.
Default: test-chassis
:return: A tuple with the server response and the created chassis.
"""
chassis = {'description': kwargs.get('description', 'test-chassis')}
return self._create_request('chassis', 'chassis', chassis)
@base.handle_errors
def create_port(self, node_id, **kwargs):
"""
Create a port with the specified parameters.
:param node_id: The ID of the node which owns the port.
:param address: MAC address of the port.
:param extra: Meta data of the port. Default: {'foo': 'bar'}.
:param uuid: UUID of the port.
:return: A tuple with the server response and the created port.
"""
port = {'extra': kwargs.get('extra', {'foo': 'bar'}),
'uuid': kwargs['uuid']}
if node_id is not None:
port['node_uuid'] = node_id
        if kwargs.get('address') is not None:
port['address'] = kwargs['address']
return self._create_request('ports', 'port', port)
@base.handle_errors
def delete_node(self, uuid):
"""
Deletes a node having the specified UUID.
:param uuid: The unique identifier of the node.
:return: A tuple with the server response and the response body.
"""
return self._delete_request('nodes', uuid)
@base.handle_errors
def delete_chassis(self, uuid):
"""
Deletes a chassis having the specified UUID.
:param uuid: The unique identifier of the chassis.
:return: A tuple with the server response and the response body.
"""
return self._delete_request('chassis', uuid)
@base.handle_errors
def delete_port(self, uuid):
"""
Deletes a port having the specified UUID.
:param uuid: The unique identifier of the port.
:return: A tuple with the server response and the response body.
"""
return self._delete_request('ports', uuid)
@base.handle_errors
def update_node(self, uuid, **kwargs):
"""
Update the specified node.
:param uuid: The unique identifier of the node.
:return: A tuple with the server response and the updated node.
"""
node_attributes = ('properties/cpu_arch',
'properties/cpu_num',
'properties/storage',
'properties/memory',
'driver')
patch = self._make_patch(node_attributes, **kwargs)
return self._patch_request('nodes', uuid, patch)
@base.handle_errors
def update_chassis(self, uuid, **kwargs):
"""
Update the specified chassis.
:param uuid: The unique identifier of the chassis.
:return: A tuple with the server response and the updated chassis.
"""
chassis_attributes = ('description',)
patch = self._make_patch(chassis_attributes, **kwargs)
return self._patch_request('chassis', uuid, patch)
@base.handle_errors
def update_port(self, uuid, patch):
"""
Update the specified port.
:param uuid: The unique identifier of the port.
:param patch: List of dicts representing json patches.
:return: A tuple with the server response and the updated port.
"""
return self._patch_request('ports', uuid, patch)
@base.handle_errors
def set_node_power_state(self, node_uuid, state):
"""
Set power state of the specified node.
:param node_uuid: The unique identifier of the node.
        :param state: Desired state to set (on/off/reboot).
"""
target = {'target': state}
return self._put_request('nodes/%s/states/power' % node_uuid,
target)
@base.handle_errors
def validate_driver_interface(self, node_uuid):
"""
Get all driver interfaces of a specific node.
        :param node_uuid: Unique identifier of the node in UUID format.
"""
uri = '{pref}/{res}/{uuid}/{postf}'.format(pref=self.uri_prefix,
res='nodes',
uuid=node_uuid,
postf='validate')
return self._show_request('nodes', node_uuid, uri=uri)
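# Illustrative usage sketch (the auth_provider and the 'uuid' response keys
# are assumptions based on the Ironic API, not guaranteed by this file):
#
#     client = BaremetalClientV1(auth_provider)
#     resp, chassis = client.create_chassis(description='demo-chassis')
#     resp, node = client.create_node(chassis['uuid'], driver='fake')
#     resp, body = client.set_node_power_state(node['uuid'], 'reboot')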
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Main entry point into the Trust service."""
import abc
from oslo_config import cfg
from oslo_log import log
import six
from six.moves import zip
from keystone.common import dependency
from keystone.common import manager
from keystone import exception
from keystone.i18n import _
from keystone import notifications
CONF = cfg.CONF
LOG = log.getLogger(__name__)
@dependency.requires('identity_api')
@dependency.provider('trust_api')
class Manager(manager.Manager):
"""Default pivot point for the Trust backend.
See :mod:`keystone.common.manager.Manager` for more details on how this
dynamically calls the backend.
"""
driver_namespace = 'keystone.trust'
_TRUST = "OS-TRUST:trust"
def __init__(self):
super(Manager, self).__init__(CONF.trust.driver)
@staticmethod
def _validate_redelegation(redelegated_trust, trust):
# Validate against:
# 0 < redelegation_count <= max_redelegation_count
max_redelegation_count = CONF.trust.max_redelegation_count
redelegation_depth = redelegated_trust.get('redelegation_count', 0)
if not (0 < redelegation_depth <= max_redelegation_count):
raise exception.Forbidden(
_('Remaining redelegation depth of %(redelegation_depth)d'
' out of allowed range of [0..%(max_count)d]') %
{'redelegation_depth': redelegation_depth,
'max_count': max_redelegation_count})
        # remaining_uses must not be set on a trust that is being redelegated
remaining_uses = trust.get('remaining_uses')
if remaining_uses is not None:
            raise exception.Forbidden(
                _('Field "remaining_uses" is set to %(value)s'
                  ' while it must not be set in order to redelegate a trust')
                % {'value': remaining_uses})
# expiry times
trust_expiry = trust.get('expires_at')
redelegated_expiry = redelegated_trust['expires_at']
if trust_expiry:
# redelegated trust is from backend and has no tzinfo
if redelegated_expiry < trust_expiry.replace(tzinfo=None):
raise exception.Forbidden(
_('Requested expiration time is more '
'than redelegated trust can provide'))
else:
trust['expires_at'] = redelegated_expiry
# trust roles is a subset of roles of the redelegated trust
parent_roles = set(role['id']
for role in redelegated_trust['roles'])
if not all(role['id'] in parent_roles for role in trust['roles']):
raise exception.Forbidden(
_('Some of requested roles are not in redelegated trust'))
def get_trust_pedigree(self, trust_id):
trust = self.driver.get_trust(trust_id)
trust_chain = [trust]
if trust and trust.get('redelegated_trust_id'):
trusts = self.driver.list_trusts_for_trustor(
trust['trustor_user_id'])
while trust_chain[-1].get('redelegated_trust_id'):
for t in trusts:
if t['id'] == trust_chain[-1]['redelegated_trust_id']:
trust_chain.append(t)
break
return trust_chain
def get_trust(self, trust_id, deleted=False):
trust = self.driver.get_trust(trust_id, deleted)
if trust and trust.get('redelegated_trust_id') and not deleted:
trust_chain = self.get_trust_pedigree(trust_id)
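            # trust_chain is ordered [trust, parent, grandparent, ...], so
            # zip(trust_chain[1:], trust_chain) pairs each parent with the
            # trust redelegated from it.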
for parent, child in zip(trust_chain[1:], trust_chain):
self._validate_redelegation(parent, child)
try:
self.identity_api.assert_user_enabled(
parent['trustee_user_id'])
except (AssertionError, exception.NotFound):
raise exception.Forbidden(
_('One of the trust agents is disabled or deleted'))
return trust
def create_trust(self, trust_id, trust, roles, redelegated_trust=None,
initiator=None):
"""Create a new trust.
:returns: a new trust
"""
# Default for initial trust in chain is max_redelegation_count
max_redelegation_count = CONF.trust.max_redelegation_count
requested_count = trust.get('redelegation_count')
redelegatable = (trust.pop('allow_redelegation', False)
and requested_count != 0)
if not redelegatable:
trust['redelegation_count'] = requested_count = 0
remaining_uses = trust.get('remaining_uses')
if remaining_uses is not None and remaining_uses <= 0:
msg = _('remaining_uses must be a positive integer or null.')
raise exception.ValidationError(msg)
else:
# Validate requested redelegation depth
if requested_count and requested_count > max_redelegation_count:
raise exception.Forbidden(
_('Requested redelegation depth of %(requested_count)d '
'is greater than allowed %(max_count)d') %
{'requested_count': requested_count,
'max_count': max_redelegation_count})
# Decline remaining_uses
if trust.get('remaining_uses') is not None:
raise exception.ValidationError(
_('remaining_uses must not be set if redelegation is '
'allowed'))
if redelegated_trust:
trust['redelegated_trust_id'] = redelegated_trust['id']
remaining_count = redelegated_trust['redelegation_count'] - 1
# Validate depth consistency
if (redelegatable and requested_count and
requested_count != remaining_count):
msg = _('Modifying "redelegation_count" upon redelegation is '
'forbidden. Omitting this parameter is advised.')
raise exception.Forbidden(msg)
trust.setdefault('redelegation_count', remaining_count)
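            # If the caller omitted redelegation_count, inherit the parent's
            # remaining count; an explicit value must match it (checked
            # above).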
# Check entire trust pedigree validity
pedigree = self.get_trust_pedigree(redelegated_trust['id'])
for t in pedigree:
self._validate_redelegation(t, trust)
trust.setdefault('redelegation_count', max_redelegation_count)
ref = self.driver.create_trust(trust_id, trust, roles)
notifications.Audit.created(self._TRUST, trust_id, initiator=initiator)
return ref
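    # Illustrative call (identifiers assumed): redelegating an existing
    # trust.
    #
    #     parent = trust_api.get_trust(parent_trust_id)
    #     trust_api.create_trust(new_trust_id, trust_ref, roles,
    #                            redelegated_trust=parent)
    #
    # Omitting 'redelegation_count' from trust_ref lets it default to the
    # parent's count minus one, per the logic above.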
def delete_trust(self, trust_id, initiator=None):
"""Remove a trust.
:raises: keystone.exception.TrustNotFound
Recursively remove given and redelegated trusts
"""
trust = self.driver.get_trust(trust_id)
trusts = self.driver.list_trusts_for_trustor(
trust['trustor_user_id'])
for t in trusts:
if t.get('redelegated_trust_id') == trust_id:
# recursive call to make sure all notifications are sent
try:
self.delete_trust(t['id'])
except exception.TrustNotFound:
                    # If the trust was already deleted by a concurrent
                    # process, consistency must not suffer.
pass
# end recursion
self.driver.delete_trust(trust_id)
notifications.Audit.deleted(self._TRUST, trust_id, initiator)
@six.add_metaclass(abc.ABCMeta)
class TrustDriverV8(object):
@abc.abstractmethod
def create_trust(self, trust_id, trust, roles):
"""Create a new trust.
:returns: a new trust
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def get_trust(self, trust_id, deleted=False):
"""Get a trust by the trust id.
:param trust_id: the trust identifier
:type trust_id: string
:param deleted: return the trust even if it is deleted, expired, or
has no consumptions left
:type deleted: bool
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def list_trusts(self):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def list_trusts_for_trustee(self, trustee):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def list_trusts_for_trustor(self, trustor):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def delete_trust(self, trust_id):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def consume_use(self, trust_id):
"""Consume one use when a trust was created with a limitation on its
uses, provided there are still uses available.
:raises: keystone.exception.TrustUseLimitReached,
keystone.exception.TrustNotFound
"""
raise exception.NotImplemented() # pragma: no cover
Driver = manager.create_legacy_driver(TrustDriverV8)
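# Illustrative sketch (not part of keystone): a minimal in-memory backend
# satisfying the TrustDriverV8 interface. The class name and the '_trusts'
# store are assumptions made for this example; a real driver would persist
# trusts in a database and honour expiry.
class InMemoryTrustDriver(TrustDriverV8):
    def __init__(self):
        self._trusts = {}
    def create_trust(self, trust_id, trust, roles):
        ref = dict(trust, id=trust_id, roles=roles, deleted=False)
        self._trusts[trust_id] = ref
        return ref
    def get_trust(self, trust_id, deleted=False):
        ref = self._trusts.get(trust_id)
        if ref is None or (ref['deleted'] and not deleted):
            raise exception.TrustNotFound(trust_id=trust_id)
        return ref
    def list_trusts(self):
        return [t for t in self._trusts.values() if not t['deleted']]
    def list_trusts_for_trustee(self, trustee):
        return [t for t in self.list_trusts()
                if t.get('trustee_user_id') == trustee]
    def list_trusts_for_trustor(self, trustor):
        return [t for t in self.list_trusts()
                if t.get('trustor_user_id') == trustor]
    def delete_trust(self, trust_id):
        self.get_trust(trust_id)['deleted'] = True  # soft delete
    def consume_use(self, trust_id):
        ref = self.get_trust(trust_id)
        remaining = ref.get('remaining_uses')
        if remaining is not None:
            if remaining <= 0:
                raise exception.TrustUseLimitReached(trust_id=trust_id)
            ref['remaining_uses'] = remaining - 1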
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import types
import mock
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import agent_base_vendor
from ironic.drivers.modules import agent_client
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import fake
from ironic import objects
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as object_utils
INSTANCE_INFO = db_utils.get_test_agent_instance_info()
DRIVER_INFO = db_utils.get_test_agent_driver_info()
DRIVER_INTERNAL_INFO = db_utils.get_test_agent_driver_internal_info()
class TestBaseAgentVendor(db_base.DbTestCase):
def setUp(self):
super(TestBaseAgentVendor, self).setUp()
mgr_utils.mock_the_extension_manager(driver="fake_agent")
self.passthru = agent_base_vendor.BaseAgentVendor()
n = {
'driver': 'fake_agent',
'instance_info': INSTANCE_INFO,
'driver_info': DRIVER_INFO,
'driver_internal_info': DRIVER_INTERNAL_INFO,
}
self.node = object_utils.create_test_node(self.context, **n)
def test_validate(self):
with task_manager.acquire(self.context, self.node.uuid) as task:
method = 'heartbeat'
self.passthru.validate(task, method)
def test_driver_validate(self):
kwargs = {'version': '2'}
method = 'lookup'
self.passthru.driver_validate(method, **kwargs)
    def test_driver_validate_invalid_parameter(self):
method = 'lookup'
kwargs = {'version': '1'}
self.assertRaises(exception.InvalidParameterValue,
self.passthru.driver_validate,
method, **kwargs)
def test_driver_validate_missing_parameter(self):
method = 'lookup'
kwargs = {}
self.assertRaises(exception.MissingParameterValue,
self.passthru.driver_validate,
method, **kwargs)
def test_lookup_version_not_found(self):
kwargs = {
'version': '999',
}
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.InvalidParameterValue,
self.passthru.lookup,
task.context,
**kwargs)
@mock.patch('ironic.drivers.modules.agent_base_vendor.BaseAgentVendor'
'._find_node_by_macs', autospec=True)
def test_lookup_v2(self, find_mock):
kwargs = {
'version': '2',
'inventory': {
'interfaces': [
{
'mac_address': 'aa:bb:cc:dd:ee:ff',
'name': 'eth0'
},
{
'mac_address': 'ff:ee:dd:cc:bb:aa',
'name': 'eth1'
}
]
}
}
find_mock.return_value = self.node
with task_manager.acquire(self.context, self.node.uuid) as task:
node = self.passthru.lookup(task.context, **kwargs)
self.assertEqual(self.node.as_dict(), node['node'])
def test_lookup_v2_missing_inventory(self):
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.InvalidParameterValue,
self.passthru.lookup,
task.context)
def test_lookup_v2_empty_inventory(self):
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.InvalidParameterValue,
self.passthru.lookup,
task.context,
inventory={})
def test_lookup_v2_empty_interfaces(self):
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.NodeNotFound,
self.passthru.lookup,
task.context,
version='2',
inventory={'interfaces': []})
@mock.patch.object(objects.Node, 'get_by_uuid')
def test_lookup_v2_with_node_uuid(self, mock_get_node):
kwargs = {
'version': '2',
'node_uuid': 'fake uuid',
'inventory': {
'interfaces': [
{
'mac_address': 'aa:bb:cc:dd:ee:ff',
'name': 'eth0'
},
{
'mac_address': 'ff:ee:dd:cc:bb:aa',
'name': 'eth1'
}
]
}
}
mock_get_node.return_value = self.node
with task_manager.acquire(self.context, self.node.uuid) as task:
node = self.passthru.lookup(task.context, **kwargs)
self.assertEqual(self.node.as_dict(), node['node'])
mock_get_node.assert_called_once_with(mock.ANY, 'fake uuid')
@mock.patch.object(objects.port.Port, 'get_by_address',
spec_set=types.FunctionType)
def test_find_ports_by_macs(self, mock_get_port):
fake_port = object_utils.get_test_port(self.context)
mock_get_port.return_value = fake_port
macs = ['aa:bb:cc:dd:ee:ff']
with task_manager.acquire(
self.context, self.node['uuid'], shared=True) as task:
ports = self.passthru._find_ports_by_macs(task, macs)
self.assertEqual(1, len(ports))
self.assertEqual(fake_port.uuid, ports[0].uuid)
self.assertEqual(fake_port.node_id, ports[0].node_id)
@mock.patch.object(objects.port.Port, 'get_by_address',
spec_set=types.FunctionType)
def test_find_ports_by_macs_bad_params(self, mock_get_port):
mock_get_port.side_effect = exception.PortNotFound(port="123")
macs = ['aa:bb:cc:dd:ee:ff']
with task_manager.acquire(
self.context, self.node['uuid'], shared=True) as task:
empty_ids = self.passthru._find_ports_by_macs(task, macs)
self.assertEqual([], empty_ids)
@mock.patch('ironic.objects.node.Node.get_by_id',
spec_set=types.FunctionType)
@mock.patch('ironic.drivers.modules.agent_base_vendor.BaseAgentVendor'
'._get_node_id', autospec=True)
@mock.patch('ironic.drivers.modules.agent_base_vendor.BaseAgentVendor'
'._find_ports_by_macs', autospec=True)
def test_find_node_by_macs(self, ports_mock, node_id_mock, node_mock):
ports_mock.return_value = object_utils.get_test_port(self.context)
node_id_mock.return_value = '1'
node_mock.return_value = self.node
macs = ['aa:bb:cc:dd:ee:ff']
with task_manager.acquire(
self.context, self.node['uuid'], shared=True) as task:
node = self.passthru._find_node_by_macs(task, macs)
        self.assertEqual(self.node, node)
@mock.patch('ironic.drivers.modules.agent_base_vendor.BaseAgentVendor'
'._find_ports_by_macs', autospec=True)
def test_find_node_by_macs_no_ports(self, ports_mock):
ports_mock.return_value = []
macs = ['aa:bb:cc:dd:ee:ff']
with task_manager.acquire(
self.context, self.node['uuid'], shared=True) as task:
self.assertRaises(exception.NodeNotFound,
self.passthru._find_node_by_macs,
task,
macs)
@mock.patch('ironic.objects.node.Node.get_by_uuid',
spec_set=types.FunctionType)
@mock.patch('ironic.drivers.modules.agent_base_vendor.BaseAgentVendor'
'._get_node_id', autospec=True)
@mock.patch('ironic.drivers.modules.agent_base_vendor.BaseAgentVendor'
'._find_ports_by_macs', autospec=True)
def test_find_node_by_macs_nodenotfound(self, ports_mock, node_id_mock,
node_mock):
port = object_utils.get_test_port(self.context)
ports_mock.return_value = [port]
node_id_mock.return_value = self.node['uuid']
node_mock.side_effect = [self.node,
exception.NodeNotFound(node=self.node)]
macs = ['aa:bb:cc:dd:ee:ff']
with task_manager.acquire(
self.context, self.node['uuid'], shared=True) as task:
self.assertRaises(exception.NodeNotFound,
self.passthru._find_node_by_macs,
task,
macs)
def test_get_node_id(self):
fake_port1 = object_utils.get_test_port(self.context,
node_id=123,
address="aa:bb:cc:dd:ee:fe")
fake_port2 = object_utils.get_test_port(self.context,
node_id=123,
id=42,
address="aa:bb:cc:dd:ee:fb",
uuid='1be26c0b-03f2-4d2e-ae87-'
'c02d7f33c782')
node_id = self.passthru._get_node_id([fake_port1, fake_port2])
self.assertEqual(fake_port2.node_id, node_id)
def test_get_node_id_exception(self):
fake_port1 = object_utils.get_test_port(self.context,
node_id=123,
address="aa:bb:cc:dd:ee:fc")
fake_port2 = object_utils.get_test_port(self.context,
node_id=321,
id=42,
address="aa:bb:cc:dd:ee:fd",
uuid='1be26c0b-03f2-4d2e-ae87-'
'c02d7f33c782')
self.assertRaises(exception.NodeNotFound,
self.passthru._get_node_id,
[fake_port1, fake_port2])
def test_get_interfaces(self):
fake_inventory = {
'interfaces': [
{
'mac_address': 'aa:bb:cc:dd:ee:ff',
'name': 'eth0'
}
]
}
interfaces = self.passthru._get_interfaces(fake_inventory)
self.assertEqual(fake_inventory['interfaces'], interfaces)
def test_get_interfaces_bad(self):
self.assertRaises(exception.InvalidParameterValue,
self.passthru._get_interfaces,
inventory={})
def test_heartbeat(self):
kwargs = {
'agent_url': 'http://127.0.0.1:9999/bar'
}
with task_manager.acquire(
self.context, self.node['uuid'], shared=True) as task:
self.passthru.heartbeat(task, **kwargs)
def test_heartbeat_bad(self):
kwargs = {}
with task_manager.acquire(
self.context, self.node['uuid'], shared=True) as task:
self.assertRaises(exception.MissingParameterValue,
self.passthru.heartbeat, task, **kwargs)
@mock.patch.object(agent_base_vendor.BaseAgentVendor, 'deploy_has_started',
autospec=True)
@mock.patch.object(deploy_utils, 'set_failed_state', autospec=True)
@mock.patch.object(agent_base_vendor.BaseAgentVendor, 'deploy_is_done',
autospec=True)
@mock.patch.object(agent_base_vendor.LOG, 'exception', autospec=True)
def test_heartbeat_deploy_done_fails(self, log_mock, done_mock,
failed_mock, deploy_started_mock):
deploy_started_mock.return_value = True
kwargs = {
'agent_url': 'http://127.0.0.1:9999/bar'
}
done_mock.side_effect = iter([Exception('LlamaException')])
with task_manager.acquire(
self.context, self.node['uuid'], shared=True) as task:
task.node.provision_state = states.DEPLOYWAIT
task.node.target_provision_state = states.ACTIVE
self.passthru.heartbeat(task, **kwargs)
failed_mock.assert_called_once_with(task, mock.ANY)
log_mock.assert_called_once_with(
'Asynchronous exception for node '
'1be26c0b-03f2-4d2e-ae87-c02d7f33c123: Failed checking if deploy '
'is done. Exception: LlamaException')
@mock.patch.object(agent_base_vendor.BaseAgentVendor, 'deploy_has_started',
autospec=True)
@mock.patch.object(deploy_utils, 'set_failed_state', autospec=True)
@mock.patch.object(agent_base_vendor.BaseAgentVendor, 'deploy_is_done',
autospec=True)
@mock.patch.object(agent_base_vendor.LOG, 'exception', autospec=True)
def test_heartbeat_deploy_done_raises_with_event(self, log_mock, done_mock,
failed_mock,
deploy_started_mock):
deploy_started_mock.return_value = True
kwargs = {
'agent_url': 'http://127.0.0.1:9999/bar'
}
with task_manager.acquire(
self.context, self.node['uuid'], shared=True) as task:
def driver_failure(*args, **kwargs):
# simulate driver failure that both advances the FSM
# and raises an exception
task.node.provision_state = states.DEPLOYFAIL
raise Exception('LlamaException')
task.node.provision_state = states.DEPLOYWAIT
task.node.target_provision_state = states.ACTIVE
done_mock.side_effect = driver_failure
self.passthru.heartbeat(task, **kwargs)
            # Because task.node.provision_state is set to DEPLOYFAIL within
            # driver_failure, heartbeat should not call
            # deploy_utils.set_failed_state again.
self.assertFalse(failed_mock.called)
log_mock.assert_called_once_with(
'Asynchronous exception for node '
'1be26c0b-03f2-4d2e-ae87-c02d7f33c123: Failed checking if deploy '
'is done. Exception: LlamaException')
@mock.patch.object(objects.node.Node, 'touch_provisioning', autospec=True)
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'_refresh_clean_steps', autospec=True)
@mock.patch.object(manager_utils, 'set_node_cleaning_steps', autospec=True)
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'notify_conductor_resume_clean', autospec=True)
def test_heartbeat_resume_clean(self, mock_notify, mock_set_steps,
mock_refresh, mock_touch):
kwargs = {
'agent_url': 'http://127.0.0.1:9999/bar'
}
self.node.clean_step = {}
for state in (states.CLEANWAIT, states.CLEANING):
self.node.provision_state = state
self.node.save()
with task_manager.acquire(
self.context, self.node.uuid, shared=True) as task:
self.passthru.heartbeat(task, **kwargs)
mock_touch.assert_called_once_with(mock.ANY)
mock_refresh.assert_called_once_with(mock.ANY, task)
mock_notify.assert_called_once_with(mock.ANY, task)
mock_set_steps.assert_called_once_with(task)
# Reset mocks for the next interaction
mock_touch.reset_mock()
mock_refresh.reset_mock()
mock_notify.reset_mock()
mock_set_steps.reset_mock()
@mock.patch.object(manager_utils, 'cleaning_error_handler')
@mock.patch.object(objects.node.Node, 'touch_provisioning', autospec=True)
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'_refresh_clean_steps', autospec=True)
@mock.patch.object(manager_utils, 'set_node_cleaning_steps', autospec=True)
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'notify_conductor_resume_clean', autospec=True)
def test_heartbeat_resume_clean_fails(self, mock_notify, mock_set_steps,
mock_refresh, mock_touch,
mock_handler):
mocks = [mock_refresh, mock_set_steps, mock_notify]
kwargs = {
'agent_url': 'http://127.0.0.1:9999/bar'
}
self.node.clean_step = {}
self.node.save()
for state in (states.CLEANWAIT, states.CLEANING):
self.node.provision_state = state
self.node.save()
for i in range(len(mocks)):
before_failed_mocks = mocks[:i]
failed_mock = mocks[i]
after_failed_mocks = mocks[i + 1:]
failed_mock.side_effect = Exception()
with task_manager.acquire(
self.context, self.node.uuid, shared=True) as task:
self.passthru.heartbeat(task, **kwargs)
mock_touch.assert_called_once_with(mock.ANY)
mock_handler.assert_called_once_with(task, mock.ANY)
for called in before_failed_mocks + [failed_mock]:
self.assertTrue(called.called)
for not_called in after_failed_mocks:
self.assertFalse(not_called.called)
# Reset mocks for the next interaction
for m in mocks + [mock_touch, mock_handler]:
m.reset_mock()
failed_mock.side_effect = None
@mock.patch.object(objects.node.Node, 'touch_provisioning', autospec=True)
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'continue_cleaning', autospec=True)
def test_heartbeat_continue_cleaning(self, mock_continue, mock_touch):
kwargs = {
'agent_url': 'http://127.0.0.1:9999/bar'
}
self.node.clean_step = {
'priority': 10,
'interface': 'deploy',
'step': 'foo',
'reboot_requested': False
}
for state in (states.CLEANWAIT, states.CLEANING):
self.node.provision_state = state
self.node.save()
with task_manager.acquire(
self.context, self.node.uuid, shared=True) as task:
self.passthru.heartbeat(task, **kwargs)
mock_touch.assert_called_once_with(mock.ANY)
mock_continue.assert_called_once_with(mock.ANY, task, **kwargs)
# Reset mocks for the next interaction
mock_touch.reset_mock()
mock_continue.reset_mock()
@mock.patch.object(manager_utils, 'cleaning_error_handler')
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'continue_cleaning', autospec=True)
def test_heartbeat_continue_cleaning_fails(self, mock_continue,
mock_handler):
kwargs = {
'agent_url': 'http://127.0.0.1:9999/bar'
}
self.node.clean_step = {
'priority': 10,
'interface': 'deploy',
'step': 'foo',
'reboot_requested': False
}
mock_continue.side_effect = Exception()
for state in (states.CLEANWAIT, states.CLEANING):
self.node.provision_state = state
self.node.save()
with task_manager.acquire(
self.context, self.node.uuid, shared=True) as task:
self.passthru.heartbeat(task, **kwargs)
mock_continue.assert_called_once_with(mock.ANY, task, **kwargs)
mock_handler.assert_called_once_with(task, mock.ANY)
mock_handler.reset_mock()
mock_continue.reset_mock()
@mock.patch.object(manager_utils, 'cleaning_error_handler')
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'continue_cleaning', autospec=True)
def test_heartbeat_continue_cleaning_no_worker(self, mock_continue,
mock_handler):
kwargs = {
'agent_url': 'http://127.0.0.1:9999/bar'
}
self.node.clean_step = {
'priority': 10,
'interface': 'deploy',
'step': 'foo',
'reboot_requested': False
}
mock_continue.side_effect = exception.NoFreeConductorWorker()
for state in (states.CLEANWAIT, states.CLEANING):
self.node.provision_state = state
self.node.save()
with task_manager.acquire(
self.context, self.node.uuid, shared=True) as task:
self.passthru.heartbeat(task, **kwargs)
mock_continue.assert_called_once_with(mock.ANY, task, **kwargs)
self.assertFalse(mock_handler.called)
mock_handler.reset_mock()
mock_continue.reset_mock()
@mock.patch.object(agent_base_vendor.BaseAgentVendor, 'continue_deploy',
autospec=True)
@mock.patch.object(agent_base_vendor.BaseAgentVendor, 'reboot_to_instance',
autospec=True)
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'notify_conductor_resume_clean', autospec=True)
def test_heartbeat_noops_maintenance_mode(self, ncrc_mock, rti_mock,
cd_mock):
"""Ensures that heartbeat() no-ops for a maintenance node."""
kwargs = {
'agent_url': 'http://127.0.0.1:9999/bar'
}
self.node.maintenance = True
for state in (states.AVAILABLE, states.DEPLOYWAIT, states.DEPLOYING,
states.CLEANING):
self.node.provision_state = state
self.node.save()
with task_manager.acquire(
self.context, self.node['uuid'], shared=True) as task:
self.passthru.heartbeat(task, **kwargs)
self.assertEqual(0, ncrc_mock.call_count)
self.assertEqual(0, rti_mock.call_count)
self.assertEqual(0, cd_mock.call_count)
@mock.patch.object(objects.node.Node, 'touch_provisioning', autospec=True)
@mock.patch.object(agent_base_vendor.BaseAgentVendor, 'deploy_has_started',
autospec=True)
def test_heartbeat_touch_provisioning(self, mock_deploy_started,
mock_touch):
mock_deploy_started.return_value = True
kwargs = {
'agent_url': 'http://127.0.0.1:9999/bar'
}
self.node.provision_state = states.DEPLOYWAIT
self.node.save()
with task_manager.acquire(
self.context, self.node.uuid, shared=True) as task:
self.passthru.heartbeat(task, **kwargs)
mock_touch.assert_called_once_with(mock.ANY)
def test_vendor_passthru_vendor_routes(self):
expected = ['heartbeat']
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
vendor_routes = task.driver.vendor.vendor_routes
self.assertIsInstance(vendor_routes, dict)
self.assertEqual(expected, list(vendor_routes))
def test_vendor_passthru_driver_routes(self):
expected = ['lookup']
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
driver_routes = task.driver.vendor.driver_routes
self.assertIsInstance(driver_routes, dict)
self.assertEqual(expected, list(driver_routes))
@mock.patch.object(time, 'sleep', lambda seconds: None)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state',
spec=types.FunctionType)
@mock.patch.object(agent_client.AgentClient, 'power_off',
spec=types.FunctionType)
def test_reboot_and_finish_deploy(self, power_off_mock,
get_power_state_mock,
node_power_action_mock):
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
get_power_state_mock.side_effect = [states.POWER_ON,
states.POWER_OFF]
self.passthru.reboot_and_finish_deploy(task)
power_off_mock.assert_called_once_with(task.node)
self.assertEqual(2, get_power_state_mock.call_count)
node_power_action_mock.assert_called_once_with(
task, states.REBOOT)
self.assertEqual(states.ACTIVE, task.node.provision_state)
self.assertEqual(states.NOSTATE, task.node.target_provision_state)
@mock.patch.object(time, 'sleep', lambda seconds: None)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state',
spec=types.FunctionType)
@mock.patch.object(agent_client.AgentClient, 'power_off',
spec=types.FunctionType)
def test_reboot_and_finish_deploy_soft_poweroff_doesnt_complete(
self, power_off_mock, get_power_state_mock,
node_power_action_mock):
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
get_power_state_mock.return_value = states.POWER_ON
self.passthru.reboot_and_finish_deploy(task)
power_off_mock.assert_called_once_with(task.node)
self.assertEqual(7, get_power_state_mock.call_count)
node_power_action_mock.assert_called_once_with(
task, states.REBOOT)
self.assertEqual(states.ACTIVE, task.node.provision_state)
self.assertEqual(states.NOSTATE, task.node.target_provision_state)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'power_off',
spec=types.FunctionType)
def test_reboot_and_finish_deploy_soft_poweroff_fails(
self, power_off_mock, node_power_action_mock):
power_off_mock.side_effect = iter([RuntimeError("boom")])
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.passthru.reboot_and_finish_deploy(task)
power_off_mock.assert_called_once_with(task.node)
node_power_action_mock.assert_called_once_with(
task, states.REBOOT)
self.assertEqual(states.ACTIVE, task.node.provision_state)
self.assertEqual(states.NOSTATE, task.node.target_provision_state)
@mock.patch.object(time, 'sleep', lambda seconds: None)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state',
spec=types.FunctionType)
@mock.patch.object(agent_client.AgentClient, 'power_off',
spec=types.FunctionType)
def test_reboot_and_finish_deploy_get_power_state_fails(
self, power_off_mock, get_power_state_mock,
node_power_action_mock):
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
get_power_state_mock.side_effect = iter([RuntimeError("boom")])
self.passthru.reboot_and_finish_deploy(task)
power_off_mock.assert_called_once_with(task.node)
self.assertEqual(7, get_power_state_mock.call_count)
node_power_action_mock.assert_called_once_with(
task, states.REBOOT)
self.assertEqual(states.ACTIVE, task.node.provision_state)
self.assertEqual(states.NOSTATE, task.node.target_provision_state)
@mock.patch.object(time, 'sleep', lambda seconds: None)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state',
spec=types.FunctionType)
@mock.patch.object(agent_client.AgentClient, 'power_off',
spec=types.FunctionType)
def test_reboot_and_finish_deploy_power_action_fails(
self, power_off_mock, get_power_state_mock,
node_power_action_mock):
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
get_power_state_mock.return_value = states.POWER_ON
node_power_action_mock.side_effect = iter([RuntimeError("boom")])
self.assertRaises(exception.InstanceDeployFailure,
self.passthru.reboot_and_finish_deploy,
task)
power_off_mock.assert_called_once_with(task.node)
self.assertEqual(7, get_power_state_mock.call_count)
node_power_action_mock.assert_has_calls([
mock.call(task, states.REBOOT),
mock.call(task, states.POWER_OFF)])
self.assertEqual(states.DEPLOYFAIL, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
@mock.patch.object(agent_client.AgentClient, 'install_bootloader',
autospec=True)
@mock.patch.object(deploy_utils, 'try_set_boot_device', autospec=True)
def test_configure_local_boot(self, try_set_boot_device_mock,
install_bootloader_mock):
install_bootloader_mock.return_value = {
'command_status': 'SUCCESS', 'command_error': None}
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
task.node.driver_internal_info['is_whole_disk_image'] = False
self.passthru.configure_local_boot(task,
root_uuid='some-root-uuid')
try_set_boot_device_mock.assert_called_once_with(
task, boot_devices.DISK)
install_bootloader_mock.assert_called_once_with(
mock.ANY, task.node, root_uuid='some-root-uuid',
efi_system_part_uuid=None)
@mock.patch.object(agent_client.AgentClient, 'install_bootloader',
autospec=True)
@mock.patch.object(deploy_utils, 'try_set_boot_device', autospec=True)
def test_configure_local_boot_uefi(self, try_set_boot_device_mock,
install_bootloader_mock):
install_bootloader_mock.return_value = {
'command_status': 'SUCCESS', 'command_error': None}
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
task.node.driver_internal_info['is_whole_disk_image'] = False
self.passthru.configure_local_boot(
task, root_uuid='some-root-uuid',
efi_system_part_uuid='efi-system-part-uuid')
try_set_boot_device_mock.assert_called_once_with(
task, boot_devices.DISK)
install_bootloader_mock.assert_called_once_with(
mock.ANY, task.node, root_uuid='some-root-uuid',
efi_system_part_uuid='efi-system-part-uuid')
@mock.patch.object(deploy_utils, 'try_set_boot_device', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'install_bootloader',
autospec=True)
def test_configure_local_boot_whole_disk_image(
self, install_bootloader_mock, try_set_boot_device_mock):
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
self.passthru.configure_local_boot(task)
self.assertFalse(install_bootloader_mock.called)
try_set_boot_device_mock.assert_called_once_with(
task, boot_devices.DISK)
@mock.patch.object(deploy_utils, 'try_set_boot_device', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'install_bootloader',
autospec=True)
def test_configure_local_boot_no_root_uuid(
self, install_bootloader_mock, try_set_boot_device_mock):
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
task.node.driver_internal_info['is_whole_disk_image'] = False
self.passthru.configure_local_boot(task)
self.assertFalse(install_bootloader_mock.called)
try_set_boot_device_mock.assert_called_once_with(
task, boot_devices.DISK)
@mock.patch.object(agent_client.AgentClient, 'install_bootloader',
autospec=True)
def test_configure_local_boot_boot_loader_install_fail(
self, install_bootloader_mock):
install_bootloader_mock.return_value = {
'command_status': 'FAILED', 'command_error': 'boom'}
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
task.node.driver_internal_info['is_whole_disk_image'] = False
self.assertRaises(exception.InstanceDeployFailure,
self.passthru.configure_local_boot,
task, root_uuid='some-root-uuid')
install_bootloader_mock.assert_called_once_with(
mock.ANY, task.node, root_uuid='some-root-uuid',
efi_system_part_uuid=None)
self.assertEqual(states.DEPLOYFAIL, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
@mock.patch.object(deploy_utils, 'try_set_boot_device', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'install_bootloader',
autospec=True)
def test_configure_local_boot_set_boot_device_fail(
self, install_bootloader_mock, try_set_boot_device_mock):
install_bootloader_mock.return_value = {
'command_status': 'SUCCESS', 'command_error': None}
try_set_boot_device_mock.side_effect = iter([RuntimeError('error')])
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
task.node.driver_internal_info['is_whole_disk_image'] = False
self.assertRaises(exception.InstanceDeployFailure,
self.passthru.configure_local_boot,
task, root_uuid='some-root-uuid')
install_bootloader_mock.assert_called_once_with(
mock.ANY, task.node, root_uuid='some-root-uuid',
efi_system_part_uuid=None)
try_set_boot_device_mock.assert_called_once_with(
task, boot_devices.DISK)
self.assertEqual(states.DEPLOYFAIL, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'notify_conductor_resume_clean', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_continue_cleaning(self, status_mock, notify_mock):
# Test a successful execute clean step on the agent
self.node.clean_step = {
'priority': 10,
'interface': 'deploy',
'step': 'erase_devices',
'reboot_requested': False
}
self.node.save()
status_mock.return_value = [{
'command_status': 'SUCCEEDED',
'command_name': 'execute_clean_step',
'command_result': {
'clean_step': self.node.clean_step
}
}]
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
self.passthru.continue_cleaning(task)
notify_mock.assert_called_once_with(mock.ANY, task)
@mock.patch.object(agent_base_vendor,
'_get_post_clean_step_hook', autospec=True)
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'notify_conductor_resume_clean', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_continue_cleaning_with_hook(
self, status_mock, notify_mock, get_hook_mock):
self.node.clean_step = {
'priority': 10,
'interface': 'raid',
'step': 'create_configuration',
}
self.node.save()
command_status = {
'command_status': 'SUCCEEDED',
'command_name': 'execute_clean_step',
'command_result': {'clean_step': self.node.clean_step}}
status_mock.return_value = [command_status]
hook_mock = mock.MagicMock(spec=types.FunctionType, __name__='foo')
get_hook_mock.return_value = hook_mock
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.passthru.continue_cleaning(task)
get_hook_mock.assert_called_once_with(task.node)
hook_mock.assert_called_once_with(task, command_status)
notify_mock.assert_called_once_with(mock.ANY, task)
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'notify_conductor_resume_clean', autospec=True)
@mock.patch.object(agent_base_vendor,
'_get_post_clean_step_hook', autospec=True)
@mock.patch.object(manager_utils, 'cleaning_error_handler', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_continue_cleaning_with_hook_fails(
self, status_mock, error_handler_mock, get_hook_mock,
notify_mock):
self.node.clean_step = {
'priority': 10,
'interface': 'raid',
'step': 'create_configuration',
}
self.node.save()
command_status = {
'command_status': 'SUCCEEDED',
'command_name': 'execute_clean_step',
'command_result': {'clean_step': self.node.clean_step}}
status_mock.return_value = [command_status]
hook_mock = mock.MagicMock(spec=types.FunctionType, __name__='foo')
hook_mock.side_effect = RuntimeError('error')
get_hook_mock.return_value = hook_mock
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.passthru.continue_cleaning(task)
get_hook_mock.assert_called_once_with(task.node)
hook_mock.assert_called_once_with(task, command_status)
error_handler_mock.assert_called_once_with(task, mock.ANY)
self.assertFalse(notify_mock.called)
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'notify_conductor_resume_clean', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_continue_cleaning_old_command(self, status_mock, notify_mock):
        # Test when a second execute_clean_step is sent to the agent, but
        # the new step hasn't started yet.
self.node.clean_step = {
'priority': 10,
'interface': 'deploy',
'step': 'erase_devices',
'reboot_requested': False
}
self.node.save()
status_mock.return_value = [{
'command_status': 'SUCCEEDED',
'command_name': 'execute_clean_step',
'command_result': {
'priority': 20,
'interface': 'deploy',
'step': 'update_firmware',
'reboot_requested': False
}
}]
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
self.passthru.continue_cleaning(task)
self.assertFalse(notify_mock.called)
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'notify_conductor_resume_clean', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_continue_cleaning_running(self, status_mock, notify_mock):
# Test that no action is taken while a clean step is executing
status_mock.return_value = [{
'command_status': 'RUNNING',
'command_name': 'execute_clean_step',
'command_result': None
}]
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
self.passthru.continue_cleaning(task)
self.assertFalse(notify_mock.called)
@mock.patch.object(manager_utils, 'cleaning_error_handler', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_continue_cleaning_fail(self, status_mock, error_mock):
        # Test that a failure puts the node in CLEANFAIL
status_mock.return_value = [{
'command_status': 'FAILED',
'command_name': 'execute_clean_step',
'command_result': {}
}]
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
self.passthru.continue_cleaning(task)
error_mock.assert_called_once_with(task, mock.ANY)
@mock.patch.object(manager_utils, 'set_node_cleaning_steps', autospec=True)
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'notify_conductor_resume_clean', autospec=True)
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'_refresh_clean_steps', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def _test_continue_cleaning_clean_version_mismatch(
self, status_mock, refresh_steps_mock, notify_mock, steps_mock,
manual=False):
status_mock.return_value = [{
'command_status': 'CLEAN_VERSION_MISMATCH',
'command_name': 'execute_clean_step',
}]
tgt_prov_state = states.MANAGEABLE if manual else states.AVAILABLE
self.node.provision_state = states.CLEANWAIT
self.node.target_provision_state = tgt_prov_state
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.passthru.continue_cleaning(task)
notify_mock.assert_called_once_with(mock.ANY, task)
refresh_steps_mock.assert_called_once_with(mock.ANY, task)
if manual:
self.assertFalse(
task.node.driver_internal_info['skip_current_clean_step'])
self.assertFalse(steps_mock.called)
else:
steps_mock.assert_called_once_with(task)
                self.assertNotIn('skip_current_clean_step',
                                 task.node.driver_internal_info)
def test_continue_cleaning_automated_clean_version_mismatch(self):
self._test_continue_cleaning_clean_version_mismatch()
def test_continue_cleaning_manual_clean_version_mismatch(self):
self._test_continue_cleaning_clean_version_mismatch(manual=True)
@mock.patch.object(manager_utils, 'cleaning_error_handler', autospec=True)
@mock.patch.object(manager_utils, 'set_node_cleaning_steps', autospec=True)
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'notify_conductor_resume_clean', autospec=True)
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'_refresh_clean_steps', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_continue_cleaning_clean_version_mismatch_fail(
self, status_mock, refresh_steps_mock, notify_mock, steps_mock,
error_mock, manual=False):
status_mock.return_value = [{
'command_status': 'CLEAN_VERSION_MISMATCH',
'command_name': 'execute_clean_step',
'command_result': {'hardware_manager_version': {'Generic': '1'}}
}]
refresh_steps_mock.side_effect = exception.NodeCleaningFailure("boo")
tgt_prov_state = states.MANAGEABLE if manual else states.AVAILABLE
self.node.provision_state = states.CLEANWAIT
self.node.target_provision_state = tgt_prov_state
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.passthru.continue_cleaning(task)
status_mock.assert_called_once_with(mock.ANY, task.node)
refresh_steps_mock.assert_called_once_with(mock.ANY, task)
error_mock.assert_called_once_with(task, mock.ANY)
self.assertFalse(notify_mock.called)
self.assertFalse(steps_mock.called)
@mock.patch.object(manager_utils, 'cleaning_error_handler', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
def test_continue_cleaning_unknown(self, status_mock, error_mock):
# Test that unknown commands are treated as failures
status_mock.return_value = [{
'command_status': 'UNKNOWN',
'command_name': 'execute_clean_step',
'command_result': {}
}]
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
self.passthru.continue_cleaning(task)
error_mock.assert_called_once_with(task, mock.ANY)
def _test_clean_step_hook(self, hook_dict_mock):
"""Helper method for unit tests related to clean step hooks.
This is a helper method for other unit tests related to
        clean step hooks. It accepts a mock 'hook_dict_mock' which is
a MagicMock and sets it up to function as a mock dictionary.
After that, it defines a dummy hook_method for two clean steps
raid.create_configuration and raid.delete_configuration.
:param hook_dict_mock: An instance of mock.MagicMock() which
is the mocked value of agent_base_vendor.POST_CLEAN_STEP_HOOKS
:returns: a tuple, where the first item is the hook method created
by this method and second item is the backend dictionary for
the mocked hook_dict_mock
"""
hook_dict = {}
def get(key, default):
return hook_dict.get(key, default)
def getitem(self, key):
return hook_dict[key]
def setdefault(key, default):
if key not in hook_dict:
hook_dict[key] = default
return hook_dict[key]
hook_dict_mock.get = get
hook_dict_mock.__getitem__ = getitem
hook_dict_mock.setdefault = setdefault
some_function_mock = mock.MagicMock()
@agent_base_vendor.post_clean_step_hook(
interface='raid', step='delete_configuration')
@agent_base_vendor.post_clean_step_hook(
interface='raid', step='create_configuration')
def hook_method():
some_function_mock('some-arguments')
return hook_method, hook_dict
@mock.patch.object(agent_base_vendor, 'POST_CLEAN_STEP_HOOKS',
spec_set=dict)
def test_post_clean_step_hook(self, hook_dict_mock):
# This unit test makes sure that hook methods are registered
# properly and entries are made in
# agent_base_vendor.POST_CLEAN_STEP_HOOKS
hook_method, hook_dict = self._test_clean_step_hook(hook_dict_mock)
self.assertEqual(hook_method,
hook_dict['raid']['create_configuration'])
self.assertEqual(hook_method,
hook_dict['raid']['delete_configuration'])
@mock.patch.object(agent_base_vendor, 'POST_CLEAN_STEP_HOOKS',
spec_set=dict)
def test__get_post_clean_step_hook(self, hook_dict_mock):
# Check if agent_base_vendor._get_post_clean_step_hook can get
# clean step for which hook is registered.
hook_method, hook_dict = self._test_clean_step_hook(hook_dict_mock)
self.node.clean_step = {'step': 'create_configuration',
'interface': 'raid'}
self.node.save()
hook_returned = agent_base_vendor._get_post_clean_step_hook(self.node)
self.assertEqual(hook_method, hook_returned)
@mock.patch.object(agent_base_vendor, 'POST_CLEAN_STEP_HOOKS',
spec_set=dict)
def test__get_post_clean_step_hook_no_hook_registered(
self, hook_dict_mock):
# Make sure agent_base_vendor._get_post_clean_step_hook returns
# None when no clean step hook is registered for the clean step.
hook_method, hook_dict = self._test_clean_step_hook(hook_dict_mock)
self.node.clean_step = {'step': 'some-clean-step',
'interface': 'some-other-interface'}
self.node.save()
hook_returned = agent_base_vendor._get_post_clean_step_hook(self.node)
self.assertIsNone(hook_returned)
class TestRefreshCleanSteps(TestBaseAgentVendor):
def setUp(self):
super(TestRefreshCleanSteps, self).setUp()
self.node.driver_internal_info['agent_url'] = 'http://127.0.0.1:9999'
self.ports = [object_utils.create_test_port(self.context,
node_id=self.node.id)]
self.clean_steps = {
'hardware_manager_version': '1',
'clean_steps': {
'GenericHardwareManager': [
{'interface': 'deploy',
'step': 'erase_devices',
'priority': 20},
],
'SpecificHardwareManager': [
{'interface': 'deploy',
'step': 'update_firmware',
'priority': 30},
{'interface': 'raid',
'step': 'create_configuration',
'priority': 10},
]
}
}
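        # _refresh_clean_steps is expected to regroup these by interface:
        # two 'deploy' steps (one per hardware manager) and one 'raid' step,
        # as asserted in test__refresh_clean_steps below.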
@mock.patch.object(agent_client.AgentClient, 'get_clean_steps',
autospec=True)
def test__refresh_clean_steps(self, client_mock):
client_mock.return_value = {
'command_result': self.clean_steps}
with task_manager.acquire(
self.context, self.node.uuid, shared=False) as task:
self.passthru._refresh_clean_steps(task)
client_mock.assert_called_once_with(mock.ANY, task.node,
task.ports)
self.assertEqual('1', task.node.driver_internal_info[
'hardware_manager_version'])
            self.assertIn('agent_cached_clean_steps_refreshed',
                          task.node.driver_internal_info)
steps = task.node.driver_internal_info['agent_cached_clean_steps']
# Since steps are returned in dicts, they have non-deterministic
# ordering
self.assertEqual(2, len(steps))
self.assertIn(self.clean_steps['clean_steps'][
'GenericHardwareManager'][0], steps['deploy'])
self.assertIn(self.clean_steps['clean_steps'][
'SpecificHardwareManager'][0], steps['deploy'])
self.assertEqual([self.clean_steps['clean_steps'][
'SpecificHardwareManager'][1]], steps['raid'])
@mock.patch.object(agent_client.AgentClient, 'get_clean_steps',
autospec=True)
def test__refresh_clean_steps_missing_steps(self, client_mock):
del self.clean_steps['clean_steps']
client_mock.return_value = {
'command_result': self.clean_steps}
with task_manager.acquire(
self.context, self.node.uuid, shared=False) as task:
self.assertRaisesRegex(exception.NodeCleaningFailure,
'invalid result',
self.passthru._refresh_clean_steps,
task)
client_mock.assert_called_once_with(mock.ANY, task.node,
task.ports)
@mock.patch.object(agent_client.AgentClient, 'get_clean_steps',
autospec=True)
def test__refresh_clean_steps_missing_interface(self, client_mock):
step = self.clean_steps['clean_steps']['SpecificHardwareManager'][1]
del step['interface']
client_mock.return_value = {
'command_result': self.clean_steps}
with task_manager.acquire(
self.context, self.node.uuid, shared=False) as task:
self.assertRaisesRegex(exception.NodeCleaningFailure,
'invalid clean step',
self.passthru._refresh_clean_steps,
task)
client_mock.assert_called_once_with(mock.ANY, task.node,
task.ports)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.networks import workflows \
as network_workflows
LOG = logging.getLogger(__name__)
class CreateSubnetInfoAction(network_workflows.CreateSubnetInfoAction):
with_subnet = forms.BooleanField(initial=True, required=False,
widget=forms.HiddenInput())
class Meta:
name = _("Subnet")
        help_text = _('You can create a subnet associated with the '
                      'network. Advanced configuration is available '
                      'in the "Subnet Detail" tab.')
def clean(self):
cleaned_data = workflows.Action.clean(self)
self._check_subnet_data(cleaned_data)
return cleaned_data
class CreateSubnetInfo(network_workflows.CreateSubnetInfo):
action_class = CreateSubnetInfoAction
depends_on = ("network_id",)
class CreateSubnet(network_workflows.CreateNetwork):
slug = "create_subnet"
name = _("Create Subnet")
finalize_button_name = _("Create")
success_message = _('Created subnet "%s".')
failure_message = _('Unable to create subnet "%s".')
default_steps = (CreateSubnetInfo,
network_workflows.CreateSubnetDetail)
def format_status_message(self, message):
name = self.context.get('subnet_name') or self.context.get('subnet_id')
return message % name
def get_success_url(self):
return reverse("horizon:project:networks:detail",
args=(self.context.get('network_id'),))
def get_failure_url(self):
return reverse("horizon:project:networks:detail",
args=(self.context.get('network_id'),))
def handle(self, request, data):
subnet = self._create_subnet(request, data)
return True if subnet else False
class UpdateSubnetInfoAction(CreateSubnetInfoAction):
cidr = forms.IPField(label=_("Network Address"),
required=False,
initial="",
widget=forms.TextInput(
attrs={'readonly': 'readonly'}),
help_text=_("Network address in CIDR format "
"(e.g. 192.168.0.0/24)"),
version=forms.IPv4 | forms.IPv6,
mask=True)
    # NOTE(amotoki): When the 'disabled' attribute is set for the ChoiceField
    # and a ValidationError is raised for a POST request, the initial value
    # of the ip_version ChoiceField is not set in the re-displayed form.
    # As a result, 'IPv4' is displayed even when IPv6 is used if a
    # ValidationError is detected. In addition, the 'required=True' check
    # complains when re-POSTing since the value of the ChoiceField is not
    # set. Thus a HiddenInput is used for the ip_version ChoiceField as a
    # workaround.
ip_version = forms.ChoiceField(choices=[(4, 'IPv4'), (6, 'IPv6')],
#widget=forms.Select(
# attrs={'disabled': 'disabled'}),
widget=forms.HiddenInput(),
label=_("IP Version"))
gateway_ip = forms.IPField(
label=_("Gateway IP (optional)"),
required=False,
initial="",
help_text=_("IP address of Gateway (e.g. 192.168.0.254). "
"You need to specify an explicit address "
"to set the gateway. "
"If you want to use no gateway, "
"check 'Disable Gateway' below."),
version=forms.IPv4 | forms.IPv6,
mask=False)
no_gateway = forms.BooleanField(label=_("Disable Gateway"),
initial=False, required=False)
class Meta:
name = _("Subnet")
        help_text = _('You can update a subnet associated with the '
                      'network. Advanced configuration is available '
                      'in the "Subnet Detail" tab.')
def clean(self):
cleaned_data = workflows.Action.clean(self)
self._check_subnet_data(cleaned_data, is_create=False)
return cleaned_data
class UpdateSubnetInfo(CreateSubnetInfo):
action_class = UpdateSubnetInfoAction
depends_on = ("network_id", "subnet_id")
class UpdateSubnetDetailAction(network_workflows.CreateSubnetDetailAction):
allocation_pools = forms.CharField(widget=forms.HiddenInput(),
required=False)
class Meta:
name = _("Subnet Detail")
help_text = _('You can specify additional attributes for the subnet.')
class UpdateSubnetDetail(network_workflows.CreateSubnetDetail):
action_class = UpdateSubnetDetailAction
class UpdateSubnet(network_workflows.CreateNetwork):
slug = "update_subnet"
name = _("Edit Subnet")
finalize_button_name = _("Save")
success_message = _('Updated subnet "%s".')
failure_message = _('Unable to update subnet "%s".')
success_url = "horizon:project:networks:detail"
failure_url = "horizon:project:networks:detail"
default_steps = (UpdateSubnetInfo,
UpdateSubnetDetail)
def format_status_message(self, message):
name = self.context.get('subnet_name') or self.context.get('subnet_id')
return message % name
def get_success_url(self):
return reverse(self.success_url,
args=(self.context.get('network_id'),))
def _update_subnet(self, request, data):
network_id = self.context.get('network_id')
try:
subnet_id = self.context.get('subnet_id')
params = {}
params['name'] = data['subnet_name']
if data['no_gateway']:
params['gateway_ip'] = None
elif data['gateway_ip']:
params['gateway_ip'] = data['gateway_ip']
            # Send gateway_ip only when it has changed, because updating
            # gateway_ip is prohibited when the IP is in use (see bug
            # 1227268).
subnet = api.neutron.subnet_get(request, subnet_id)
if params['gateway_ip'] == subnet.gateway_ip:
del params['gateway_ip']
self._setup_subnet_parameters(params, data, is_create=False)
subnet = api.neutron.subnet_update(request, subnet_id, **params)
msg = _('Subnet "%s" was successfully updated.') % data['cidr']
LOG.debug(msg)
return subnet
except Exception as e:
            msg = (_('Failed to update subnet "%(sub)s": %(reason)s') %
                   {"sub": data['cidr'], "reason": e})
redirect = reverse(self.failure_url, args=(network_id,))
exceptions.handle(request, msg, redirect=redirect)
return False
def handle(self, request, data):
subnet = self._update_subnet(request, data)
return True if subnet else False
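# Illustrative wiring (assumed, not part of this module): these workflows
# are typically attached to Django views via horizon's WorkflowView, e.g.
#
#     class UpdateView(workflows.WorkflowView):
#         workflow_class = UpdateSubnet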
|
|
"""
Views for creating, editing and viewing site-specific user profiles.
"""
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.http import Http404
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.views.generic.list_detail import object_list
from la_profiles import utils
def create_profile(request, form_class=None, success_url=None,
template_name='profiles/create_profile.html',
extra_context=None):
"""
Create a profile for the current user, if one doesn't already
exist.
If the user already has a profile, as determined by
``request.user.get_profile()``, a redirect will be issued to the
:view:`profiles.views.edit_profile` view. If no profile model has
been specified in the ``AUTH_PROFILE_MODULE`` setting,
``django.contrib.auth.models.SiteProfileNotAvailable`` will be
raised.
**Optional arguments:**
``extra_context``
A dictionary of variables to add to the template context. Any
callable object in this dictionary will be called to produce
the end result which appears in the context.
``form_class``
The form class to use for validating and creating the user
profile. This form class must define a method named
``save()``, implementing the same argument signature as the
``save()`` method of a standard Django ``ModelForm`` (this
view will call ``save(commit=False)`` to obtain the profile
object, and fill in the user before the final save). If the
profile object includes many-to-many relations, the convention
established by ``ModelForm`` of using a method named
``save_m2m()`` will be used, and so your form class should
also define this method.
If this argument is not supplied, this view will use a
``ModelForm`` automatically generated from the model specified
by ``AUTH_PROFILE_MODULE``.
``success_url``
The URL to redirect to after successful profile creation. If
this argument is not supplied, this will default to the URL of
:view:`profiles.views.profile_detail` for the newly-created
profile object.
``template_name``
The template to use when displaying the profile-creation
form. If not supplied, this will default to
:template:`profiles/create_profile.html`.
**Context:**
``form``
The profile-creation form.
**Template:**
``template_name`` keyword argument, or
:template:`profiles/create_profile.html`.
"""
try:
profile_obj = request.user.get_profile()
return HttpResponseRedirect(reverse('profiles_edit_profile'))
except ObjectDoesNotExist:
pass
#
# We set up success_url here, rather than as the default value for
# the argument. Trying to do it as the argument's default would
# mean evaluating the call to reverse() at the time this module is
# first imported, which introduces a circular dependency: to
# perform the reverse lookup we need access to profiles/urls.py,
# but profiles/urls.py in turn imports this module.
#
if success_url is None:
success_url = reverse('profiles_profile_detail',
kwargs={ 'username': request.user.username })
if form_class is None:
form_class = utils.get_profile_form()
if request.method == 'POST':
form = form_class(data=request.POST, files=request.FILES)
if form.is_valid():
profile_obj = form.save(commit=False)
profile_obj.user = request.user
profile_obj.save()
if hasattr(form, 'save_m2m'):
form.save_m2m()
return HttpResponseRedirect(success_url)
else:
form = form_class()
if extra_context is None:
extra_context = {}
context = RequestContext(request)
for key, value in extra_context.items():
        context[key] = value() if callable(value) else value
return render_to_response(template_name,
{ 'form': form },
context_instance=context)
create_profile = login_required(create_profile)
def edit_profile(request, form_class=None, success_url=None,
template_name='profiles/edit_profile.html',
extra_context=None):
"""
Edit the current user's profile.
If the user does not already have a profile (as determined by
``User.get_profile()``), a redirect will be issued to the
:view:`profiles.views.create_profile` view; if no profile model
has been specified in the ``AUTH_PROFILE_MODULE`` setting,
``django.contrib.auth.models.SiteProfileNotAvailable`` will be
raised.
**Optional arguments:**
``extra_context``
A dictionary of variables to add to the template context. Any
callable object in this dictionary will be called to produce
the end result which appears in the context.
``form_class``
The form class to use for validating and editing the user
profile. This form class must operate similarly to a standard
Django ``ModelForm`` in that it must accept an instance of the
object to be edited as the keyword argument ``instance`` to
its constructor, and it must implement a method named
``save()`` which will save the updates to the object. If this
argument is not specified, this view will use a ``ModelForm``
generated from the model specified in the
``AUTH_PROFILE_MODULE`` setting.
``success_url``
The URL to redirect to following a successful edit. If not
specified, this will default to the URL of
:view:`profiles.views.profile_detail` for the profile object
being edited.
``template_name``
The template to use when displaying the profile-editing
form. If not specified, this will default to
:template:`profiles/edit_profile.html`.
**Context:**
``form``
The form for editing the profile.
``profile``
The user's current profile.
**Template:**
``template_name`` keyword argument or
:template:`profiles/edit_profile.html`.
"""
try:
profile_obj = request.user.get_profile()
except ObjectDoesNotExist:
return HttpResponseRedirect(reverse('profiles_create_profile'))
#
# See the comment in create_profile() for discussion of why
# success_url is set up here, rather than as a default value for
# the argument.
#
if success_url is None:
success_url = reverse('profiles_profile_detail',
kwargs={ 'username': request.user.username })
if form_class is None:
form_class = utils.get_profile_form()
if request.method == 'POST':
form = form_class(data=request.POST, files=request.FILES, instance=profile_obj)
if form.is_valid():
form.save()
return HttpResponseRedirect(success_url)
else:
form = form_class(instance=profile_obj)
if extra_context is None:
extra_context = {}
context = RequestContext(request)
for key, value in extra_context.items():
        context[key] = value() if callable(value) else value
return render_to_response(template_name,
{ 'form': form,
'profile': profile_obj, },
context_instance=context)
edit_profile = login_required(edit_profile)
def profile_detail(request, username, public_profile_field=None,
template_name='profiles/profile_detail.html',
extra_context=None):
"""
Detail view of a user's profile.
If no profile model has been specified in the
``AUTH_PROFILE_MODULE`` setting,
``django.contrib.auth.models.SiteProfileNotAvailable`` will be
raised.
If the user has not yet created a profile, ``Http404`` will be
raised.
**Required arguments:**
``username``
The username of the user whose profile is being displayed.
**Optional arguments:**
``extra_context``
A dictionary of variables to add to the template context. Any
callable object in this dictionary will be called to produce
the end result which appears in the context.
``public_profile_field``
The name of a ``BooleanField`` on the profile model; if the
value of that field on the user's profile is ``False``, the
``profile`` variable in the template will be ``None``. Use
this feature to allow users to mark their profiles as not
being publicly viewable.
If this argument is not specified, it will be assumed that all
users' profiles are publicly viewable.
``template_name``
The name of the template to use for displaying the profile. If
not specified, this will default to
:template:`profiles/profile_detail.html`.
**Context:**
``profile``
The user's profile, or ``None`` if the user's profile is not
publicly viewable (see the description of
``public_profile_field`` above).
**Template:**
``template_name`` keyword argument or
:template:`profiles/profile_detail.html`.
"""
user = get_object_or_404(User, username=username)
try:
profile_obj = user.get_profile()
except ObjectDoesNotExist:
raise Http404
if public_profile_field is not None and \
not getattr(profile_obj, public_profile_field):
profile_obj = None
if extra_context is None:
extra_context = {}
context = RequestContext(request)
for key, value in extra_context.items():
        context[key] = value() if callable(value) else value
return render_to_response(template_name,
{ 'profile': profile_obj },
context_instance=context)
def profile_list(request, public_profile_field=None,
template_name='profiles/profile_list.html', **kwargs):
"""
A list of user profiles.
If no profile model has been specified in the
``AUTH_PROFILE_MODULE`` setting,
``django.contrib.auth.models.SiteProfileNotAvailable`` will be
raised.
**Optional arguments:**
``public_profile_field``
The name of a ``BooleanField`` on the profile model; if the
value of that field on a user's profile is ``False``, that
profile will be excluded from the list. Use this feature to
allow users to mark their profiles as not being publicly
viewable.
If this argument is not specified, it will be assumed that all
users' profiles are publicly viewable.
``template_name``
The name of the template to use for displaying the profiles. If
not specified, this will default to
:template:`profiles/profile_list.html`.
Additionally, all arguments accepted by the
:view:`django.views.generic.list_detail.object_list` generic view
will be accepted here, and applied in the same fashion, with one
exception: ``queryset`` will always be the ``QuerySet`` of the
model specified by the ``AUTH_PROFILE_MODULE`` setting, optionally
    filtered to remove non-publicly-viewable profiles.
**Context:**
Same as the :view:`django.views.generic.list_detail.object_list`
generic view.
**Template:**
``template_name`` keyword argument or
:template:`profiles/profile_list.html`.
"""
profile_model = utils.get_profile_model()
queryset = profile_model._default_manager.all()
if public_profile_field is not None:
queryset = queryset.filter(**{ public_profile_field: True })
kwargs['queryset'] = queryset
return object_list(request, template_name=template_name, **kwargs)
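# Hedged wiring sketch (an assumption, not part of this module): a minimal
# profiles/urls.py matching the URL names reversed above. Shown as comments so
# this views module stays import-safe; the module path 'la_profiles.views' is
# illustrative.
#
#   from django.conf.urls.defaults import patterns, url
#   from la_profiles import views
#
#   urlpatterns = patterns('',
#       url(r'^create/$', views.create_profile, name='profiles_create_profile'),
#       url(r'^edit/$', views.edit_profile, name='profiles_edit_profile'),
#       url(r'^(?P<username>\w+)/$', views.profile_detail,
#           name='profiles_profile_detail'),
#       url(r'^$', views.profile_list, name='profiles_profile_list'),
#   )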
|
|
import os
from collections import namedtuple
from contextlib import contextmanager
from functools import wraps
from unittest import mock
from flask import g
from flask_testing import TestCase
from werkzeug.datastructures import Headers
from database import create_session
from mod_auth.models import Role, User
from mod_customized.models import CustomizedTest, TestFork
from mod_home.models import CCExtractorVersion, GeneralData
from mod_regression.models import (Category, InputType, OutputType,
RegressionTest, RegressionTestOutput,
RegressionTestOutputFiles)
from mod_sample.models import ForbiddenExtension, ForbiddenMimeType, Sample
from mod_test.models import (Fork, Test, TestPlatform, TestProgress,
TestResult, TestResultFile, TestStatus, TestType)
from mod_upload.models import Platform, Upload
@contextmanager
def provide_file_at_root(file_name, to_write=None):
"""
Provide file with name file_name at application root.
"""
if to_write is None:
to_write = "DATABASE_URI = 'sqlite:///:memory:'"
with open(file_name, 'w+') as f:
f.write(to_write)
yield
os.remove(file_name)
def load_file_lines(filepath):
"""
Load lines of the file passed.
:param filepath: path to the file
:type filepath: str
"""
with open(filepath, 'r') as f:
contents = f.readlines()
return contents
def mock_decorator(f):
"""
Mock login_required decorator.
"""
@wraps(f)
def decorated_function(*args, **kwargs):
return f(*args, **kwargs)
return decorated_function
def generate_keys():
from utility import ROOT_DIR
    secret_csrf_path = os.path.join(ROOT_DIR, 'secret_csrf')
    secret_key_path = os.path.join(ROOT_DIR, 'secret_key')
if not os.path.exists(secret_csrf_path):
secret_csrf_cmd = f"head -c 24 /dev/urandom > {secret_csrf_path}"
os.system(secret_csrf_cmd)
if not os.path.exists(secret_key_path):
secret_key_cmd = f"head -c 24 /dev/urandom > {secret_key_path}"
os.system(secret_key_cmd)
return {'secret_csrf_path': secret_csrf_path, 'secret_key_path': secret_key_path}
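# Hedged portability note (an assumption): the shell pipeline above relies on
# a POSIX /dev/urandom; an equivalent pure-Python sketch for the same 24
# random bytes would be:
#   with open(secret_key_path, 'wb') as key_file:
#       key_file.write(os.urandom(24))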
def load_config(file):
key_paths = generate_keys()
with open(key_paths['secret_key_path'], 'rb') as secret_key_file:
secret_key = secret_key_file.read()
with open(key_paths['secret_csrf_path'], 'rb') as secret_csrf_file:
secret_csrf = secret_csrf_file.read()
return {
'Testing': True,
'DATABASE_URI': 'sqlite:///:memory:',
'WTF_CSRF_ENABLED': False,
'SQLALCHEMY_POOL_SIZE': 1,
'GITHUB_DEPLOY_KEY': "test_deploy",
'GITHUB_CI_KEY': "test_ci",
'GITHUB_TOKEN': "",
'GITHUB_BOT': "",
'GITHUB_OWNER': "test_owner",
'GITHUB_REPOSITORY': "test_repo",
'HMAC_KEY': "test_key",
'MIN_PWD_LEN': 10,
'MAX_PWD_LEN': 500,
'SAMPLE_REPOSITORY': "temp",
'KVM_LINUX_NAME': "linux-test",
'KVM_WINDOWS_NAME': "window-test",
'SECRET_KEY': secret_key,
'CSRF_SESSION_KEY': secret_csrf
}
def mock_api_request_github(url, data=None, timeout=None):
if url == "https://api.github.com/repos/test/test_repo/commits/abcdef":
return MockResponse({}, 200)
elif url == "https://api.github.com/user":
return MockResponse({"login": "test"}, 200)
elif "https://api.github.com/user" in url:
return MockResponse({"login": url.split("/")[-1]}, 200)
elif url == "https://api.github.com/repos/test_owner/test_repo/issues":
return MockResponse({'number': 1,
'title': "test title",
'user': {'login': "test_user"},
'created_at': "2011-04-14T16:00:49Z",
'state': "open"}, 201)
elif url == "https://api.github.com/repos/test/test_repo/commits/mockWillReturn500":
return MockResponse({}, 500)
elif url == "https://api.github.com/meta?client_id=&client_secret=":
return MockResponse({'verifiable_password_authentication': True,
'github_services_sha': "abcdefg",
'hooks': [
"192.30.252.0/22",
"185.199.108.0/22"
]}, 200)
return MockResponse({}, 404)
# TODO: replace this with something smarter
signup_information = {
'valid_email': "someone@example.com",
'existing_user_email': "dummy@example.com",
'existing_user_name': "dummy",
'existing_user_pwd': "dummy_pwd",
'existing_user_role': Role.user
}
def generate_signature(data, private_key):
"""
Generate signature token of hook request
:param data: Signature's data
:param private_key: Signature's token
"""
import hashlib
import hmac
    algorithm = hashlib.sha1
encoded_key = bytes(private_key, 'latin-1')
mac = hmac.new(encoded_key, msg=data, digestmod=algorithm)
return mac.hexdigest()
def generate_git_api_header(event, sig):
"""
Create header for GitHub API Request, based on header information from https://developer.github.com/webhooks/.
:param event: Name of the event type that triggered the delivery.
:param sig: The HMAC hex digest of the response body. The HMAC hex digest is generated
using the sha1 hash function and the secret as the HMAC key.
"""
return Headers([
('X-GitHub-Event', event),
('X-GitHub-Delivery', "72d3162e-cc78-11e3-81ab-4c9367dc0958"),
('X-Hub-Signature', f"sha1={sig}"),
('User-Agent', "GitHub-Hookshot/044aadd"),
('Content-Type', "application/json"),
('Content-Length', 6615)
])
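def example_signed_push_headers():
    """
    Hedged usage sketch (not part of the original suite): combine the two
    helpers above to simulate a signed GitHub 'push' delivery. The payload
    bytes and the 'test_key' secret are illustrative placeholders.
    """
    payload = b'{"ref": "refs/heads/master"}'
    signature = generate_signature(payload, "test_key")
    return generate_git_api_header('push', signature)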
class BaseTestCase(TestCase):
@mock.patch('config_parser.parse_config', side_effect=load_config)
def create_app(self, mock_config):
"""
Create an instance of the app with the testing configuration
:return:
"""
user = namedtuple('user', "name password email github_token")
self.user = user(name="test", password="test123",
email="test@example.com", github_token="abcdefgh")
from run import app
return app
def setUp(self):
self.app.preprocess_request()
g.db = create_session(
self.app.config['DATABASE_URI'], drop_tables=True)
# enable Foreign keys for unit tests
g.db.execute('pragma foreign_keys=on')
general_data = [
GeneralData('last_commit', "1978060bf7d2edd119736ba3ba88341f3bec3323"),
GeneralData(f'fetch_commit_{TestPlatform.linux.value}', "1978060bf7d2edd119736ba3ba88341f3bec3323"),
GeneralData(f'fetch_commit_{TestPlatform.windows.value}', "1978060bf7d2edd119736ba3ba88341f3bec3323")
]
g.db.add_all(general_data)
self.ccextractor_version = CCExtractorVersion(
"1.2.3", "2013-02-27T19:35:32Z", "1978060bf7d2edd119736ba3ba88341f3bec3323")
g.db.add(self.ccextractor_version)
fork = Fork(f"https://github.com/{g.github['repository_owner']}/{g.github['repository']}.git")
g.db.add(fork)
g.db.commit()
dummy_user = User(signup_information['existing_user_name'], signup_information['existing_user_role'],
signup_information['existing_user_email'], signup_information['existing_user_pwd'])
g.db.add(dummy_user)
g.db.commit()
test = [
Test(TestPlatform.linux, TestType.pull_request, 1, "master", "1978060bf7d2edd119736ba3ba88341f3bec3323", 1),
Test(TestPlatform.linux, TestType.pull_request, 1, "master", "abcdefgh", 1)
]
g.db.add_all(test)
g.db.commit()
categories = [
Category("Broken", "Samples that are broken"),
Category("DVB", "Samples that contain DVB subtitles"),
Category("DVD", "Samples that contain DVD subtitles"),
Category("MP4", "Samples that are stored in the MP4 format"),
Category("General", "General regression samples")
]
g.db.add_all(categories)
g.db.commit()
samples = [
Sample("sample1", "ts", "sample1"),
Sample("sample2", "ts", "sample2")
]
g.db.add_all(samples)
g.db.commit()
upload = [
Upload(1, 1, 1, Platform.windows),
Upload(1, 2, 1, Platform.linux)
]
g.db.add_all(upload)
g.db.commit()
regression_tests = [
RegressionTest(1, "-autoprogram -out=ttxt -latin1 -2", InputType.file, OutputType.file, 3, 10),
RegressionTest(2, "-autoprogram -out=ttxt -latin1 -ucla", InputType.file, OutputType.file, 1, 10)
]
g.db.add_all(regression_tests)
g.db.commit()
categories[0].regression_tests.append(regression_tests[0])
categories[2].regression_tests.append(regression_tests[1])
regression_test_outputs = [
RegressionTestOutput(1, "sample_out1", ".srt", ""),
RegressionTestOutput(2, "sample_out2", ".srt", "")
]
g.db.add_all(regression_test_outputs)
g.db.commit()
rtof = RegressionTestOutputFiles("bluedabadee", 2)
g.db.add(rtof)
g.db.commit()
test_result_progress = [
TestProgress(1, TestStatus.preparation, "Test 1 preparation"),
TestProgress(1, TestStatus.building, "Test 1 building"),
TestProgress(1, TestStatus.testing, "Test 1 testing"),
TestProgress(1, TestStatus.completed, "Test 1 completed"),
TestProgress(2, TestStatus.preparation, "Test 2 preparation"),
TestProgress(2, TestStatus.building, "Test 2 building"),
TestProgress(2, TestStatus.testing, "Test 2 testing"),
TestProgress(2, TestStatus.completed, "Test 2 completed")
]
g.db.add_all(test_result_progress)
g.db.commit()
test_results = [
TestResult(1, 1, 200, 0, 0),
TestResult(1, 2, 601, 0, 0),
TestResult(2, 1, 200, 200, 0),
TestResult(2, 2, 601, 0, 0)
]
g.db.add_all(test_results)
g.db.commit()
test_result_files = [
TestResultFile(1, 1, 1, "sample_out1"),
TestResultFile(1, 2, 2, "sample_out2"),
TestResultFile(2, 1, 1, "sample_out1"),
TestResultFile(2, 2, 2, "sample_out2", "out2")
]
g.db.add_all(test_result_files)
g.db.commit()
forbidden_mime = ForbiddenMimeType("application/javascript")
forbidden_ext = [
ForbiddenExtension("js"),
ForbiddenExtension("com")
]
g.db.add(forbidden_mime)
g.db.add_all(forbidden_ext)
g.db.commit()
@staticmethod
def create_login_form_data(email, password) -> dict:
"""
Creates the form data for a login event.
        :return: A dictionary containing the email, password and submit fields.
"""
return {'email': email, 'password': password, 'submit': True}
@staticmethod
def create_customize_form(commit_hash, platform, commit_select=None, regression_test=None):
if regression_test is None:
regression_test = [1, 2]
if commit_select is None:
commit_select = ['', '']
return {
'commit_hash': commit_hash,
'commit_select': commit_select,
'platform': platform,
'regression_test': regression_test,
'add': True
}
def create_forktest(self, commit_hash, platform, regression_tests=None):
"""
Create a test on fork based on commit and platform
"""
from flask import g
fork_url = f"https://github.com/{self.user.name}/{g.github['repository']}.git"
fork = Fork(fork_url)
g.db.add(fork)
g.db.commit()
test = Test(platform, TestType.commit, fork.id, 'master', commit_hash)
g.db.add(test)
g.db.commit()
user = User.query.filter(User.email == self.user.email).first()
test_fork = TestFork(user.id, test.id)
g.db.add(test_fork)
g.db.commit()
if regression_tests is not None:
for regression_test in regression_tests:
customized_test = CustomizedTest(test.id, regression_test)
g.db.add(customized_test)
g.db.commit()
def create_user_with_role(self, user, email, password, role, github_token=None):
"""
Create a user with specified user details and role.
"""
from flask import g
        user = User(user, email=email,
                    password=User.generate_hash(password), role=role, github_token=github_token)
g.db.add(user)
g.db.commit()
@staticmethod
def create_random_string(length=32):
import random
import string
random_string = ''.join([random.choice(string.ascii_letters + string.digits) for n in range(length)])
return random_string
class MockResponse:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
def json(self):
return self.json_data
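def example_patched_github_call():
    """
    Hedged usage sketch: mock_api_request_github is meant to stand in for a
    requests-style GET call inside a test. The patch target 'requests.get' is
    an assumption; the real target depends on where the code under test
    imports its HTTP helper from.
    """
    import requests
    with mock.patch('requests.get', side_effect=mock_api_request_github):
        response = requests.get("https://api.github.com/user")
        assert response.json() == {"login": "test"}
        assert response.status_code == 200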
|
|
from __future__ import unicode_literals
from .abc import ABCIE
from .abc7news import Abc7NewsIE
from .academicearth import AcademicEarthCourseIE
from .addanime import AddAnimeIE
from .adobetv import (
AdobeTVIE,
AdobeTVVideoIE,
)
from .adultswim import AdultSwimIE
from .aftenposten import AftenpostenIE
from .aftonbladet import AftonbladetIE
from .airmozilla import AirMozillaIE
from .aljazeera import AlJazeeraIE
from .alphaporno import AlphaPornoIE
from .anitube import AnitubeIE
from .anysex import AnySexIE
from .aol import AolIE
from .allocine import AllocineIE
from .aparat import AparatIE
from .appleconnect import AppleConnectIE
from .appletrailers import AppleTrailersIE
from .archiveorg import ArchiveOrgIE
from .ard import (
ARDIE,
ARDMediathekIE,
SportschauIE,
)
from .arte import (
ArteTvIE,
ArteTVPlus7IE,
ArteTVCreativeIE,
ArteTVConcertIE,
ArteTVFutureIE,
ArteTVDDCIE,
ArteTVEmbedIE,
)
from .atresplayer import AtresPlayerIE
from .atttechchannel import ATTTechChannelIE
from .audiomack import AudiomackIE, AudiomackAlbumIE
from .azubu import AzubuIE
from .baidu import BaiduVideoIE
from .bambuser import BambuserIE, BambuserChannelIE
from .bandcamp import BandcampIE, BandcampAlbumIE
from .bbc import (
BBCCoUkIE,
BBCIE,
)
from .beeg import BeegIE
from .behindkink import BehindKinkIE
from .beatportpro import BeatportProIE
from .bet import BetIE
from .bild import BildIE
from .bilibili import BiliBiliIE
from .blinkx import BlinkxIE
from .bliptv import BlipTVIE, BlipTVUserIE
from .bloomberg import BloombergIE
from .bpb import BpbIE
from .br import BRIE
from .breakcom import BreakIE
from .brightcove import BrightcoveIE
from .buzzfeed import BuzzFeedIE
from .byutv import BYUtvIE
from .c56 import C56IE
from .camdemy import (
CamdemyIE,
CamdemyFolderIE
)
from .canal13cl import Canal13clIE
from .canalplus import CanalplusIE
from .canalc2 import Canalc2IE
from .cbs import CBSIE
from .cbsnews import CBSNewsIE
from .cbssports import CBSSportsIE
from .ccc import CCCIE
from .ceskatelevize import CeskaTelevizeIE
from .channel9 import Channel9IE
from .chilloutzone import ChilloutzoneIE
from .chirbit import (
ChirbitIE,
ChirbitProfileIE,
)
from .cinchcast import CinchcastIE
from .cinemassacre import CinemassacreIE
from .clipfish import ClipfishIE
from .cliphunter import CliphunterIE
from .clipsyndicate import ClipsyndicateIE
from .cloudy import CloudyIE
from .clubic import ClubicIE
from .cmt import CMTIE
from .cnet import CNETIE
from .cnn import (
CNNIE,
CNNBlogsIE,
CNNArticleIE,
)
from .collegehumor import CollegeHumorIE
from .collegerama import CollegeRamaIE
from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE
from .comcarcoff import ComCarCoffIE
from .commonmistakes import CommonMistakesIE, UnicodeBOMIE
from .condenast import CondeNastIE
from .cracked import CrackedIE
from .criterion import CriterionIE
from .crooksandliars import CrooksAndLiarsIE
from .crunchyroll import (
CrunchyrollIE,
CrunchyrollShowPlaylistIE
)
from .cspan import CSpanIE
from .ctsnews import CtsNewsIE
from .dailymotion import (
DailymotionIE,
DailymotionPlaylistIE,
DailymotionUserIE,
DailymotionCloudIE,
)
from .daum import DaumIE
from .dbtv import DBTVIE
from .dcn import DCNIE
from .dctp import DctpTvIE
from .deezer import DeezerPlaylistIE
from .dfb import DFBIE
from .dhm import DHMIE
from .dotsub import DotsubIE
from .douyutv import DouyuTVIE
from .dramafever import (
DramaFeverIE,
DramaFeverSeriesIE,
)
from .dreisat import DreiSatIE
from .drbonanza import DRBonanzaIE
from .drtuber import DrTuberIE
from .drtv import DRTVIE
from .dvtv import DVTVIE
from .dump import DumpIE
from .dumpert import DumpertIE
from .defense import DefenseGouvFrIE
from .discovery import DiscoveryIE
from .dropbox import DropboxIE
from .eagleplatform import EaglePlatformIE
from .ebaumsworld import EbaumsWorldIE
from .echomsk import EchoMskIE
from .ehow import EHowIE
from .eighttracks import EightTracksIE
from .einthusan import EinthusanIE
from .eitb import EitbIE
from .ellentv import (
EllenTVIE,
EllenTVClipsIE,
)
from .elpais import ElPaisIE
from .embedly import EmbedlyIE
from .engadget import EngadgetIE
from .eporner import EpornerIE
from .eroprofile import EroProfileIE
from .escapist import EscapistIE
from .espn import ESPNIE
from .esri import EsriVideoIE
from .everyonesmixtape import EveryonesMixtapeIE
from .exfm import ExfmIE
from .expotv import ExpoTVIE
from .extremetube import ExtremeTubeIE
from .facebook import FacebookIE
from .faz import FazIE
from .fc2 import FC2IE
from .firstpost import FirstpostIE
from .firsttv import FirstTVIE
from .fivemin import FiveMinIE
from .fivetv import FiveTVIE
from .fktv import FKTVIE
from .flickr import FlickrIE
from .folketinget import FolketingetIE
from .footyroom import FootyRoomIE
from .fourtube import FourTubeIE
from .foxgay import FoxgayIE
from .foxnews import FoxNewsIE
from .foxsports import FoxSportsIE
from .franceculture import FranceCultureIE
from .franceinter import FranceInterIE
from .francetv import (
PluzzIE,
FranceTvInfoIE,
FranceTVIE,
GenerationQuoiIE,
CultureboxIE,
)
from .freesound import FreesoundIE
from .freespeech import FreespeechIE
from .freevideo import FreeVideoIE
from .funnyordie import FunnyOrDieIE
from .gamekings import GamekingsIE
from .gameone import (
GameOneIE,
GameOnePlaylistIE,
)
from .gamersyde import GamersydeIE
from .gamespot import GameSpotIE
from .gamestar import GameStarIE
from .gametrailers import GametrailersIE
from .gazeta import GazetaIE
from .gdcvault import GDCVaultIE
from .generic import GenericIE
from .gfycat import GfycatIE
from .giantbomb import GiantBombIE
from .giga import GigaIE
from .glide import GlideIE
from .globo import GloboIE
from .godtube import GodTubeIE
from .goldenmoustache import GoldenMoustacheIE
from .golem import GolemIE
from .googleplus import GooglePlusIE
from .googlesearch import GoogleSearchIE
from .gorillavid import GorillaVidIE
from .goshgay import GoshgayIE
from .groupon import GrouponIE
from .hark import HarkIE
from .hearthisat import HearThisAtIE
from .heise import HeiseIE
from .hellporno import HellPornoIE
from .helsinki import HelsinkiIE
from .hentaistigma import HentaiStigmaIE
from .historicfilms import HistoricFilmsIE
from .history import HistoryIE
from .hitbox import HitboxIE, HitboxLiveIE
from .hornbunny import HornBunnyIE
from .hotnewhiphop import HotNewHipHopIE
from .howcast import HowcastIE
from .howstuffworks import HowStuffWorksIE
from .huffpost import HuffPostIE
from .hypem import HypemIE
from .iconosquare import IconosquareIE
from .ign import IGNIE, OneUPIE
from .imdb import (
ImdbIE,
ImdbListIE
)
from .imgur import (
ImgurIE,
ImgurAlbumIE,
)
from .ina import InaIE
from .indavideo import (
IndavideoIE,
IndavideoEmbedIE,
)
from .infoq import InfoQIE
from .instagram import InstagramIE, InstagramUserIE
from .internetvideoarchive import InternetVideoArchiveIE
from .iprima import IPrimaIE
from .iqiyi import IqiyiIE
from .ir90tv import Ir90TvIE
from .ivi import (
IviIE,
IviCompilationIE
)
from .izlesene import IzleseneIE
from .jadorecettepub import JadoreCettePubIE
from .jeuxvideo import JeuxVideoIE
from .jove import JoveIE
from .jukebox import JukeboxIE
from .jpopsukitv import JpopsukiIE
from .kaltura import KalturaIE
from .kanalplay import KanalPlayIE
from .kankan import KankanIE
from .karaoketv import KaraoketvIE
from .karrierevideos import KarriereVideosIE
from .keezmovies import KeezMoviesIE
from .khanacademy import KhanAcademyIE
from .kickstarter import KickStarterIE
from .keek import KeekIE
from .kontrtube import KontrTubeIE
from .krasview import KrasViewIE
from .ku6 import Ku6IE
from .kuwo import (
KuwoIE,
KuwoAlbumIE,
KuwoChartIE,
KuwoSingerIE,
KuwoCategoryIE,
KuwoMvIE,
)
from .la7 import LA7IE
from .laola1tv import Laola1TvIE
from .lecture2go import Lecture2GoIE
from .letv import (
LetvIE,
LetvTvIE,
LetvPlaylistIE
)
from .libsyn import LibsynIE
from .lifenews import (
LifeNewsIE,
LifeEmbedIE,
)
from .liveleak import LiveLeakIE
from .livestream import (
LivestreamIE,
LivestreamOriginalIE,
LivestreamShortenerIE,
)
from .lnkgo import LnkGoIE
from .lrt import LRTIE
from .lynda import (
LyndaIE,
LyndaCourseIE
)
from .m6 import M6IE
from .macgamestore import MacGameStoreIE
from .mailru import MailRuIE
from .malemotion import MalemotionIE
from .mdr import MDRIE
from .megavideoz import MegaVideozIE
from .metacafe import MetacafeIE
from .metacritic import MetacriticIE
from .mgoon import MgoonIE
from .minhateca import MinhatecaIE
from .ministrygrid import MinistryGridIE
from .miomio import MioMioIE
from .mit import TechTVMITIE, MITIE, OCWMITIE
from .mitele import MiTeleIE
from .mixcloud import MixcloudIE
from .mlb import MLBIE
from .mpora import MporaIE
from .moevideo import MoeVideoIE
from .mofosex import MofosexIE
from .mojvideo import MojvideoIE
from .moniker import MonikerIE
from .mooshare import MooshareIE
from .morningstar import MorningstarIE
from .motherless import MotherlessIE
from .motorsport import MotorsportIE
from .movieclips import MovieClipsIE
from .moviezine import MoviezineIE
from .movshare import MovShareIE
from .mtv import (
MTVIE,
MTVServicesEmbeddedIE,
MTVIggyIE,
MTVDEIE,
)
from .muenchentv import MuenchenTVIE
from .musicplayon import MusicPlayOnIE
from .muzu import MuzuTVIE
from .mwave import MwaveIE
from .myspace import MySpaceIE, MySpaceAlbumIE
from .myspass import MySpassIE
from .myvi import MyviIE
from .myvideo import MyVideoIE
from .myvidster import MyVidsterIE
from .nationalgeographic import NationalGeographicIE
from .naver import NaverIE
from .nba import NBAIE
from .nbc import (
NBCIE,
NBCNewsIE,
NBCSportsIE,
NBCSportsVPlayerIE,
MSNBCIE,
)
from .ndr import (
NDRIE,
NJoyIE,
NDREmbedBaseIE,
NDREmbedIE,
NJoyEmbedIE,
)
from .ndtv import NDTVIE
from .netzkino import NetzkinoIE
from .nerdcubed import NerdCubedFeedIE
from .nerdist import NerdistIE
from .neteasemusic import (
NetEaseMusicIE,
NetEaseMusicAlbumIE,
NetEaseMusicSingerIE,
NetEaseMusicListIE,
NetEaseMusicMvIE,
NetEaseMusicProgramIE,
NetEaseMusicDjRadioIE,
)
from .newgrounds import NewgroundsIE
from .newstube import NewstubeIE
from .nextmedia import (
NextMediaIE,
NextMediaActionNewsIE,
AppleDailyIE,
)
from .nfb import NFBIE
from .nfl import NFLIE
from .nhl import (
NHLIE,
NHLNewsIE,
NHLVideocenterIE,
)
from .niconico import NiconicoIE, NiconicoPlaylistIE
from .ninegag import NineGagIE
from .noco import NocoIE
from .normalboots import NormalbootsIE
from .nosvideo import NosVideoIE
from .nova import NovaIE
from .novamov import NovaMovIE
from .nowness import (
NownessIE,
NownessPlaylistIE,
NownessSeriesIE,
)
from .nowtv import NowTVIE
from .nowvideo import NowVideoIE
from .npo import (
NPOIE,
NPOLiveIE,
NPORadioIE,
NPORadioFragmentIE,
VPROIE,
WNLIE
)
from .nrk import (
NRKIE,
NRKPlaylistIE,
NRKTVIE,
)
from .ntvde import NTVDeIE
from .ntvru import NTVRuIE
from .nytimes import (
NYTimesIE,
NYTimesArticleIE,
)
from .nuvid import NuvidIE
from .odnoklassniki import OdnoklassnikiIE
from .oktoberfesttv import OktoberfestTVIE
from .onionstudios import OnionStudiosIE
from .ooyala import (
OoyalaIE,
OoyalaExternalIE,
)
from .orf import (
ORFTVthekIE,
ORFOE1IE,
ORFFM4IE,
ORFIPTVIE,
)
from .parliamentliveuk import ParliamentLiveUKIE
from .patreon import PatreonIE
from .pbs import PBSIE
from .periscope import (
PeriscopeIE,
QuickscopeIE,
)
from .philharmoniedeparis import PhilharmonieDeParisIE
from .phoenix import PhoenixIE
from .photobucket import PhotobucketIE
from .pinkbike import PinkbikeIE
from .planetaplay import PlanetaPlayIE
from .pladform import PladformIE
from .played import PlayedIE
from .playfm import PlayFMIE
from .playtvak import PlaytvakIE
from .playvid import PlayvidIE
from .playwire import PlaywireIE
from .pluralsight import (
PluralsightIE,
PluralsightCourseIE,
)
from .podomatic import PodomaticIE
from .porn91 import Porn91IE
from .pornhd import PornHdIE
from .pornhub import (
PornHubIE,
PornHubPlaylistIE,
)
from .pornotube import PornotubeIE
from .pornovoisines import PornoVoisinesIE
from .pornoxo import PornoXOIE
from .primesharetv import PrimeShareTVIE
from .promptfile import PromptFileIE
from .prosiebensat1 import ProSiebenSat1IE
from .puls4 import Puls4IE
from .pyvideo import PyvideoIE
from .qqmusic import (
QQMusicIE,
QQMusicSingerIE,
QQMusicAlbumIE,
QQMusicToplistIE,
QQMusicPlaylistIE,
)
from .quickvid import QuickVidIE
from .r7 import R7IE
from .radiode import RadioDeIE
from .radiojavan import RadioJavanIE
from .radiobremen import RadioBremenIE
from .radiofrance import RadioFranceIE
from .rai import RaiIE
from .rbmaradio import RBMARadioIE
from .rds import RDSIE
from .redtube import RedTubeIE
from .restudy import RestudyIE
from .reverbnation import ReverbNationIE
from .ringtv import RingTVIE
from .ro220 import Ro220IE
from .rottentomatoes import RottenTomatoesIE
from .roxwel import RoxwelIE
from .rtbf import RTBFIE
from .rte import RteIE
from .rtlnl import RtlNlIE
from .rtl2 import RTL2IE
from .rtp import RTPIE
from .rts import RTSIE
from .rtve import RTVEALaCartaIE, RTVELiveIE, RTVEInfantilIE
from .rtvnh import RTVNHIE
from .ruhd import RUHDIE
from .rutube import (
RutubeIE,
RutubeChannelIE,
RutubeEmbedIE,
RutubeMovieIE,
RutubePersonIE,
)
from .rutv import RUTVIE
from .ruutu import RuutuIE
from .sandia import SandiaIE
from .safari import (
SafariIE,
SafariCourseIE,
)
from .sapo import SapoIE
from .savefrom import SaveFromIE
from .sbs import SBSIE
from .scivee import SciVeeIE
from .screencast import ScreencastIE
from .screencastomatic import ScreencastOMaticIE
from .screenwavemedia import ScreenwaveMediaIE, TeamFourIE
from .senateisvp import SenateISVPIE
from .servingsys import ServingSysIE
from .sexu import SexuIE
from .sexykarma import SexyKarmaIE
from .shahid import ShahidIE
from .shared import SharedIE
from .sharesix import ShareSixIE
from .sina import SinaIE
from .slideshare import SlideshareIE
from .slutload import SlutloadIE
from .smotri import (
SmotriIE,
SmotriCommunityIE,
SmotriUserIE,
SmotriBroadcastIE,
)
from .snagfilms import (
SnagFilmsIE,
SnagFilmsEmbedIE,
)
from .snotr import SnotrIE
from .sohu import SohuIE
from .soompi import (
SoompiIE,
SoompiShowIE,
)
from .soundcloud import (
SoundcloudIE,
SoundcloudSetIE,
SoundcloudUserIE,
SoundcloudPlaylistIE
)
from .soundgasm import (
SoundgasmIE,
SoundgasmProfileIE
)
from .southpark import (
SouthParkIE,
SouthParkDeIE,
SouthParkDkIE,
SouthParkEsIE,
SouthParkNlIE
)
from .space import SpaceIE
from .spankbang import SpankBangIE
from .spankwire import SpankwireIE
from .spiegel import SpiegelIE, SpiegelArticleIE
from .spiegeltv import SpiegeltvIE
from .spike import SpikeIE
from .sport5 import Sport5IE
from .sportbox import (
SportBoxIE,
SportBoxEmbedIE,
)
from .sportdeutschland import SportDeutschlandIE
from .srf import SrfIE
from .srmediathek import SRMediathekIE
from .ssa import SSAIE
from .stanfordoc import StanfordOpenClassroomIE
from .steam import SteamIE
from .streamcloud import StreamcloudIE
from .streamcz import StreamCZIE
from .streetvoice import StreetVoiceIE
from .sunporno import SunPornoIE
from .svt import (
SVTIE,
SVTPlayIE,
)
from .swrmediathek import SWRMediathekIE
from .syfy import SyfyIE
from .sztvhu import SztvHuIE
from .tagesschau import TagesschauIE
from .tapely import TapelyIE
from .tass import TassIE
from .teachertube import (
TeacherTubeIE,
TeacherTubeUserIE,
)
from .teachingchannel import TeachingChannelIE
from .teamcoco import TeamcocoIE
from .techtalks import TechTalksIE
from .ted import TEDIE
from .telebruxelles import TeleBruxellesIE
from .telecinco import TelecincoIE
from .telegraaf import TelegraafIE
from .telemb import TeleMBIE
from .teletask import TeleTaskIE
from .tenplay import TenPlayIE
from .testurl import TestURLIE
from .testtube import TestTubeIE
from .tf1 import TF1IE
from .theonion import TheOnionIE
from .theplatform import (
ThePlatformIE,
ThePlatformFeedIE,
)
from .thesixtyone import TheSixtyOneIE
from .thisamericanlife import ThisAmericanLifeIE
from .thisav import ThisAVIE
from .tinypic import TinyPicIE
from .tlc import TlcIE, TlcDeIE
from .tmz import (
TMZIE,
TMZArticleIE,
)
from .tnaflix import (
TNAFlixIE,
EMPFlixIE,
MovieFapIE,
)
from .thvideo import (
THVideoIE,
THVideoPlaylistIE
)
from .toutv import TouTvIE
from .toypics import ToypicsUserIE, ToypicsIE
from .traileraddict import TrailerAddictIE
from .trilulilu import TriluliluIE
from .trutube import TruTubeIE
from .tube8 import Tube8IE
from .tubitv import TubiTvIE
from .tudou import TudouIE
from .tumblr import TumblrIE
from .tunein import TuneInIE
from .turbo import TurboIE
from .tutv import TutvIE
from .tv2 import (
TV2IE,
TV2ArticleIE,
)
from .tv4 import TV4IE
from .tvc import (
TVCIE,
TVCArticleIE,
)
from .tvigle import TvigleIE
from .tvp import TvpIE, TvpSeriesIE
from .tvplay import TVPlayIE
from .tweakers import TweakersIE
from .twentyfourvideo import TwentyFourVideoIE
from .twentytwotracks import (
TwentyTwoTracksIE,
TwentyTwoTracksGenreIE
)
from .twitch import (
TwitchVideoIE,
TwitchChapterIE,
TwitchVodIE,
TwitchProfileIE,
TwitchPastBroadcastsIE,
TwitchBookmarksIE,
TwitchStreamIE,
)
from .twitter import TwitterCardIE
from .ubu import UbuIE
from .udemy import (
UdemyIE,
UdemyCourseIE
)
from .udn import UDNEmbedIE
from .ultimedia import UltimediaIE
from .unistra import UnistraIE
from .urort import UrortIE
from .ustream import UstreamIE, UstreamChannelIE
from .varzesh3 import Varzesh3IE
from .vbox7 import Vbox7IE
from .veehd import VeeHDIE
from .veoh import VeohIE
from .vessel import VesselIE
from .vesti import VestiIE
from .vevo import VevoIE
from .vgtv import (
BTArticleIE,
BTVestlendingenIE,
VGTVIE,
)
from .vh1 import VH1IE
from .vice import ViceIE
from .viddler import ViddlerIE
from .videodetective import VideoDetectiveIE
from .videolecturesnet import VideoLecturesNetIE
from .videofyme import VideofyMeIE
from .videomega import VideoMegaIE
from .videopremium import VideoPremiumIE
from .videott import VideoTtIE
from .videoweed import VideoWeedIE
from .vidme import VidmeIE
from .vidzi import VidziIE
from .vier import VierIE, VierVideosIE
from .viewster import ViewsterIE
from .vimeo import (
VimeoIE,
VimeoAlbumIE,
VimeoChannelIE,
VimeoGroupsIE,
VimeoLikesIE,
VimeoReviewIE,
VimeoUserIE,
VimeoWatchLaterIE,
)
from .vimple import VimpleIE
from .vine import (
VineIE,
VineUserIE,
)
from .viki import (
VikiIE,
VikiChannelIE,
)
from .vk import (
VKIE,
VKUserVideosIE,
)
from .vlive import VLiveIE
from .vodlocker import VodlockerIE
from .voicerepublic import VoiceRepublicIE
from .vporn import VpornIE
from .vrt import VRTIE
from .vube import VubeIE
from .vuclip import VuClipIE
from .vulture import VultureIE
from .walla import WallaIE
from .washingtonpost import WashingtonPostIE
from .wat import WatIE
from .wayofthemaster import WayOfTheMasterIE
from .wdr import (
WDRIE,
WDRMobileIE,
WDRMausIE,
)
from .webofstories import (
WebOfStoriesIE,
WebOfStoriesPlaylistIE,
)
from .weibo import WeiboIE
from .wimp import WimpIE
from .wistia import WistiaIE
from .worldstarhiphop import WorldStarHipHopIE
from .wrzuta import WrzutaIE
from .wsj import WSJIE
from .xbef import XBefIE
from .xboxclips import XboxClipsIE
from .xhamster import (
XHamsterIE,
XHamsterEmbedIE,
)
from .xminus import XMinusIE
from .xnxx import XNXXIE
from .xstream import XstreamIE
from .xtube import XTubeUserIE, XTubeIE
from .xuite import XuiteIE
from .xvideos import XVideosIE
from .xxxymovies import XXXYMoviesIE
from .yahoo import (
YahooIE,
YahooSearchIE,
)
from .yam import YamIE
from .yandexmusic import (
YandexMusicTrackIE,
YandexMusicAlbumIE,
YandexMusicPlaylistIE,
)
from .yesjapan import YesJapanIE
from .yinyuetai import YinYueTaiIE
from .ynet import YnetIE
from .youjizz import YouJizzIE
from .youku import YoukuIE
from .youporn import YouPornIE
from .yourupload import YourUploadIE
from .youtube import (
YoutubeIE,
YoutubeChannelIE,
YoutubeFavouritesIE,
YoutubeHistoryIE,
YoutubePlaylistIE,
YoutubeRecommendedIE,
YoutubeSearchDateIE,
YoutubeSearchIE,
YoutubeSearchURLIE,
YoutubeShowIE,
YoutubeSubscriptionsIE,
YoutubeTruncatedIDIE,
YoutubeTruncatedURLIE,
YoutubeUserIE,
YoutubeWatchLaterIE,
)
from .zapiks import ZapiksIE
from .zdf import ZDFIE, ZDFChannelIE
from .zingmp3 import (
ZingMp3SongIE,
ZingMp3AlbumIE,
)
_ALL_CLASSES = [
klass
for name, klass in globals().items()
if name.endswith('IE') and name != 'GenericIE'
]
_ALL_CLASSES.append(GenericIE)
def gen_extractors():
""" Return a list of an instance of every supported extractor.
The order does matter; the first extractor matched is the one handling the URL.
"""
return [klass() for klass in _ALL_CLASSES]
def list_extractors(age_limit):
"""
Return a list of extractors that are suitable for the given age,
sorted by extractor ID.
"""
return sorted(
filter(lambda ie: ie.is_suitable(age_limit), gen_extractors()),
key=lambda ie: ie.IE_NAME.lower())
def get_info_extractor(ie_name):
"""Returns the info extractor class with the given ie_name"""
return globals()[ie_name + 'IE']
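# Hedged usage sketch (assumption: 'Youtube' is a valid IE name, per the
# imports above): resolve an extractor class by name and build the ordered
# extractor list, where the first matching extractor handles a given URL.
def _example_extractor_lookup():
    youtube_cls = get_info_extractor('Youtube')  # -> YoutubeIE
    extractors = gen_extractors()                # order decides URL matching
    return youtube_cls, len(extractors)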
|
|
import os
import shutil
import textwrap
from collections import defaultdict
from multiprocessing import cpu_count
from subprocess import call, Popen, PIPE
import numpy as np
import pandas as pd
from scipy.stats import pearsonr
from rnaseq_lib.data import map_genes
from rnaseq_lib.docker import fix_permissions, get_base_call
from rnaseq_lib.utils import mkdir_p
def log2fc(a, b, pad=0.001):
"""
Calculate the log2 Fold Change between two arrays, floats, or integers
a and b cannot be, nor contain, values less than 0
:param int|float|np.array a: Value or array
:param int|float|np.array b: Value or array
:param int|float pad: Buffer to add to value before log2 calculation
:return: L2FC array or value
:rtype: int|float|np.array
"""
return np.log2(a + pad) - np.log2(b + pad)
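def _example_log2fc():
    """
    Hedged illustration (not in the original module): with the default pad,
    log2fc(a, b) == log2((a + pad) / (b + pad)), so equal inputs give ~0 and
    a four-fold change gives ~2 (up to the 0.001 pad).
    """
    a = np.array([1.0, 4.0])
    b = np.array([1.0, 1.0])
    return log2fc(a, b)  # ~ array([0.0, 2.0])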
def de_pearson_dataframe(df, genes, pair_by='type', gtex=True, tcga=True):
"""
PearsonR scores of gene differential expression between tumor and normal types.
    1. Calculate log2FC of genes for TCGA tumor samples against their matching TCGA normal type
    2. Compare that log2FC with the log2FC of the tumor type against every other normal type
    3. Calculate the PearsonR between the two and save it
    :param pd.DataFrame df: Exp/TPM dataframe containing "type", "tissue", "tumor", and "label" metadata columns
:param list genes: Genes to use in differential expression calculation
:param str pair_by: How to pair tumors/normals. Either by "type" or "tissue"
:param bool gtex: If True, includes GTEx in normal set
:param bool tcga: If True, includes TCGA in normal set
:return: PearsonR dataframe
:rtype: pd.DataFrame
"""
# Subset by Tumor/Normal
tumor = df[df.label == 'tcga-tumor']
tcga_n = df[df.label == 'tcga-normal']
# Determine normal comparison group based on options
if gtex and tcga:
normal = df[df.tumor == 'no']
elif gtex:
normal = df[df.label == 'gtex']
else:
normal = tcga_n
# Identify tumor types with paired tcga-normal
tum_types = [x for x in sorted(tumor[pair_by].unique())
if x in sorted(df[df.label == 'tcga-normal'][pair_by].unique())]
norm_types = []
# For all paired tumor_types, calculate l2fc, then PearsonR of l2fc to all normal tumor types
pearson_l2fc = defaultdict(list)
for tum_type in tum_types:
# First calculate TCGA tumor/normal prior for comparison
t_med = tumor[tumor[pair_by] == tum_type][genes].median()
n_med = tcga_n[tcga_n[pair_by] == tum_type][genes].median()
prior_l2fc = log2fc(t_med, n_med)
# For every normal type, calculate pearsonR correlation
for (norm_type, label), _ in normal.groupby(pair_by).label.value_counts().iteritems():
if tum_type == norm_type:
l2fc = prior_l2fc
else:
n_med = normal[normal[pair_by] == norm_type][genes].median()
l2fc = log2fc(t_med, n_med)
# Calculate PearsonR of l2fc and comparison tissue/type
pearson_r = round(pearsonr(prior_l2fc, l2fc)[0], 2)
pearson_l2fc[tum_type[:20]].append(pearson_r)
norm_label = '{}_{}'.format(label, norm_type[:20])
if norm_label not in norm_types:
norm_types.append(norm_label)
return pd.DataFrame(pearson_l2fc, index=norm_types)
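def _example_pearson_matrix(df):
    """
    Hedged usage sketch: df is assumed to carry the 'label', 'tumor' and
    pair_by metadata columns alongside gene expression columns; the gene
    list below is an illustrative placeholder.
    """
    genes = ['TP53', 'EGFR']
    return de_pearson_dataframe(df, genes, pair_by='tissue', gtex=False)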
def run_deseq2(df_path, group_a, group_b, output_dir, cores=None):
"""
Runs DESeq2 standard comparison between group A and group B
:param str df_path: Path to samples by genes dataframe
:param list(str) group_a: List of samples in group A
:param list(str) group_b: List of samples in group B
:param str output_dir: Full path to output directory
:param int cores: Number of cores to use. Defaults to # of cores on machine.
"""
# Make workspace directories
work_dir = os.path.join(output_dir, 'work_dir')
mkdir_p(work_dir)
# Write out vectors
tissue_vector = os.path.join(work_dir, 'tissue.vector')
with open(tissue_vector, 'w') as f:
f.write('\n'.join(group_a + group_b))
disease_vector = os.path.join(work_dir, 'disease.vector')
with open(disease_vector, 'w') as f:
f.write('\n'.join(['A' if x in group_a else 'B' for x in group_a + group_b]))
# Write out script
cores = cores if cores else int(cpu_count())
script_path = os.path.join(work_dir, 'deseq2.R')
with open(script_path, 'w') as f:
f.write(
textwrap.dedent("""
library('DESeq2'); library('data.table'); library('BiocParallel')
register(MulticoreParam({cores}))
# Argument parsing
args <- commandArgs(trailingOnly = TRUE)
df_path <- args[1]
tissue_path <- args[2]
disease_path <- args[3]
output_dir <- '/data/'
# Read in vectors
tissue_vector <- read.table(tissue_path)$V1
disease_vector <- read.table(disease_path)$V1
# Read in table and process
n <- read.table(df_path, sep='\\t', header=1, row.names=1, check.names=FALSE)
sub <- n[, colnames(n)%in%tissue_vector]
setcolorder(sub, as.character(tissue_vector))
# Preprocessing
countData <- round(sub)
colData <- data.frame(disease=disease_vector, row.names=colnames(countData))
y <- DESeqDataSetFromMatrix(countData = countData, colData = colData, design = ~ disease)
# Run DESeq2
y <- DESeq(y, parallel=TRUE)
res <- results(y, parallel=TRUE)
summary(res)
# Write out table
resOrdered <- res[order(res$padj),]
res_name <- 'results.tsv'
res_path <- paste(output_dir, res_name, sep='/')
write.table(as.data.frame(resOrdered), file=res_path, col.names=NA, sep='\\t', quote=FALSE)
# MA Plot
ma_name <- 'MA.pdf'
ma_path <- paste(output_dir, ma_name, sep='/')
pdf(ma_path, width=7, height=7)
plotMA(res, main='DESeq2')
dev.off()
# Dispersion Plot
disp_name <- 'dispersion.pdf'
disp_path <- paste(output_dir, disp_name, sep='/')
pdf(disp_path, width=7, height=7)
plotDispEsts( y, ylim = c(1e-6, 1e1) )
dev.off()
# PVal Hist
hist_name <- 'pval-hist.pdf'
hist_path <- paste(output_dir, hist_name, sep='/')
pdf(hist_path, width=7, height=7)
hist( res$pvalue, breaks=20, col="grey" )
dev.off()
# Ratios plots
qs <- c( 0, quantile( res$baseMean[res$baseMean > 0], 0:7/7 ) )
bins <- cut( res$baseMean, qs )
levels(bins) <- paste0("~",round(.5*qs[-1] + .5*qs[-length(qs)]))
ratios <- tapply( res$pvalue, bins, function(p) mean( p < .01, na.rm=TRUE ) )
ratio_name <- 'ratios.pdf'
ratio_path <- paste(output_dir, ratio_name, sep='/')
pdf(ratio_path, width=7, height=7)
barplot(ratios, xlab="mean normalized count", ylab="ratio of small $p$ values")
dev.off()
""".format(cores=cores)))
# Call DESeq2
docker_parameters = ['docker', 'run',
'-v', '{}:/data'.format(output_dir),
'-v', '{}:/df'.format(os.path.dirname(df_path)),
'jvivian/deseq2']
parameters = ['/data/work_dir/deseq2.R',
'/df/{}'.format(os.path.basename(df_path)),
'/data/{}'.format(os.path.join('work_dir', 'tissue.vector')),
'/data/{}'.format(os.path.join('work_dir', 'disease.vector'))]
print '\nCalling: {}\n'.format(' '.join(docker_parameters + parameters))
p = Popen(docker_parameters + parameters, stderr=PIPE, stdout=PIPE)
out, err = p.communicate()
if out or err:
print out
print err
# Fix output of files
fix_permissions(tool='jvivian/deseq2', work_dir=output_dir)
# Add gene names to output
output_tsv = os.path.join(output_dir, 'results.tsv')
df = pd.read_csv(output_tsv, index_col=0, sep='\t')
df.index = map_genes(df.index)
df.to_csv(output_tsv, sep='\t')
# Clean up
shutil.rmtree(work_dir)
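# Hedged usage sketch (assumes Docker and the jvivian/deseq2 image are
# available; all paths and sample IDs are placeholders):
def _example_run_deseq2():
    run_deseq2(df_path='/data/expression.tsv',
               group_a=['tumor-1', 'tumor-2'],
               group_b=['normal-1', 'normal-2'],
               output_dir='/data/deseq2-output')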
def deseq2_normalize(df_path,
output_dir='.',
map_gene_names=True,
clean_workdir=True,
normalize_fn=None,
suffix='.deseq2-normalized.tsv'):
"""
    Accepts a gene-by-sample expression matrix and normalizes its values with DESeq2
Output filename: <INPUT>.deseq2-normalized.tsv
:param str df_path: Path to input expression gene by sample dataframe
:param str output_dir: Output directory
:param bool map_gene_names: If True, maps gene IDs to gene names
:param bool clean_workdir: If True, deletes temporary work directory
:param fn normalize_fn: Pass a function to apply to the dataframe before normalization. e.g. lambda x: 2**x + 1
    :param str suffix: Suffix appended to df_path's basename
:return: Path to normalized dataframe
:rtype: str
"""
# Make workspace directory
output_dir = os.path.abspath(output_dir)
work_dir = os.path.join(output_dir, 'work_dir')
mkdir_p(work_dir)
# Write out pseudo-vector
# Tested that results are the same regardless of vector grouping
samples = [x.strip() for x in open(df_path, 'r').readline().split()]
tissue_vector = os.path.join(work_dir, 'tissue.vector')
with open(tissue_vector, 'w') as f:
f.write('\n'.join(samples))
if normalize_fn:
df = pd.read_csv(df_path, sep='\t', index_col=0)
df = df.apply(normalize_fn)
df_path = os.path.join(os.path.dirname(df_path), 'processed.' + os.path.basename(df_path))
df.to_csv(df_path, sep='\t')
# Write normalization script
output_path = os.path.join(output_dir, os.path.basename(df_path).split('.')[0] + suffix)
script_path = os.path.join(work_dir, 'deseq2.R')
with open(script_path, 'w') as f:
f.write(
textwrap.dedent("""
suppressMessages(library('DESeq2'))
            # Output location (mounted into the container)
output_dir <- '/data/'
# Read in pseudo-vector
tissue_vector <- read.table('/data/work_dir/tissue.vector')$V1
# Read in table and process
print("Reading in dataframe")
n <- read.table('/df/{df_path}', sep='\\t', header=1, row.names=1, check.names=FALSE)
# Preprocessing
print("Rounding data to integers")
countData <- round(n)
print("Creating DESeq2 Dataset Object")
colData <- data.frame(tissue=tissue_vector, row.names=colnames(countData))
dds <- DESeqDataSetFromMatrix(countData = countData, colData = colData, design = ~ tissue)
# Estimate size factors
print("Estimating Size Factors")
dds <- estimateSizeFactors(dds)
# Extract Normalized Counts and Write
print("Extracting normalized counts")
norm <- counts(dds, normalized = TRUE)
print("Writing output: {output_name}")
write.table(norm, file="/data/{output_name}", sep='\\t', quote=F, dec='.', col.names=NA)
""".format(df_path=os.path.basename(df_path), output_name=os.path.basename(output_path))))
# Call Docker
base_params = get_base_call(os.path.dirname(output_path))
parameters = base_params + ['-v', '{}:/df'.format(os.path.abspath(os.path.dirname(df_path))),
'jvivian/deseq2',
'/data/work_dir/deseq2.R']
print '\nCalling: {}\n'.format(' '.join(parameters))
call(parameters)
# Fix output of files
fix_permissions(tool='jvivian/deseq2', work_dir=output_dir)
# Map gene IDs to gene names
if map_gene_names:
df = pd.read_csv(output_path, index_col=0, sep='\t')
df.index = map_genes(df.index)
df.to_csv(output_path, sep='\t')
# Clean up
if clean_workdir:
shutil.rmtree(work_dir)
return output_path
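# Hedged usage sketch (assumes Docker and the jvivian/deseq2 image are
# available; the paths are placeholders and the normalize_fn shown is a
# hypothetical inverse-log transform applied before normalization):
def _example_deseq2_normalize():
    return deseq2_normalize('/data/expression.tsv',
                            output_dir='/data/normalized',
                            normalize_fn=lambda x: 2 ** x - 1)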
|
|
# -*- coding: utf8
"""Random Projection transformers
Random Projections are a simple and computationally efficient way to
reduce the dimensionality of the data by trading a controlled amount
of accuracy (as additional variance) for faster processing times and
smaller model sizes.
The dimensions and distribution of Random Projections matrices are
controlled so as to preserve the pairwise distances between any two
samples of the dataset.
The main theoretical result behind the efficiency of random projection is the
`Johnson-Lindenstrauss lemma (quoting Wikipedia)
<http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma>`_:
In mathematics, the Johnson-Lindenstrauss lemma is a result
concerning low-distortion embeddings of points from high-dimensional
into low-dimensional Euclidean space. The lemma states that a small set
of points in a high-dimensional space can be embedded into a space of
much lower dimension in such a way that distances between the points are
nearly preserved. The map used for the embedding is at least Lipschitz,
and can even be taken to be an orthogonal projection.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>,
# Arnaud Joly <a.joly@ulg.ac.be>
# License: BSD 3 clause
from __future__ import division
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.testing import assert_equal
import scipy.sparse as sp
from .base import BaseEstimator, TransformerMixin
from .externals import six
from .externals.six.moves import xrange
from .utils import check_random_state
from .utils.extmath import safe_sparse_dot
from .utils.random import sample_without_replacement
from .utils.validation import check_arrays
__all__ = ["SparseRandomProjection",
"GaussianRandomProjection",
"johnson_lindenstrauss_min_dim"]
def johnson_lindenstrauss_min_dim(n_samples, eps=0.1):
"""Find a 'safe' number of components to randomly project to
The distortion introduced by a random projection `p` only changes the
distance between two points by a factor (1 +- eps) in an euclidean space
with good probability. The projection `p` is an eps-embedding as defined
by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features], eps is in ]0, 1[ and p is a projection by a random Gaussian
N(0, 1) matrix with shape [n_components, n_features] (or a sparse
Achlioptas matrix).
The minimum number of components to guarantee the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
Note that the number of dimensions is independent of the original
number of features but instead depends on the size of the dataset:
the larger the dataset, the higher is the minimal dimensionality of
an eps-embedding.
Parameters
----------
n_samples : int or numpy array of int greater than 0,
Number of samples. If an array is given, it will compute
a safe number of components array-wise.
eps : float or numpy array of float in ]0,1[, optional (default=0.1)
Maximum distortion rate as defined by the Johnson-Lindenstrauss lemma.
If an array is given, it will compute a safe number of components
array-wise.
Returns
-------
n_components : int or numpy array of int,
The minimal number of components to guarantee with good probability
an eps-embedding with n_samples.
Examples
--------
>>> johnson_lindenstrauss_min_dim(1e6, eps=0.5)
663
>>> johnson_lindenstrauss_min_dim(1e6, eps=[0.5, 0.1, 0.01])
array([ 663, 11841, 1112658])
>>> johnson_lindenstrauss_min_dim([1e4, 1e5, 1e6], eps=0.1)
array([ 7894, 9868, 11841])
References
----------
.. [1] http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
.. [2] Sanjoy Dasgupta and Anupam Gupta, 1999,
"An elementary proof of the Johnson-Lindenstrauss Lemma."
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.45.3654
"""
eps = np.asarray(eps)
n_samples = np.asarray(n_samples)
if np.any(eps <= 0.0) or np.any(eps >= 1):
raise ValueError(
"The JL bound is defined for eps in ]0, 1[, got %r" % eps)
    if np.any(n_samples <= 0):
raise ValueError(
"The JL bound is defined for n_samples greater than zero, got %r"
% n_samples)
denominator = (eps ** 2 / 2) - (eps ** 3 / 3)
return (4 * np.log(n_samples) / denominator).astype(np.int)
def _check_density(density, n_features):
"""Factorize density check according to Li et al."""
if density == 'auto':
density = 1 / np.sqrt(n_features)
elif density <= 0 or density > 1:
raise ValueError("Expected density in range ]0, 1], got: %r"
% density)
return density
def _check_input_size(n_components, n_features):
"""Factorize argument checking for random matrix generation"""
if n_components <= 0:
raise ValueError("n_components must be strictly positive, got %d" %
n_components)
if n_features <= 0:
raise ValueError("n_features must be strictly positive, got %d" %
                         n_features)
def gaussian_random_matrix(n_components, n_features, random_state=None):
""" Generate a dense Gaussian random matrix.
The components of the random matrix are drawn from
N(0, 1.0 / n_components).
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
random_state : int, RandomState instance or None (default=None)
Control the pseudo random number generator used to generate the
matrix at fit time.
Returns
-------
components : numpy array of shape [n_components, n_features]
The generated Gaussian random matrix.
See Also
--------
GaussianRandomProjection
sparse_random_matrix
"""
_check_input_size(n_components, n_features)
rng = check_random_state(random_state)
components = rng.normal(loc=0.0,
scale=1.0 / np.sqrt(n_components),
size=(n_components, n_features))
return components
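def _example_gaussian_matrix():
    """
    Hedged usage sketch: draw a small Gaussian projection matrix; the sizes
    and seed are arbitrary. Entries scale as 1/sqrt(n_components), so
    projected squared distances are preserved in expectation.
    """
    components = gaussian_random_matrix(n_components=10, n_features=100,
                                        random_state=0)
    return components.shape  # (10, 100)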
def sparse_random_matrix(n_components, n_features, density='auto',
random_state=None):
"""Generalized Achlioptas random sparse matrix for random projection
Setting density to 1 / 3 will yield the original matrix by Dimitris
Achlioptas while setting a lower value will yield the generalization
by Ping Li et al.
If we note :math:`s = 1 / density`, the components of the random matrix are
drawn from:
- -sqrt(s) / sqrt(n_components) with probability 1 / 2s
- 0 with probability 1 - 1 / s
- +sqrt(s) / sqrt(n_components) with probability 1 / 2s
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
density : float in range ]0, 1] or 'auto', optional (default='auto')
Ratio of non-zero component in the random projection matrix.
If density = 'auto', the value is set to the minimum density
as recommended by Ping Li et al.: 1 / sqrt(n_features).
Use density = 1 / 3.0 if you want to reproduce the results from
Achlioptas, 2001.
random_state : integer, RandomState instance or None (default=None)
Control the pseudo random number generator used to generate the
matrix at fit time.
Returns
-------
components: numpy array or CSR matrix with shape [n_components, n_features]
The generated sparse random matrix.
See Also
--------
SparseRandomProjection
gaussian_random_matrix
References
----------
.. [1] Ping Li, T. Hastie and K. W. Church, 2006,
"Very Sparse Random Projections".
http://www.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf
.. [2] D. Achlioptas, 2001, "Database-friendly random projections",
http://www.cs.ucsc.edu/~optas/papers/jl.pdf
"""
_check_input_size(n_components, n_features)
density = _check_density(density, n_features)
rng = check_random_state(random_state)
if density == 1:
# skip index generation if totally dense
components = rng.binomial(1, 0.5, (n_components, n_features)) * 2 - 1
return 1 / np.sqrt(n_components) * components
else:
# Generate location of non zero elements
indices = []
offset = 0
indptr = [offset]
for i in xrange(n_components):
# find the indices of the non-zero components for row i
n_nonzero_i = rng.binomial(n_features, density)
indices_i = sample_without_replacement(n_features, n_nonzero_i,
random_state=rng)
indices.append(indices_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
# Among non zero components the probability of the sign is 50%/50%
data = rng.binomial(1, 0.5, size=np.size(indices)) * 2 - 1
# build the CSR structure by concatenating the rows
components = sp.csr_matrix((data, indices, indptr),
shape=(n_components, n_features))
return np.sqrt(1 / density) / np.sqrt(n_components) * components
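# A minimal usage sketch (illustration only): with the default 'auto' density
# of 1 / sqrt(n_features), the returned CSR matrix is mostly zeros, and its
# non-zero entries take the two symmetric values +/-sqrt(s) / sqrt(n_components).
def _demo_sparse_random_matrix():
    components = sparse_random_matrix(100, 10000, random_state=0)
    print(float(components.nnz) / (100 * 10000))  # close to 1 / sqrt(10000) = 0.01
    print(np.unique(components.data))  # the two symmetric values [-1.  1.]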
class BaseRandomProjection(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class for random projections.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self, n_components='auto', eps=0.1, dense_output=False,
random_state=None):
self.n_components = n_components
self.eps = eps
self.dense_output = dense_output
self.random_state = random_state
self.components_ = None
self.n_components_ = None
@abstractmethod
def _make_random_matrix(self, n_components, n_features):
""" Generate the random projection matrix
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
Returns
-------
components : numpy array or CSR matrix [n_components, n_features]
The generated random matrix.
"""
def fit(self, X, y=None):
"""Generate a sparse random projection matrix
Parameters
----------
X : numpy array or scipy.sparse of shape [n_samples, n_features]
Training set: only the shape is used to find optimal random
matrix dimensions based on the theory referenced in the
aforementioned papers.
y : is not used: placeholder to allow for usage in a Pipeline.
Returns
-------
self
"""
X, y = check_arrays(X, y)
if not sp.issparse(X):
X = np.atleast_2d(X)
n_samples, n_features = X.shape
if self.n_components == 'auto':
self.n_components_ = johnson_lindenstrauss_min_dim(
n_samples=n_samples, eps=self.eps)
if self.n_components_ <= 0:
raise ValueError(
'eps=%f and n_samples=%d lead to a target dimension of '
'%d which is invalid' % (
self.eps, n_samples, self.n_components_))
elif self.n_components_ > n_features:
raise ValueError(
'eps=%f and n_samples=%d lead to a target dimension of '
'%d which is larger than the original space with '
'n_features=%d' % (self.eps, n_samples, self.n_components_,
n_features))
else:
if self.n_components <= 0:
raise ValueError("n_components must be greater than 0, got %s"
% self.n_components)
elif self.n_components > n_features:
warnings.warn(
"The number of components is higher than the number of"
" features: n_features < n_components (%s < %s)."
"The dimensionality of the problem will not be reduced."
% (n_features, self.n_components))
self.n_components_ = self.n_components
# Generate a projection matrix of size [n_components, n_features]
self.components_ = self._make_random_matrix(self.n_components_,
n_features)
# Check contract
assert_equal(
self.components_.shape,
(self.n_components_, n_features),
err_msg=('An error has occurred: the self.components_ matrix '
'does not have the proper shape.'))
return self
def transform(self, X, y=None):
"""Project the data by using matrix product with the random matrix
Parameters
----------
X : numpy array or scipy.sparse of shape [n_samples, n_features]
The input data to project into a smaller dimensional space.
y : is not used: placeholder to allow for usage in a Pipeline.
Returns
-------
X_new : numpy array or scipy sparse of shape [n_samples, n_components]
Projected array.
"""
X, y = check_arrays(X, y)
if self.components_ is None:
raise ValueError('No random projection matrix has been fit.')
if X.shape[1] != self.components_.shape[1]:
raise ValueError(
'Impossible to perform projection: '
'X has a different number of features than at fit time. '
'(%s != %s)' % (X.shape[1], self.components_.shape[1]))
if not sp.issparse(X):
X = np.atleast_2d(X)
X_new = safe_sparse_dot(X, self.components_.T,
dense_output=self.dense_output)
return X_new
class GaussianRandomProjection(BaseRandomProjection):
"""Reduce dimensionality through Gaussian random projection
The components of the random matrix are drawn from N(0, 1 / n_components).
Parameters
----------
n_components : int or 'auto', optional (default = 'auto')
Dimensionality of the target projection space.
n_components can be automatically adjusted according to the
number of samples in the dataset and the bound given by the
Johnson-Lindenstrauss lemma. In that case the quality of the
embedding is controlled by the ``eps`` parameter.
It should be noted that the Johnson-Lindenstrauss lemma can yield
very conservative estimates of the required number of components
as it makes no assumption on the structure of the dataset.
eps : strictly positive float, optional (default=0.1)
Parameter to control the quality of the embedding according to
the Johnson-Lindenstrauss lemma when n_components is set to
'auto'.
Smaller values lead to better embedding and higher number of
dimensions (n_components) in the target projection space.
random_state : integer, RandomState instance or None (default=None)
Control the pseudo random number generator used to generate the
matrix at fit time.
Attributes
----------
``n_components_`` : int
Concrete number of components computed when n_components="auto".
``components_`` : numpy array of shape [n_components, n_features]
Random matrix used for the projection.
See Also
--------
SparseRandomProjection
"""
def __init__(self, n_components='auto', eps=0.1, random_state=None):
super(GaussianRandomProjection, self).__init__(
n_components=n_components,
eps=eps,
dense_output=True,
random_state=random_state)
def _make_random_matrix(self, n_components, n_features):
""" Generate the random projection matrix
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
Returns
-------
components : numpy array or CSR matrix [n_components, n_features]
The generated random matrix.
"""
random_state = check_random_state(self.random_state)
return gaussian_random_matrix(n_components,
n_features,
random_state=random_state)
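# A minimal end-to-end sketch (assumed random data, illustration only): shows
# the fit/transform flow inherited from BaseRandomProjection above.
def _demo_gaussian_random_projection():
    rng = np.random.RandomState(42)
    X = rng.rand(25, 3000)
    transformer = GaussianRandomProjection(n_components=50, random_state=42)
    X_new = transformer.fit_transform(X)
    print(X_new.shape)  # (25, 50)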
class SparseRandomProjection(BaseRandomProjection):
"""Reduce dimensionality through sparse random projection
A sparse random matrix is an alternative to a dense random
projection matrix that guarantees similar embedding quality while being
much more memory efficient and allowing faster computation of the
projected data.
If we note `s = 1 / density`, the components of the random matrix are
drawn from:
- -sqrt(s) / sqrt(n_components) with probability 1 / 2s
- 0 with probability 1 - 1 / s
- +sqrt(s) / sqrt(n_components) with probability 1 / 2s
Parameters
----------
n_components : int or 'auto', optional (default = 'auto')
Dimensionality of the target projection space.
n_components can be automatically adjusted according to the
number of samples in the dataset and the bound given by the
Johnson-Lindenstrauss lemma. In that case the quality of the
embedding is controlled by the ``eps`` parameter.
It should be noted that the Johnson-Lindenstrauss lemma can yield
very conservative estimates of the required number of components
as it makes no assumption on the structure of the dataset.
density : float in range ]0, 1], optional (default='auto')
Ratio of non-zero component in the random projection matrix.
If density = 'auto', the value is set to the minimum density
as recommended by Ping Li et al.: 1 / sqrt(n_features).
Use density = 1 / 3.0 if you want to reproduce the results from
Achlioptas, 2001.
eps : strictly positive float, optional, (default=0.1)
Parameter to control the quality of the embedding according to
the Johnson-Lindenstrauss lemma when n_components is set to
'auto'.
Smaller values lead to better embedding and higher number of
dimensions (n_components) in the target projection space.
dense_output : boolean, optional (default=False)
If True, ensure that the output of the random projection is a
dense numpy array even if the input and random projection matrix
are both sparse. In practice, if the number of components is
small the number of zero components in the projected data will
be very small and it will be more CPU and memory efficient to
use a dense representation.
If False, the projected data uses a sparse representation if
the input is sparse.
random_state : integer, RandomState instance or None (default=None)
Control the pseudo random number generator used to generate the
matrix at fit time.
Attributes
----------
``n_components_`` : int
Concrete number of components computed when n_components="auto".
``components_`` : CSR matrix with shape [n_components, n_features]
Random matrix used for the projection.
``density_`` : float in range 0.0 - 1.0
Concrete density computed when density = "auto".
See Also
--------
GaussianRandomProjection
References
----------
.. [1] Ping Li, T. Hastie and K. W. Church, 2006,
"Very Sparse Random Projections".
http://www.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf
.. [2] D. Achlioptas, 2001, "Database-friendly random projections",
http://www.cs.ucsc.edu/~optas/papers/jl.pdf
"""
def __init__(self, n_components='auto', density='auto', eps=0.1,
dense_output=False, random_state=None):
super(SparseRandomProjection, self).__init__(
n_components=n_components,
eps=eps,
dense_output=dense_output,
random_state=random_state)
self.density = density
self.density_ = None
def _make_random_matrix(self, n_components, n_features):
""" Generate the random projection matrix
Parameters
----------
n_components : int,
Dimensionality of the target projection space.
n_features : int,
Dimensionality of the original source space.
Returns
-------
components : numpy array or CSR matrix [n_components, n_features]
The generated random matrix.
"""
random_state = check_random_state(self.random_state)
self.density_ = _check_density(self.density, n_features)
return sparse_random_matrix(n_components,
n_features,
density=self.density_,
random_state=random_state)
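# A minimal end-to-end sketch (assumed random data, illustration only): the
# fitted density_ falls back to the Ping Li et al. minimum 1 / sqrt(n_features).
def _demo_sparse_random_projection():
    rng = np.random.RandomState(42)
    X = rng.rand(25, 3000)
    transformer = SparseRandomProjection(n_components=50, random_state=42)
    X_new = transformer.fit_transform(X)
    print(X_new.shape)  # (25, 50)
    print(transformer.density_)  # 1 / sqrt(3000), about 0.0183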
|
|
# stdlib
from collections import defaultdict, namedtuple
import time
import urlparse
# 3p
import requests
# project
from checks import AgentCheck
from config import _is_affirmative
from util import headers
class NodeNotFound(Exception):
pass
ESInstanceConfig = namedtuple(
'ESInstanceConfig', [
'pshard_stats',
'cluster_stats',
'password',
'service_check_tags',
'tags',
'timeout',
'url',
'username',
'pending_task_stats',
])
class ESCheck(AgentCheck):
SERVICE_CHECK_CONNECT_NAME = 'elasticsearch.can_connect'
SERVICE_CHECK_CLUSTER_STATUS = 'elasticsearch.cluster_health'
DEFAULT_TIMEOUT = 5
# Cluster-wide metrics, pre-aggregated on ES, compatible with all ES versions
PRIMARY_SHARD_METRICS = {
"elasticsearch.primaries.docs.count": ("gauge", "_all.primaries.docs.count"),
"elasticsearch.primaries.docs.deleted": ("gauge", "_all.primaries.docs.deleted"),
"elasticsearch.primaries.store.size": ("gauge", "_all.primaries.store.size_in_bytes"),
"elasticsearch.primaries.indexing.index.total": ("gauge", "_all.primaries.indexing.index_total"),
"elasticsearch.primaries.indexing.index.time": ("gauge", "_all.primaries.indexing.index_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.indexing.index.current": ("gauge", "_all.primaries.indexing.index_current"),
"elasticsearch.primaries.indexing.delete.total": ("gauge", "_all.primaries.indexing.delete_total"),
"elasticsearch.primaries.indexing.delete.time": ("gauge", "_all.primaries.indexing.delete_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.indexing.delete.current": ("gauge", "_all.primaries.indexing.delete_current"),
"elasticsearch.primaries.get.total": ("gauge", "_all.primaries.get.total"),
"elasticsearch.primaries.get.time": ("gauge", "_all.primaries.get.time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.get.current": ("gauge", "_all.primaries.get.current"),
"elasticsearch.primaries.get.exists.total": ("gauge", "_all.primaries.get.exists_total"),
"elasticsearch.primaries.get.exists.time": ("gauge", "_all.primaries.get.exists_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.get.missing.total": ("gauge", "_all.primaries.get.missing_total"),
"elasticsearch.primaries.get.missing.time": ("gauge", "_all.primaries.get.missing_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.search.query.total": ("gauge", "_all.primaries.search.query_total"),
"elasticsearch.primaries.search.query.time": ("gauge", "_all.primaries.search.query_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.search.query.current": ("gauge", "_all.primaries.search.query_current"),
"elasticsearch.primaries.search.fetch.total": ("gauge", "_all.primaries.search.fetch_total"),
"elasticsearch.primaries.search.fetch.time": ("gauge", "_all.primaries.search.fetch_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.search.fetch.current": ("gauge", "_all.primaries.search.fetch_current")
}
PRIMARY_SHARD_METRICS_POST_1_0 = {
"elasticsearch.primaries.merges.current": ("gauge", "_all.primaries.merges.current"),
"elasticsearch.primaries.merges.current.docs": ("gauge", "_all.primaries.merges.current_docs"),
"elasticsearch.primaries.merges.current.size": ("gauge", "_all.primaries.merges.current_size_in_bytes"),
"elasticsearch.primaries.merges.total": ("gauge", "_all.primaries.merges.total"),
"elasticsearch.primaries.merges.total.time": ("gauge", "_all.primaries.merges.total_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.merges.total.docs": ("gauge", "_all.primaries.merges.total_docs"),
"elasticsearch.primaries.merges.total.size": ("gauge", "_all.primaries.merges.total_size_in_bytes"),
"elasticsearch.primaries.refresh.total": ("gauge", "_all.primaries.refresh.total"),
"elasticsearch.primaries.refresh.total.time": ("gauge", "_all.primaries.refresh.total_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.flush.total": ("gauge", "_all.primaries.flush.total"),
"elasticsearch.primaries.flush.total.time": ("gauge", "_all.primaries.flush.total_time_in_millis", lambda v: float(v)/1000)
}
STATS_METRICS = { # Metrics that are common to all Elasticsearch versions
"elasticsearch.docs.count": ("gauge", "indices.docs.count"),
"elasticsearch.docs.deleted": ("gauge", "indices.docs.deleted"),
"elasticsearch.store.size": ("gauge", "indices.store.size_in_bytes"),
"elasticsearch.indexing.index.total": ("gauge", "indices.indexing.index_total"),
"elasticsearch.indexing.index.time": ("gauge", "indices.indexing.index_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.indexing.index.current": ("gauge", "indices.indexing.index_current"),
"elasticsearch.indexing.delete.total": ("gauge", "indices.indexing.delete_total"),
"elasticsearch.indexing.delete.time": ("gauge", "indices.indexing.delete_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.indexing.delete.current": ("gauge", "indices.indexing.delete_current"),
"elasticsearch.get.total": ("gauge", "indices.get.total"),
"elasticsearch.get.time": ("gauge", "indices.get.time_in_millis", lambda v: float(v)/1000),
"elasticsearch.get.current": ("gauge", "indices.get.current"),
"elasticsearch.get.exists.total": ("gauge", "indices.get.exists_total"),
"elasticsearch.get.exists.time": ("gauge", "indices.get.exists_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.get.missing.total": ("gauge", "indices.get.missing_total"),
"elasticsearch.get.missing.time": ("gauge", "indices.get.missing_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.search.query.total": ("gauge", "indices.search.query_total"),
"elasticsearch.search.query.time": ("gauge", "indices.search.query_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.search.query.current": ("gauge", "indices.search.query_current"),
"elasticsearch.search.fetch.total": ("gauge", "indices.search.fetch_total"),
"elasticsearch.search.fetch.time": ("gauge", "indices.search.fetch_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.search.fetch.current": ("gauge", "indices.search.fetch_current"),
"elasticsearch.indices.segments.count": ("gauge", "indices.segments.count"),
"elasticsearch.indices.segments.memory_in_bytes": ("gauge", "indices.segments.memory_in_bytes"),
"elasticsearch.merges.current": ("gauge", "indices.merges.current"),
"elasticsearch.merges.current.docs": ("gauge", "indices.merges.current_docs"),
"elasticsearch.merges.current.size": ("gauge", "indices.merges.current_size_in_bytes"),
"elasticsearch.merges.total": ("gauge", "indices.merges.total"),
"elasticsearch.merges.total.time": ("gauge", "indices.merges.total_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.merges.total.docs": ("gauge", "indices.merges.total_docs"),
"elasticsearch.merges.total.size": ("gauge", "indices.merges.total_size_in_bytes"),
"elasticsearch.refresh.total": ("gauge", "indices.refresh.total"),
"elasticsearch.refresh.total.time": ("gauge", "indices.refresh.total_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.flush.total": ("gauge", "indices.flush.total"),
"elasticsearch.flush.total.time": ("gauge", "indices.flush.total_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.process.open_fd": ("gauge", "process.open_file_descriptors"),
"elasticsearch.transport.rx_count": ("gauge", "transport.rx_count"),
"elasticsearch.transport.tx_count": ("gauge", "transport.tx_count"),
"elasticsearch.transport.rx_size": ("gauge", "transport.rx_size_in_bytes"),
"elasticsearch.transport.tx_size": ("gauge", "transport.tx_size_in_bytes"),
"elasticsearch.transport.server_open": ("gauge", "transport.server_open"),
"elasticsearch.thread_pool.bulk.active": ("gauge", "thread_pool.bulk.active"),
"elasticsearch.thread_pool.bulk.threads": ("gauge", "thread_pool.bulk.threads"),
"elasticsearch.thread_pool.bulk.queue": ("gauge", "thread_pool.bulk.queue"),
"elasticsearch.thread_pool.flush.active": ("gauge", "thread_pool.flush.active"),
"elasticsearch.thread_pool.flush.threads": ("gauge", "thread_pool.flush.threads"),
"elasticsearch.thread_pool.flush.queue": ("gauge", "thread_pool.flush.queue"),
"elasticsearch.thread_pool.generic.active": ("gauge", "thread_pool.generic.active"),
"elasticsearch.thread_pool.generic.threads": ("gauge", "thread_pool.generic.threads"),
"elasticsearch.thread_pool.generic.queue": ("gauge", "thread_pool.generic.queue"),
"elasticsearch.thread_pool.get.active": ("gauge", "thread_pool.get.active"),
"elasticsearch.thread_pool.get.threads": ("gauge", "thread_pool.get.threads"),
"elasticsearch.thread_pool.get.queue": ("gauge", "thread_pool.get.queue"),
"elasticsearch.thread_pool.index.active": ("gauge", "thread_pool.index.active"),
"elasticsearch.thread_pool.index.threads": ("gauge", "thread_pool.index.threads"),
"elasticsearch.thread_pool.index.queue": ("gauge", "thread_pool.index.queue"),
"elasticsearch.thread_pool.management.active": ("gauge", "thread_pool.management.active"),
"elasticsearch.thread_pool.management.threads": ("gauge", "thread_pool.management.threads"),
"elasticsearch.thread_pool.management.queue": ("gauge", "thread_pool.management.queue"),
"elasticsearch.thread_pool.merge.active": ("gauge", "thread_pool.merge.active"),
"elasticsearch.thread_pool.merge.threads": ("gauge", "thread_pool.merge.threads"),
"elasticsearch.thread_pool.merge.queue": ("gauge", "thread_pool.merge.queue"),
"elasticsearch.thread_pool.percolate.active": ("gauge", "thread_pool.percolate.active"),
"elasticsearch.thread_pool.percolate.threads": ("gauge", "thread_pool.percolate.threads"),
"elasticsearch.thread_pool.percolate.queue": ("gauge", "thread_pool.percolate.queue"),
"elasticsearch.thread_pool.refresh.active": ("gauge", "thread_pool.refresh.active"),
"elasticsearch.thread_pool.refresh.threads": ("gauge", "thread_pool.refresh.threads"),
"elasticsearch.thread_pool.refresh.queue": ("gauge", "thread_pool.refresh.queue"),
"elasticsearch.thread_pool.search.active": ("gauge", "thread_pool.search.active"),
"elasticsearch.thread_pool.search.threads": ("gauge", "thread_pool.search.threads"),
"elasticsearch.thread_pool.search.queue": ("gauge", "thread_pool.search.queue"),
"elasticsearch.thread_pool.snapshot.active": ("gauge", "thread_pool.snapshot.active"),
"elasticsearch.thread_pool.snapshot.threads": ("gauge", "thread_pool.snapshot.threads"),
"elasticsearch.thread_pool.snapshot.queue": ("gauge", "thread_pool.snapshot.queue"),
"elasticsearch.http.current_open": ("gauge", "http.current_open"),
"elasticsearch.http.total_opened": ("gauge", "http.total_opened"),
"jvm.mem.heap_committed": ("gauge", "jvm.mem.heap_committed_in_bytes"),
"jvm.mem.heap_used": ("gauge", "jvm.mem.heap_used_in_bytes"),
"jvm.mem.heap_in_use": ("gauge", "jvm.mem.heap_used_percent"),
"jvm.mem.heap_max": ("gauge", "jvm.mem.heap_max_in_bytes"),
"jvm.mem.non_heap_committed": ("gauge", "jvm.mem.non_heap_committed_in_bytes"),
"jvm.mem.non_heap_used": ("gauge", "jvm.mem.non_heap_used_in_bytes"),
"jvm.threads.count": ("gauge", "jvm.threads.count"),
"jvm.threads.peak_count": ("gauge", "jvm.threads.peak_count"),
"elasticsearch.fs.total.total_in_bytes": ("gauge", "fs.total.total_in_bytes"),
"elasticsearch.fs.total.free_in_bytes": ("gauge", "fs.total.free_in_bytes"),
"elasticsearch.fs.total.available_in_bytes": ("gauge", "fs.total.available_in_bytes"),
}
JVM_METRICS_POST_0_90_10 = {
"jvm.gc.collectors.young.count": ("gauge", "jvm.gc.collectors.young.collection_count"),
"jvm.gc.collectors.young.collection_time": ("gauge", "jvm.gc.collectors.young.collection_time_in_millis", lambda v: float(v)/1000),
"jvm.gc.collectors.old.count": ("gauge", "jvm.gc.collectors.old.collection_count"),
"jvm.gc.collectors.old.collection_time": ("gauge", "jvm.gc.collectors.old.collection_time_in_millis", lambda v: float(v)/1000)
}
JVM_METRICS_PRE_0_90_10 = {
"jvm.gc.concurrent_mark_sweep.count": ("gauge", "jvm.gc.collectors.ConcurrentMarkSweep.collection_count"),
"jvm.gc.concurrent_mark_sweep.collection_time": ("gauge", "jvm.gc.collectors.ConcurrentMarkSweep.collection_time_in_millis", lambda v: float(v)/1000),
"jvm.gc.par_new.count": ("gauge", "jvm.gc.collectors.ParNew.collection_count"),
"jvm.gc.par_new.collection_time": ("gauge", "jvm.gc.collectors.ParNew.collection_time_in_millis", lambda v: float(v)/1000),
"jvm.gc.collection_count": ("gauge", "jvm.gc.collection_count"),
"jvm.gc.collection_time": ("gauge", "jvm.gc.collection_time_in_millis", lambda v: float(v)/1000),
}
ADDITIONAL_METRICS_POST_0_90_5 = {
"elasticsearch.search.fetch.open_contexts": ("gauge", "indices.search.open_contexts"),
"elasticsearch.cache.filter.evictions": ("gauge", "indices.filter_cache.evictions"),
"elasticsearch.cache.filter.size": ("gauge", "indices.filter_cache.memory_size_in_bytes"),
"elasticsearch.id_cache.size": ("gauge", "indices.id_cache.memory_size_in_bytes"),
"elasticsearch.fielddata.size": ("gauge", "indices.fielddata.memory_size_in_bytes"),
"elasticsearch.fielddata.evictions": ("gauge", "indices.fielddata.evictions"),
}
ADDITIONAL_METRICS_PRE_0_90_5 = {
"elasticsearch.cache.field.evictions": ("gauge", "indices.cache.field_evictions"),
"elasticsearch.cache.field.size": ("gauge", "indices.cache.field_size_in_bytes"),
"elasticsearch.cache.filter.count": ("gauge", "indices.cache.filter_count"),
"elasticsearch.cache.filter.evictions": ("gauge", "indices.cache.filter_evictions"),
"elasticsearch.cache.filter.size": ("gauge", "indices.cache.filter_size_in_bytes"),
}
ADDITIONAL_METRICS_POST_1_0_0 = {
"elasticsearch.indices.translog.size_in_bytes": ("gauge", "indices.translog.size_in_bytes"),
"elasticsearch.indices.translog.operations": ("gauge", "indices.translog.operations"),
"elasticsearch.fs.total.disk_reads": ("rate", "fs.total.disk_reads"),
"elasticsearch.fs.total.disk_writes": ("rate", "fs.total.disk_writes"),
"elasticsearch.fs.total.disk_io_op": ("rate", "fs.total.disk_io_op"),
"elasticsearch.fs.total.disk_read_size_in_bytes": ("gauge", "fs.total.disk_read_size_in_bytes"),
"elasticsearch.fs.total.disk_write_size_in_bytes": ("gauge", "fs.total.disk_write_size_in_bytes"),
"elasticsearch.fs.total.disk_io_size_in_bytes": ("gauge", "fs.total.disk_io_size_in_bytes"),
}
ADDITIONAL_METRICS_POST_1_3_0 = {
"elasticsearch.indices.segments.index_writer_memory_in_bytes": ("gauge", "indices.segments.index_writer_memory_in_bytes"),
"elasticsearch.indices.segments.version_map_memory_in_bytes": ("gauge", "indices.segments.version_map_memory_in_bytes"),
}
ADDITIONAL_METRICS_POST_1_4_0 = {
"elasticsearch.indices.segments.index_writer_max_memory_in_bytes": ("gauge", "indices.segments.index_writer_max_memory_in_bytes"),
"elasticsearch.indices.segments.fixed_bit_set_memory_in_bytes": ("gauge", "indices.segments.fixed_bit_set_memory_in_bytes"),
}
CLUSTER_HEALTH_METRICS = {
"elasticsearch.number_of_nodes": ("gauge", "number_of_nodes"),
"elasticsearch.number_of_data_nodes": ("gauge", "number_of_data_nodes"),
"elasticsearch.active_primary_shards": ("gauge", "active_primary_shards"),
"elasticsearch.active_shards": ("gauge", "active_shards"),
"elasticsearch.relocating_shards": ("gauge", "relocating_shards"),
"elasticsearch.initializing_shards": ("gauge", "initializing_shards"),
"elasticsearch.unassigned_shards": ("gauge", "unassigned_shards"),
"elasticsearch.cluster_status": ("gauge", "status", lambda v: {"red": 0, "yellow": 1, "green": 2}.get(v, -1)),
}
CLUSTER_PENDING_TASKS = {
"elasticsearch.pending_tasks_total": ("gauge", "pending_task_total"),
"elasticsearch.pending_tasks_priority_high": ("gauge", "pending_tasks_priority_high"),
"elasticsearch.pending_tasks_priority_urgent": ("gauge", "pending_tasks_priority_urgent")
}
SOURCE_TYPE_NAME = 'elasticsearch'
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
# Host status needs to persist across all checks
self.cluster_status = {}
def get_instance_config(self, instance):
url = instance.get('url')
if url is None:
raise Exception("An url must be specified in the instance")
pshard_stats = _is_affirmative(instance.get('pshard_stats', False))
cluster_stats = _is_affirmative(instance.get('cluster_stats', False))
if 'is_external' in instance:
cluster_stats = _is_affirmative(instance.get('is_external', False))
pending_task_stats = _is_affirmative(instance.get('pending_task_stats', True))
# Support URLs that have a path in them from the config, for
# backwards-compatibility.
parsed = urlparse.urlparse(url)
if parsed[2] != "":
url = "%s://%s" % (parsed[0], parsed[1])
port = parsed.port
host = parsed.hostname
custom_tags = instance.get('tags', [])
service_check_tags = [
'host:%s' % host,
'port:%s' % port
]
service_check_tags.extend(custom_tags)
# Tag by URL so we can differentiate the metrics
# from multiple instances
tags = ['url:%s' % url]
tags.extend(custom_tags)
timeout = instance.get('timeout') or self.DEFAULT_TIMEOUT
config = ESInstanceConfig(
pshard_stats=pshard_stats,
cluster_stats=cluster_stats,
password=instance.get('password'),
service_check_tags=service_check_tags,
tags=tags,
timeout=timeout,
url=url,
username=instance.get('username'),
pending_task_stats=pending_task_stats
)
return config
def check(self, instance):
config = self.get_instance_config(instance)
# Check ES version for this instance and define parameters
# (URLs and metrics) accordingly
version = self._get_es_version(config)
health_url, nodes_url, stats_url, pshard_stats_url, pending_tasks_url, stats_metrics, \
pshard_stats_metrics = self._define_params(version, config.cluster_stats)
# Load cluster-wide data
if config.pshard_stats:
pshard_stats_url = urlparse.urljoin(config.url, pshard_stats_url)
pshard_stats_data = self._get_data(pshard_stats_url, config)
self._process_pshard_stats_data(pshard_stats_data, config, pshard_stats_metrics)
# Load stats data.
stats_url = urlparse.urljoin(config.url, stats_url)
stats_data = self._get_data(stats_url, config)
self._process_stats_data(nodes_url, stats_data, stats_metrics, config)
# Load the health data.
health_url = urlparse.urljoin(config.url, health_url)
health_data = self._get_data(health_url, config)
self._process_health_data(health_data, config)
if config.pending_task_stats:
# Load the pending_tasks data.
pending_tasks_url = urlparse.urljoin(config.url, pending_tasks_url)
pending_tasks_data = self._get_data(pending_tasks_url, config)
self._process_pending_tasks_data(pending_tasks_data, config)
# If we're here we did not have any ES conn issues
self.service_check(
self.SERVICE_CHECK_CONNECT_NAME,
AgentCheck.OK,
tags=config.service_check_tags
)
def _get_es_version(self, config):
""" Get the running version of elasticsearch.
"""
try:
data = self._get_data(config.url, config, send_sc=False)
version = map(int, data['version']['number'].split('.')[0:3])
except Exception, e:
self.warning(
"Error while trying to get Elasticsearch version "
"from %s %s"
% (config.url, str(e))
)
version = [1, 0, 0]
self.service_metadata('version', version)
self.log.debug("Elasticsearch version is %s" % version)
return version
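# For example (hypothetical response), data['version']['number'] == "1.4.2"
# parses to version == [1, 4, 2], which the list comparisons in
# _define_params below rely on.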
def _define_params(self, version, cluster_stats):
""" Define the set of URLs and METRICS to use depending on the
running ES version.
"""
pshard_stats_url = "/_stats"
if version >= [0, 90, 10]:
# ES versions 0.90.10 and above
health_url = "/_cluster/health?pretty=true"
nodes_url = "/_nodes?network=true"
pending_tasks_url = "/_cluster/pending_tasks?pretty=true"
# For "external" clusters, we want to collect from all nodes.
if cluster_stats:
stats_url = "/_nodes/stats?all=true"
else:
stats_url = "/_nodes/_local/stats?all=true"
additional_metrics = self.JVM_METRICS_POST_0_90_10
else:
health_url = "/_cluster/health?pretty=true"
nodes_url = "/_cluster/nodes?network=true"
pending_tasks_url = None
if cluster_stats:
stats_url = "/_cluster/nodes/stats?all=true"
else:
stats_url = "/_cluster/nodes/_local/stats?all=true"
additional_metrics = self.JVM_METRICS_PRE_0_90_10
stats_metrics = dict(self.STATS_METRICS)
stats_metrics.update(additional_metrics)
### Additional Stats metrics ###
if version >= [0, 90, 5]:
# ES versions 0.90.5 and above
additional_metrics = self.ADDITIONAL_METRICS_POST_0_90_5
else:
# ES version 0.90.4 and below
additional_metrics = self.ADDITIONAL_METRICS_PRE_0_90_5
stats_metrics.update(additional_metrics)
if version >= [1, 0, 0]:
stats_metrics.update(self.ADDITIONAL_METRICS_POST_1_0_0)
if version >= [1, 3, 0]:
stats_metrics.update(self.ADDITIONAL_METRICS_POST_1_3_0)
if version >= [1, 4, 0]:
# ES versions 1.4 and above
stats_metrics.update(self.ADDITIONAL_METRICS_POST_1_4_0)
# Version specific stats metrics about the primary shards
pshard_stats_metrics = dict(self.PRIMARY_SHARD_METRICS)
if version >= [1, 0, 0]:
additional_metrics = self.PRIMARY_SHARD_METRICS_POST_1_0
pshard_stats_metrics.update(additional_metrics)
return health_url, nodes_url, stats_url, pshard_stats_url, pending_tasks_url, \
stats_metrics, pshard_stats_metrics
def _get_data(self, url, config, send_sc=True):
""" Hit a given URL and return the parsed json
"""
# Load basic authentication configuration, if available.
if config.username and config.password:
auth = (config.username, config.password)
else:
auth = None
try:
resp = requests.get(
url,
timeout=config.timeout,
headers=headers(self.agentConfig),
auth=auth
)
resp.raise_for_status()
except Exception as e:
if send_sc:
self.service_check(
self.SERVICE_CHECK_CONNECT_NAME,
AgentCheck.CRITICAL,
message="Error {0} when hitting {1}".format(e, url),
tags=config.service_check_tags
)
raise
return resp.json()
def _process_pending_tasks_data(self, data, config):
p_tasks = defaultdict(int)
for task in data.get('tasks', []):
p_tasks[task.get('priority')] += 1
node_data = {
'pending_task_total': sum(p_tasks.values()),
'pending_tasks_priority_high': p_tasks['high'],
'pending_tasks_priority_urgent': p_tasks['urgent'],
}
for metric in self.CLUSTER_PENDING_TASKS:
# metric description
desc = self.CLUSTER_PENDING_TASKS[metric]
self._process_metric(node_data, metric, *desc, tags=config.tags)
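# A minimal sketch of the aggregation above (hypothetical payload):
# data = {'tasks': [{'priority': 'urgent'}, {'priority': 'high'},
#                   {'priority': 'high'}]}
# yields node_data == {'pending_task_total': 3,
#                      'pending_tasks_priority_high': 2,
#                      'pending_tasks_priority_urgent': 1}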
def _process_stats_data(self, nodes_url, data, stats_metrics, config):
cluster_stats = config.cluster_stats
for node_name in data['nodes']:
node_data = data['nodes'][node_name]
# On newer versions of ES it's "host", not "hostname"
node_hostname = node_data.get(
'hostname', node_data.get('host', None))
# Override the metric hostname if we're hitting an external cluster
metric_hostname = node_hostname if cluster_stats else None
for metric, desc in stats_metrics.iteritems():
self._process_metric(
node_data, metric, *desc, tags=config.tags,
hostname=metric_hostname)
def _process_pshard_stats_data(self, data, config, pshard_stats_metrics):
for metric, desc in pshard_stats_metrics.iteritems():
self._process_metric(data, metric, *desc, tags=config.tags)
def _process_metric(self, data, metric, xtype, path, xform=None,
tags=None, hostname=None):
"""data: dictionary containing all the stats
metric: datadog metric
path: corresponding path in data, flattened, e.g. thread_pool.bulk.queue
xform: a lambda to apply to the numerical value
"""
value = data
# Traverse the nested dictionaries
for key in path.split('.'):
if value is not None:
value = value.get(key, None)
else:
break
if value is not None:
if xform:
value = xform(value)
if xtype == "gauge":
self.gauge(metric, value, tags=tags, hostname=hostname)
else:
self.rate(metric, value, tags=tags, hostname=hostname)
else:
self._metric_not_found(metric, path)
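# A minimal sketch of the dotted-path traversal above (hypothetical data):
# data = {'thread_pool': {'bulk': {'queue': 4}}} with
# path = 'thread_pool.bulk.queue' resolves to 4 and is submitted as a gauge;
# a missing key at any level leaves value as None and the metric is skipped.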
def _process_health_data(self, data, config):
if self.cluster_status.get(config.url) is None:
self.cluster_status[config.url] = data['status']
if data['status'] in ["yellow", "red"]:
event = self._create_event(data['status'], tags=config.tags)
self.event(event)
if data['status'] != self.cluster_status.get(config.url):
self.cluster_status[config.url] = data['status']
event = self._create_event(data['status'], tags=config.tags)
self.event(event)
for metric, desc in self.CLUSTER_HEALTH_METRICS.iteritems():
self._process_metric(data, metric, *desc, tags=config.tags)
# Process the service check
cluster_status = data['status']
if cluster_status == 'green':
status = AgentCheck.OK
data['tag'] = "OK"
elif cluster_status == 'yellow':
status = AgentCheck.WARNING
data['tag'] = "WARN"
else:
status = AgentCheck.CRITICAL
data['tag'] = "ALERT"
msg = "{tag} on cluster \"{cluster_name}\" "\
"| active_shards={active_shards} "\
"| initializing_shards={initializing_shards} "\
"| relocating_shards={relocating_shards} "\
"| unassigned_shards={unassigned_shards} "\
"| timed_out={timed_out}" \
.format(**data)
self.service_check(
self.SERVICE_CHECK_CLUSTER_STATUS,
status,
message=msg,
tags=config.service_check_tags
)
def _metric_not_found(self, metric, path):
self.log.debug("Metric not found: %s -> %s", path, metric)
def _create_event(self, status, tags=None):
hostname = self.hostname.decode('utf-8')
if status == "red":
alert_type = "error"
msg_title = "%s is %s" % (hostname, status)
elif status == "yellow":
alert_type = "warning"
msg_title = "%s is %s" % (hostname, status)
else:
# then it should be green
alert_type = "success"
msg_title = "%s recovered as %s" % (hostname, status)
msg = "ElasticSearch: %s just reported as %s" % (hostname, status)
return {
'timestamp': int(time.time()),
'event_type': 'elasticsearch',
'host': hostname,
'msg_text': msg,
'msg_title': msg_title,
'alert_type': alert_type,
'source_type_name': "elasticsearch",
'event_object': hostname,
'tags': tags
}
|
|
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for network_setup.py module."""
import os
import shutil
import subprocess
import tempfile
from google_compute_engine.network_setup import network_setup
from google_compute_engine.test_compat import builtin
from google_compute_engine.test_compat import mock
from google_compute_engine.test_compat import unittest
class NetworkSetupTest(unittest.TestCase):
def setUp(self):
# Create a temporary directory.
self.test_dir = tempfile.mkdtemp()
self.mock_logger = mock.Mock()
self.mock_watcher = mock.Mock()
self.mock_ip_forwarding_utils = mock.Mock()
self.mock_network_utils = mock.Mock()
self.metadata_key = 'metadata_key'
self.mock_setup = mock.create_autospec(network_setup.NetworkSetup)
self.mock_setup.logger = self.mock_logger
self.mock_setup.watcher = self.mock_watcher
self.mock_setup.network_utils = self.mock_network_utils
self.mock_setup.network_interfaces = self.metadata_key
self.mock_setup.dhclient_script = '/bin/script'
self.mock_setup.dhcp_command = ''
def tearDown(self):
# Remove the directory after the test.
shutil.rmtree(self.test_dir)
@mock.patch('google_compute_engine.network_setup.network_setup.network_utils')
@mock.patch('google_compute_engine.network_setup.network_setup.metadata_watcher')
@mock.patch('google_compute_engine.network_setup.network_setup.logger')
def testNetworkSetup(self, mock_logger, mock_watcher, mock_network_utils):
mock_logger_instance = mock.Mock()
mock_logger.Logger.return_value = mock_logger_instance
mocks = mock.Mock()
mocks.attach_mock(mock_logger, 'logger')
mocks.attach_mock(mock_watcher, 'watcher')
mocks.attach_mock(mock_network_utils, 'network')
with mock.patch.object(
network_setup.NetworkSetup, '_SetupNetworkInterfaces'):
network_setup.NetworkSetup(debug=True)
expected_calls = [
mock.call.logger.Logger(name=mock.ANY, debug=True, facility=mock.ANY),
mock.call.watcher.MetadataWatcher(logger=mock_logger_instance),
mock.call.network.NetworkUtils(logger=mock_logger_instance),
]
self.assertEqual(mocks.mock_calls, expected_calls)
def testModifyInterface(self):
config_file = os.path.join(self.test_dir, 'config.cfg')
config_content = [
'# File comment.\n',
'A="apple"\n',
'B=banana\n',
'B=banana\n',
]
with open(config_file, 'w') as config:
for line in config_content:
config.write(line)
# Write a value for an existing config without overriding it.
network_setup.NetworkSetup._ModifyInterface(
self.mock_setup, config_file, 'A', 'aardvark', replace=False)
self.assertEquals(open(config_file).readlines(), config_content)
# Write a value for a config that is not already set.
network_setup.NetworkSetup._ModifyInterface(
self.mock_setup, config_file, 'C', 'none', replace=False)
config_content.append('C=none\n')
self.assertEquals(open(config_file).readlines(), config_content)
# Write a value for an existing config with replacement.
network_setup.NetworkSetup._ModifyInterface(
self.mock_setup, config_file, 'A', 'aardvark', replace=True)
config_content[1] = 'A=aardvark\n'
self.assertEquals(open(config_file).readlines(), config_content)
# Write a value for an existing config with multiple occurrences.
network_setup.NetworkSetup._ModifyInterface(
self.mock_setup, config_file, 'B', '"banana"', replace=True)
config_content[2] = config_content[3] = 'B="banana"\n'
self.assertEquals(open(config_file).readlines(), config_content)
@mock.patch('google_compute_engine.network_setup.network_setup.os.path.exists')
def testDisableNetworkManager(self, mock_exists):
mock_open = mock.mock_open()
mocks = mock.Mock()
mocks.attach_mock(mock_exists, 'exists')
mocks.attach_mock(mock_open, 'open')
mocks.attach_mock(self.mock_logger, 'logger')
mocks.attach_mock(self.mock_setup._ModifyInterface, 'modify')
mock_exists.side_effect = [True, False]
with mock.patch('%s.open' % builtin, mock_open, create=False):
network_setup.NetworkSetup._DisableNetworkManager(
self.mock_setup, ['eth0', 'eth1'])
expected_calls = [
mock.call.exists('/etc/sysconfig/network-scripts/ifcfg-eth0'),
mock.call.modify(mock.ANY, 'DEVICE', 'eth0', replace=False),
mock.call.modify(mock.ANY, 'NM_CONTROLLED', 'no', replace=True),
mock.call.exists('/etc/sysconfig/network-scripts/ifcfg-eth1'),
mock.call.open('/etc/sysconfig/network-scripts/ifcfg-eth1', 'w'),
mock.call.open().__enter__(),
mock.call.open().write(mock.ANY),
mock.call.open().__exit__(None, None, None),
mock.call.logger.info(mock.ANY, 'eth1'),
]
self.assertEqual(mocks.mock_calls, expected_calls)
@mock.patch('google_compute_engine.network_setup.network_setup.subprocess.check_call')
@mock.patch('google_compute_engine.network_setup.network_setup.os.path.exists')
def testConfigureNetwork(self, mock_exists, mock_call):
mocks = mock.Mock()
mocks.attach_mock(mock_exists, 'exists')
mocks.attach_mock(mock_call, 'call')
mocks.attach_mock(self.mock_logger, 'logger')
mock_exists.side_effect = [True, False, False]
mock_call.side_effect = [
None, None, None, None, subprocess.CalledProcessError(1, 'Test')]
network_setup.NetworkSetup._ConfigureNetwork(self.mock_setup, ['a', 'b'])
network_setup.NetworkSetup._ConfigureNetwork(self.mock_setup, ['c'])
network_setup.NetworkSetup._ConfigureNetwork(self.mock_setup, [])
expected_calls = [
# Successfully configure the network using a managed dhclient script.
mock.call.logger.info(mock.ANY, ['a', 'b']),
mock.call.exists('/bin/script'),
mock.call.call(['dhclient', '-sf', '/bin/script', '-x', 'a', 'b']),
mock.call.call(['dhclient', '-sf', '/bin/script', 'a', 'b']),
# Successfully configure the network using the default dhclient script.
mock.call.logger.info(mock.ANY, ['c']),
mock.call.exists('/bin/script'),
mock.call.call(['dhclient', '-x', 'c']),
mock.call.call(['dhclient', 'c']),
# Exception while enabling the network interface.
mock.call.logger.info(mock.ANY, []),
mock.call.exists('/bin/script'),
mock.call.call(['dhclient', '-x']),
mock.call.logger.warning(mock.ANY, []),
]
self.assertEqual(mocks.mock_calls, expected_calls)
@mock.patch('google_compute_engine.network_setup.network_setup.subprocess.check_call')
@mock.patch('google_compute_engine.network_setup.network_setup.os.path.exists')
def testEnableNetworkInterfaces(self, mock_exists, mock_call):
mocks = mock.Mock()
mocks.attach_mock(mock_exists, 'exists')
mocks.attach_mock(mock_call, 'call')
mocks.attach_mock(self.mock_logger, 'logger')
mocks.attach_mock(self.mock_setup._DisableNetworkManager, 'disable')
mocks.attach_mock(self.mock_setup._ConfigureNetwork, 'configure')
mock_exists.side_effect = [True, False]
mock_call.side_effect = [None, subprocess.CalledProcessError(1, 'Test')]
# Return immediately with fewer than two interfaces.
network_setup.NetworkSetup._EnableNetworkInterfaces(self.mock_setup, None)
network_setup.NetworkSetup._EnableNetworkInterfaces(self.mock_setup, [])
# Enable interfaces with network manager enabled.
network_setup.NetworkSetup._EnableNetworkInterfaces(
self.mock_setup, ['A', 'B'])
# Enable interfaces when network manager is not present.
network_setup.NetworkSetup._EnableNetworkInterfaces(
self.mock_setup, ['C', 'D'])
# Run a user-supplied command successfully.
self.mock_setup.dhcp_command = 'success'
network_setup.NetworkSetup._EnableNetworkInterfaces(
self.mock_setup, ['E', 'F'])
# Run a user-supplied command and log error messages.
self.mock_setup.dhcp_command = 'failure'
network_setup.NetworkSetup._EnableNetworkInterfaces(
self.mock_setup, ['G', 'H'])
expected_calls = [
mock.call.exists('/etc/sysconfig/network-scripts'),
mock.call.disable(['A', 'B']),
mock.call.configure(['A', 'B']),
mock.call.exists('/etc/sysconfig/network-scripts'),
mock.call.configure(['C', 'D']),
mock.call.call(['success']),
mock.call.call(['failure']),
mock.call.logger.warning(mock.ANY),
]
self.assertEqual(mocks.mock_calls, expected_calls)
def testSetupNetworkInterfaces(self):
mocks = mock.Mock()
mocks.attach_mock(self.mock_logger, 'logger')
mocks.attach_mock(self.mock_watcher, 'watcher')
mocks.attach_mock(self.mock_network_utils, 'network')
mocks.attach_mock(self.mock_setup, 'setup')
self.mock_watcher.GetMetadata.return_value = [
{'mac': '1'}, {'mac': '2'}, {'mac': '3'}, {}]
self.mock_network_utils.GetNetworkInterface.side_effect = [
'eth0', 'eth1', None, None]
with mock.patch.object(
network_setup.NetworkSetup, '_EnableNetworkInterfaces'):
self.mock_setup.dhcp_command = 'command'
network_setup.NetworkSetup._SetupNetworkInterfaces(self.mock_setup)
expected_calls = [
mock.call.watcher.GetMetadata(
metadata_key=self.metadata_key, recursive=True),
mock.call.network.GetNetworkInterface('1'),
mock.call.network.GetNetworkInterface('2'),
mock.call.network.GetNetworkInterface('3'),
mock.call.logger.warning(mock.ANY, '3'),
mock.call.network.GetNetworkInterface(None),
mock.call.logger.warning(mock.ANY, None),
mock.call.setup._EnableNetworkInterfaces(['eth0', 'eth1']),
]
self.assertEqual(mocks.mock_calls, expected_calls)
|
|
# Webhooks for external integrations.
import re
import string
from typing import Any, Callable, Dict, List, Optional
from django.db.models import Q
from django.http import HttpRequest, HttpResponse
from zerver.decorator import webhook_view
from zerver.lib.exceptions import AnomalousWebhookPayload, UnsupportedWebhookEventType
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import Realm, UserProfile, get_user_by_delivery_email
IGNORED_EVENTS = [
"attachment_created",
"issuelink_created",
"issuelink_deleted",
"jira:version_released",
"jira:worklog_updated",
"sprint_closed",
"sprint_started",
"worklog_created",
"worklog_updated",
]
def guess_zulip_user_from_jira(jira_username: str, realm: Realm) -> Optional[UserProfile]:
try:
# Try to find a matching user in Zulip
# We search a user's full name, short name,
# and beginning of email address
user = UserProfile.objects.filter(
Q(full_name__iexact=jira_username) | Q(email__istartswith=jira_username),
is_active=True,
realm=realm,
).order_by("id")[0]
return user
except IndexError:
return None
def convert_jira_markup(content: str, realm: Realm) -> str:
# Attempt to do some simplistic conversion of Jira
# formatting to Markdown, for consumption in Zulip
# Jira uses *word* for bold, we use **word**
content = re.sub(r"\*([^\*]+)\*", r"**\1**", content)
# Jira uses {{word}} for monospacing, we use `word`
content = re.sub(r"{{([^\*]+?)}}", r"`\1`", content)
# Starting a line with bq. block quotes that line
content = re.sub(r"bq\. (.*)", r"> \1", content)
# Wrapping a block of code in {quote}stuff{quote} also block-quotes it
quote_re = re.compile(r"{quote}(.*?){quote}", re.DOTALL)
content = re.sub(quote_re, r"~~~ quote\n\1\n~~~", content)
# {noformat}stuff{noformat} blocks are just code blocks with no
# syntax highlighting
noformat_re = re.compile(r"{noformat}(.*?){noformat}", re.DOTALL)
content = re.sub(noformat_re, r"~~~\n\1\n~~~", content)
# Code blocks are delineated by {code[: lang]} {code}
code_re = re.compile(r"{code[^\n]*}(.*?){code}", re.DOTALL)
content = re.sub(code_re, r"~~~\n\1\n~~~", content)
# Links are of form: [https://www.google.com] or [Link Title|https://www.google.com]
# In order to support both forms, we don't match a | in bare links
content = re.sub(r"\[([^\|~]+?)\]", r"[\1](\1)", content)
# Full links which have a | are converted into a better Markdown link
full_link_re = re.compile(r"\[(?:(?P<title>[^|~]+)\|)(?P<url>[^\]]*)\]")
content = re.sub(full_link_re, r"[\g<title>](\g<url>)", content)
# Try to convert a Jira user mention of format [~username] into a
# Zulip user mention. We don't know the email, just the Jira username,
# so we naively guess at their Zulip account using this
if realm:
mention_re = re.compile("\\[~(.*?)\\]")
for username in mention_re.findall(content):
# Try to look up username
user_profile = guess_zulip_user_from_jira(username, realm)
if user_profile:
replacement = f"**{user_profile.full_name}**"
else:
replacement = f"**{username}**"
content = content.replace(f"[~{username}]", replacement)
return content
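# A minimal sketch (assumed input; passing realm=None skips the user-mention
# pass guarded by `if realm:` above):
def _demo_convert_jira_markup() -> None:
    text = "*important* change in {{settings.py}}\nbq. please review"
    print(convert_jira_markup(text, None))
    # **important** change in `settings.py`
    # > please review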
def get_in(payload: Dict[str, Any], keys: List[str], default: str = "") -> Any:
try:
for key in keys:
payload = payload[key]
except (AttributeError, KeyError, TypeError):
return default
return payload
def get_issue_string(
payload: Dict[str, Any], issue_id: Optional[str] = None, with_title: bool = False
) -> str:
# Guess the URL as it is not specified in the payload
# We assume that there is a /browse/BUG-### page
# from the REST URL of the issue itself
if issue_id is None:
issue_id = get_issue_id(payload)
if with_title:
text = f"{issue_id}: {get_issue_title(payload)}"
else:
text = issue_id
base_url = re.match(r"(.*)\/rest\/api/.*", get_in(payload, ["issue", "self"]))
if base_url and len(base_url.groups()):
return f"[{text}]({base_url.group(1)}/browse/{issue_id})"
else:
return text
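# A minimal sketch (hypothetical payload): the browse URL is derived from the
# issue's REST self-URL as described above.
def _demo_get_issue_string() -> None:
    payload = {
        "issue": {
            "key": "BUG-42",
            "self": "https://jira.example.com/rest/api/2/issue/10002",
        }
    }
    print(get_issue_string(payload))
    # [BUG-42](https://jira.example.com/browse/BUG-42)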
def get_assignee_mention(assignee_email: str, realm: Realm) -> str:
if assignee_email != "":
try:
assignee_name = get_user_by_delivery_email(assignee_email, realm).full_name
except UserProfile.DoesNotExist:
assignee_name = assignee_email
return f"**{assignee_name}**"
return ""
def get_issue_author(payload: Dict[str, Any]) -> str:
return get_in(payload, ["user", "displayName"])
def get_issue_id(payload: Dict[str, Any]) -> str:
if "issue" not in payload:
# Some ancient version of Jira or one of its extensions posts
# comment_created events without an "issue" element. For
# these, the best we can do is extract the Jira-internal
# issue number and use that in the topic.
#
# Users who want better formatting can upgrade Jira.
return payload["comment"]["self"].split("/")[-3]
return get_in(payload, ["issue", "key"])
def get_issue_title(payload: Dict[str, Any]) -> str:
if "issue" not in payload:
# Some ancient version of Jira or one of its extensions posts
# comment_created events without an "issue" element. For
# these, the best we can do is extract the Jira-internal
# issue number and use that in the topic.
#
# Users who want better formatting can upgrade Jira.
return "Upgrade Jira to get the issue title here."
return get_in(payload, ["issue", "fields", "summary"])
def get_issue_subject(payload: Dict[str, Any]) -> str:
return f"{get_issue_id(payload)}: {get_issue_title(payload)}"
def get_sub_event_for_update_issue(payload: Dict[str, Any]) -> str:
sub_event = payload.get("issue_event_type_name", "")
if sub_event == "":
if payload.get("comment"):
return "issue_commented"
elif payload.get("transition"):
return "issue_transited"
return sub_event
def get_event_type(payload: Dict[str, Any]) -> Optional[str]:
event = payload.get("webhookEvent")
if event is None and payload.get("transition"):
event = "jira:issue_updated"
return event
def add_change_info(content: str, field: str, from_field: str, to_field: str) -> str:
content += f"* Changed {field}"
if from_field:
content += f" from **{from_field}**"
if to_field:
content += f" to {to_field}\n"
return content
def handle_updated_issue_event(payload: Dict[str, Any], user_profile: UserProfile) -> str:
# Reassigned, commented, reopened, and resolved events are all bundled
# into this one 'updated' event type, so we try to extract the meaningful
# event that happened
issue_id = get_in(payload, ["issue", "key"])
issue = get_issue_string(payload, issue_id, True)
assignee_email = get_in(payload, ["issue", "fields", "assignee", "emailAddress"], "")
assignee_mention = get_assignee_mention(assignee_email, user_profile.realm)
if assignee_mention != "":
assignee_blurb = f" (assigned to {assignee_mention})"
else:
assignee_blurb = ""
sub_event = get_sub_event_for_update_issue(payload)
if "comment" in sub_event:
if sub_event == "issue_commented":
verb = "commented on"
elif sub_event == "issue_comment_edited":
verb = "edited a comment on"
else:
verb = "deleted a comment from"
if payload.get("webhookEvent") == "comment_created":
author = payload["comment"]["author"]["displayName"]
else:
author = get_issue_author(payload)
content = f"{author} {verb} {issue}{assignee_blurb}"
comment = get_in(payload, ["comment", "body"])
if comment:
comment = convert_jira_markup(comment, user_profile.realm)
content = f"{content}:\n\n``` quote\n{comment}\n```"
else:
content = f"{content}."
else:
content = f"{get_issue_author(payload)} updated {issue}{assignee_blurb}:\n\n"
changelog = get_in(payload, ["changelog"])
if changelog != "":
# Use the changelog to display the changes, whitelist types we accept
items = changelog.get("items")
for item in items:
field = item.get("field")
if field == "assignee" and assignee_mention != "":
target_field_string = assignee_mention
else:
# Convert a user's target to a @-mention if possible
target_field_string = "**{}**".format(item.get("toString"))
from_field_string = item.get("fromString")
if target_field_string or from_field_string:
content = add_change_info(
content, field, from_field_string, target_field_string
)
elif sub_event == "issue_transited":
from_field_string = get_in(payload, ["transition", "from_status"])
target_field_string = "**{}**".format(get_in(payload, ["transition", "to_status"]))
if target_field_string or from_field_string:
content = add_change_info(content, "status", from_field_string, target_field_string)
return content
def handle_created_issue_event(payload: Dict[str, Any], user_profile: UserProfile) -> str:
template = """
{author} created {issue_string}:
* **Priority**: {priority}
* **Assignee**: {assignee}
""".strip()
return template.format(
author=get_issue_author(payload),
issue_string=get_issue_string(payload, with_title=True),
priority=get_in(payload, ["issue", "fields", "priority", "name"]),
assignee=get_in(payload, ["issue", "fields", "assignee", "displayName"], "no one"),
)
def handle_deleted_issue_event(payload: Dict[str, Any], user_profile: UserProfile) -> str:
template = "{author} deleted {issue_string}{punctuation}"
title = get_issue_title(payload)
punctuation = "." if title[-1] not in string.punctuation else ""
return template.format(
author=get_issue_author(payload),
issue_string=get_issue_string(payload, with_title=True),
punctuation=punctuation,
)
def normalize_comment(comment: str) -> str:
# Here's how Jira escapes special characters in their payload:
# ,.?\\!\n\"'\n\\[]\\{}()\n@#$%^&*\n~`|/\\\\
# for some reason, as of writing this, ! has two '\' before it.
normalized_comment = comment.replace("\\!", "!")
return normalized_comment
def handle_comment_created_event(payload: Dict[str, Any], user_profile: UserProfile) -> str:
title = get_issue_title(payload)
return '{author} commented on issue: *"{title}"\
*\n``` quote\n{comment}\n```\n'.format(
author=payload["comment"]["author"]["displayName"],
title=title,
comment=normalize_comment(payload["comment"]["body"]),
)
def handle_comment_updated_event(payload: Dict[str, Any], user_profile: UserProfile) -> str:
title = get_issue_title(payload)
return '{author} updated their comment on issue: *"{title}"\
*\n``` quote\n{comment}\n```\n'.format(
author=payload["comment"]["author"]["displayName"],
title=title,
comment=normalize_comment(payload["comment"]["body"]),
)
def handle_comment_deleted_event(payload: Dict[str, Any], user_profile: UserProfile) -> str:
title = get_issue_title(payload)
return '{author} deleted their comment on issue: *"{title}"\
*\n``` quote\n~~{comment}~~\n```\n'.format(
author=payload["comment"]["author"]["displayName"],
title=title,
comment=normalize_comment(payload["comment"]["body"]),
)
JIRA_CONTENT_FUNCTION_MAPPER: Dict[str, Optional[Callable[[Dict[str, Any], UserProfile], str]]] = {
"jira:issue_created": handle_created_issue_event,
"jira:issue_deleted": handle_deleted_issue_event,
"jira:issue_updated": handle_updated_issue_event,
"comment_created": handle_comment_created_event,
"comment_updated": handle_comment_updated_event,
"comment_deleted": handle_comment_deleted_event,
}
ALL_EVENT_TYPES = list(JIRA_CONTENT_FUNCTION_MAPPER.keys())
@webhook_view("Jira", all_event_types=ALL_EVENT_TYPES)
@has_request_variables
def api_jira_webhook(
request: HttpRequest,
user_profile: UserProfile,
payload: Dict[str, Any] = REQ(argument_type="body"),
) -> HttpResponse:
event = get_event_type(payload)
if event in IGNORED_EVENTS:
return json_success()
if event is None:
raise AnomalousWebhookPayload()
    content_func = JIRA_CONTENT_FUNCTION_MAPPER.get(event)
    if content_func is None:
        raise UnsupportedWebhookEventType(event)

    subject = get_issue_subject(payload)
    content: str = content_func(payload, user_profile)
    check_send_webhook_message(
        request, user_profile, subject, content, event, unquote_url_parameters=True
    )
return json_success()
|
|
import heapq
from random import random
from warnings import warn
# --- PRIORITY QUEUE ----------------------------------------------------------------------------------
# Currently not in use.
INFINITY = 1e20
class priorityqueue(dict):
def push(self, e, w):
self[e] = w
    def pop(self):
        # Linear scan for the entry with the lowest weight.
        p, w = None, INFINITY
        for e in self:
            if self[e] <= w:
                p, w = e, self[e]
        if p is not None:  # "if p" would misfire on falsy keys such as 0.
            del self[p]
        return p
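# Minimal usage sketch for the (currently unused) queue above: the lowest
# weight pops first, and "<=" means a later-pushed tie wins.
#
#   pq = priorityqueue()
#   pq.push("a", 3)
#   pq.push("b", 1)
#   pq.pop()  # -> "b"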
# --- DEPTH-FIRST SEARCH ------------------------------------------------------------------------------
def depth_first_search(root, visit=lambda node: False, traversable=lambda node, edge: True):
""" Simple, multi-purpose depth-first search.
Visits all the nodes connected to the root, depth-first.
The visit function is called on each node.
    Recursion will stop if it returns True, and subsequently depth_first_search() will return True.
The traversable function takes the current node and edge,
and returns True if we are allowed to follow this connection to the next node.
    For example, the traversable function for directed edges is as follows:
lambda node, edge: node == edge.node1
Note: node._visited is expected to be False for all nodes.
"""
stop = visit(root)
root._visited = True
for node in root.links:
if stop:
return True
if not traversable(root, root.links.edge(node)):
continue
if not node._visited:
stop = depth_first_search(node, visit, traversable)
return stop
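# Usage sketch (assumes nodebox-style nodes exposing .id, .links and a
# _visited flag that starts out False, as the docstring above requires):
#
#   seen = []
#   def collect(node):
#       seen.append(node.id)
#       return False  # never stop early
#   depth_first_search(root, visit=collect)
#   # seen now holds every reachable node id; reset _visited before reuse.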
# --- ADJACENCY LIST ----------------------------------------------------------------------------------
def adjacency(graph, directed=False, reversed=False, stochastic=False, heuristic=None):
""" An edge weight map indexed by node id's.
A dictionary indexed by node id1's in which each value is a
dictionary of connected node id2's linking to the edge weight.
If directed, edges go from id1 to id2, but not the other way.
If stochastic, all the weights for the neighbors of a given node sum to 1.
A heuristic can be a function that takes two node id's and returns
an additional cost for movement between the two nodes.
"""
v = {}
for n in graph.nodes:
v[n.id] = {}
for e in graph.edges:
id1 = e.node1.id
id2 = e.node2.id
if reversed:
id1, id2 = id2, id1
# if not v.has_key(id1): v[id1] = {}
# if not v.has_key(id2): v[id2] = {}
v[id1][id2] = 1.0 - e.weight*0.5
if heuristic:
v[id1][id2] += heuristic(id1, id2)
if not directed:
v[id2][id1] = v[id1][id2]
if stochastic:
for id1 in v:
d = sum(v[id1].values())
for id2 in v[id1]:
v[id1][id2] /= d
return v
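# Worked example: an undirected edge n1--n2 with weight 0.5 yields
# v["n1"]["n2"] == v["n2"]["n1"] == 1.0 - 0.5*0.5 == 0.75, and with
# stochastic=True each row is then divided by its sum, so the map behaves
# like a transition-probability matrix.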
# --- DIJKSTRA SHORTEST PATH --------------------------------------------------------------------------
def dijkstra_shortest_path(graph, id1, id2, heuristic=None, directed=False):
""" Dijkstra algorithm for finding shortest paths.
Connelly Barnes, http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/119466
    Raises an IndexError when no path exists between the two nodes.
"""
G = adjacency(graph, directed=directed, heuristic=heuristic)
start = id1
end = id2
# Flatten linked list of form [0,[1,[2,[]]]]
def flatten(L):
while len(L) > 0:
yield L[0]
L = L[1]
q = [(0, start, ())] # Heap of (cost, path_head, path_rest).
visited = set() # Visited vertices.
while True:
(cost1, v1, path) = heapq.heappop(q)
if v1 not in visited:
visited.add(v1)
if v1 == end:
return list(flatten(path))[::-1] + [v1]
path = (v1, path)
for (v2, cost2) in G[v1].iteritems():
if v2 not in visited:
heapq.heappush(q, (cost1 + cost2, v2, path))
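# Usage sketch: id1/id2 are node ids in `graph`.
#
#   path = dijkstra_shortest_path(graph, "start", "goal")
#   # -> a list of ids such as ["start", "n7", "goal"]; an unconnected pair
#   #    eventually empties the heap, so heappop raises the IndexError
#   #    mentioned in the docstring.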
# --- BRANDES BETWEENNESS CENTRALITY ------------------------------------------------------------------
def brandes_betweenness_centrality(graph, normalized=True, directed=False):
""" Betweenness centrality for nodes in the graph.
    Betweenness centrality is a measure of the number of shortest paths that pass through a node.
Nodes in high-density areas will get a good score.
The algorithm is Brandes' betweenness centrality,
from NetworkX 0.35.1: Aric Hagberg, Dan Schult and Pieter Swart,
based on Dijkstra's algorithm for shortest paths modified from Eppstein.
https://networkx.lanl.gov/wiki
"""
G = graph.keys()
W = adjacency(graph, directed=directed)
betweenness = dict.fromkeys(G, 0.0) # b[v]=0 for v in G
for s in G:
S = []
P = {}
for v in G:
P[v] = []
sigma = dict.fromkeys(G, 0) # sigma[v]=0 for v in G
D = {}
sigma[s] = 1
seen = {s: 0}
Q = [] # use Q as heap with (distance, node id) tuples
heapq.heappush(Q, (0, s, s))
while Q:
(dist, pred, v) = heapq.heappop(Q)
if v in D:
continue # already searched this node
sigma[v] = sigma[v] + sigma[pred] # count paths
S.append(v)
D[v] = seen[v]
for w in graph[v].links:
w = w.id
vw_dist = D[v] + W[v][w]
if w not in D and (w not in seen or vw_dist < seen[w]):
seen[w] = vw_dist
heapq.heappush(Q, (vw_dist, v, w))
P[w] = [v]
elif vw_dist == seen[w]: # handle equal paths
sigma[w] = sigma[w] + sigma[v]
P[w].append(v)
delta = dict.fromkeys(G, 0)
while S:
w = S.pop()
for v in P[w]:
delta[v] = delta[v] + (float(sigma[v]) / float(sigma[w])) * (1.0 + delta[w])
if w != s:
betweenness[w] = betweenness[w] + delta[w]
# -----------------------------------
if normalized:
# Normalize between 0.0 and 1.0.
m = max(betweenness.values())
if m == 0:
m = 1
else:
m = 1
betweenness = dict([(id, p/m) for id, p in betweenness.iteritems()])
return betweenness
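# Sanity example: in a path graph a--b--c only b lies on a shortest path
# between the other two nodes. The s=a and s=c passes each add 1, giving raw
# scores {a: 0, b: 2, c: 0}, which normalize to {a: 0.0, b: 1.0, c: 0.0}.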
# --- EIGENVECTOR CENTRALITY --------------------------------------------------------------------------
class NoConvergenceError(Exception): pass
def eigenvector_centrality(graph, normalized=True, reversed=True, rating={},
start=None, iterations=100, tolerance=0.0001):
""" Eigenvector centrality for nodes in the graph (like Google's PageRank).
Eigenvector centrality is a measure of the importance of a node in a directed network.
It rewards nodes with a high potential of (indirectly) connecting to high-scoring nodes.
Nodes with no incoming connections have a score of zero.
If you want to measure outgoing connections, reversed should be False.
The eigenvector calculation is done by the power iteration method.
It has no guarantee of convergence.
A starting vector for the power iteration can be given in the start dict.
You can adjust the importance of a node with the rating dictionary,
which links node id's to a score.
The algorithm is adapted from NetworkX, Aric Hagberg (hagberg@lanl.gov):
https://networkx.lanl.gov/attachment/ticket/119/eigenvector_centrality.py
"""
G = graph.keys()
W = adjacency(graph, directed=True, reversed=reversed)
def _normalize(x):
s = sum(x.values())
if s != 0:
s = 1.0 / s
for k in x:
x[k] *= s
x = start
if x is None:
x = dict([(n, random()) for n in G])
_normalize(x)
# Power method: y = Ax multiplication.
for i in range(iterations):
x0 = x
x = dict.fromkeys(x0.keys(), 0)
for n in x:
for nbr in W[n]:
r = 1
if n in rating:
r = rating[n]
x[n] += 0.01 + x0[nbr] * W[n][nbr] * r
_normalize(x)
e = sum([abs(x[n]-x0[n]) for n in x])
if e < len(graph.nodes) * tolerance:
if normalized:
# Normalize between 0.0 and 1.0.
m = max(x.values())
if m == 0:
m = 1
x = dict([(id, w/m) for id, w in x.iteritems()])
return x
# raise NoConvergenceError
warn("node weight is 0 because eigenvector_centrality() did not converge.", Warning)
return dict([(n, 0) for n in G])
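# Usage sketch: the scores approximate the principal eigenvector of the
# (reversed) weighted adjacency map, so nodes with many well-connected
# in-links rank highest.
#
#   scores = eigenvector_centrality(graph)
#   ranked = sorted(scores.items(), key=lambda kv: kv[1], reverse=True)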
|
|
# Copyright 2010-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from novaclient import exceptions as nova_exceptions
from oslo_log import log as logging
from trove.common.exception import PollTimeOut
from trove.common.i18n import _  # used by the translated log messages below
from trove.common import instance as rd_instance
from trove.tests.fakes.common import authorize
import collections
import eventlet
import uuid
LOG = logging.getLogger(__name__)
FAKE_HOSTS = ["fake_host_1", "fake_host_2"]
class FakeFlavor(object):
def __init__(self, id, disk, name, ram, ephemeral=0, vcpus=10):
self.id = id
self.disk = disk
self.name = name
self.ram = ram
self.vcpus = vcpus
self.ephemeral = ephemeral
@property
def links(self):
url = ("http://localhost:8774/v2/5064d71eb09c47e1956cf579822bae9a/"
"flavors/%s") % self.id
return [{"href": url, "rel": link_type}
for link_type in ['self', 'bookmark']]
@property
def href_suffix(self):
return "flavors/%s" % self.id
class FakeFlavors(object):
def __init__(self):
self.db = {}
self._add(1, 0, "m1.tiny", 512)
self._add(2, 20, "m1.small", 2048)
self._add(3, 40, "m1.medium", 4096)
self._add(4, 80, "m1.large", 8192)
self._add(5, 160, "m1.xlarge", 16384)
self._add(6, 0, "m1.nano", 64)
self._add(7, 0, "m1.micro", 128)
self._add(8, 2, "m1.rd-smaller", 768)
self._add(9, 10, "tinier", 506)
self._add(10, 2, "m1.rd-tiny", 512)
self._add(11, 0, "eph.rd-tiny", 512, 1)
self._add(12, 20, "eph.rd-smaller", 768, 2)
self._add("custom", 25, "custom.small", 512, 1)
# self._add(13, 20, "m1.heat", 512)
def _add(self, *args, **kwargs):
new_flavor = FakeFlavor(*args, **kwargs)
self.db[new_flavor.id] = new_flavor
def get(self, id):
try:
id = int(id)
except ValueError:
pass
if id not in self.db:
raise nova_exceptions.NotFound(404, "Flavor id not found %s" % id)
return self.db[id]
def get_by_href(self, href):
for id in self.db:
value = self.db[id]
# Use inexact match since faking the exact endpoints would be
# difficult.
if href.endswith(value.href_suffix):
return value
raise nova_exceptions.NotFound(404, "Flavor href not found %s" % href)
def list(self):
return [self.get(id) for id in self.db]
class FakeServer(object):
next_local_id = 0
def __init__(self, parent, owner, id, name, image_id, flavor_ref,
block_device_mapping, volumes):
self.owner = owner # This is a context.
self.id = id
self.parent = parent
self.name = name
self.image_id = image_id
self.flavor_ref = flavor_ref
self.old_flavor_ref = None
self._current_status = "BUILD"
self.volumes = volumes
# This is used by "RdServers". Its easier to compute the
# fake value in this class's initializer.
self._local_id = self.next_local_id
self.next_local_id += 1
info_vols = []
for volume in self.volumes:
info_vols.append({'id': volume.id})
volume.set_attachment(id)
volume.schedule_status("in-use", 1)
self.host = FAKE_HOSTS[0]
self.old_host = None
setattr(self, 'OS-EXT-AZ:availability_zone', 'nova')
self._info = {'os:volumes': info_vols}
@property
def addresses(self):
return {"private": [{"addr": "123.123.123.123"}]}
def confirm_resize(self):
if self.status != "VERIFY_RESIZE":
raise RuntimeError("Not in resize confirm mode.")
self._current_status = "ACTIVE"
def revert_resize(self):
if self.status != "VERIFY_RESIZE":
raise RuntimeError("Not in resize confirm mode.")
self.host = self.old_host
self.old_host = None
self.flavor_ref = self.old_flavor_ref
self.old_flavor_ref = None
self._current_status = "ACTIVE"
def reboot(self):
LOG.debug("Rebooting server %s" % (self.id))
def set_to_active():
self._current_status = "ACTIVE"
self.parent.schedule_simulate_running_server(self.id, 1.5)
self._current_status = "REBOOT"
eventlet.spawn_after(1, set_to_active)
def delete(self):
self.schedule_status = []
# TODO(pdmars): This is less than ideal, but a quick way to force it
# into the error state before scheduling the delete.
if (self.name.endswith("_ERROR_ON_DELETE") and
self._current_status != "SHUTDOWN"):
# Fail to delete properly the first time, just set the status
# to SHUTDOWN and break. It's important that we only fail to delete
# once in fake mode.
self._current_status = "SHUTDOWN"
return
self._current_status = "SHUTDOWN"
self.parent.schedule_delete(self.id, 1.5)
@property
def flavor(self):
return FLAVORS.get_by_href(self.flavor_ref).__dict__
@property
def links(self):
url = "https://localhost:9999/v1.0/1234/instances/%s" % self.id
return [{"href": url, "rel": link_type}
for link_type in ['self', 'bookmark']]
def migrate(self, force_host=None):
self.resize(None, force_host)
def resize(self, new_flavor_id=None, force_host=None):
self._current_status = "RESIZE"
if self.name.endswith("_RESIZE_TIMEOUT"):
raise PollTimeOut()
def set_to_confirm_mode():
self._current_status = "VERIFY_RESIZE"
def set_to_active():
self.parent.schedule_simulate_running_server(self.id, 1.5)
eventlet.spawn_after(1, set_to_active)
def change_host():
self.old_host = self.host
if not force_host:
self.host = [host for host in FAKE_HOSTS
if host != self.host][0]
else:
self.host = force_host
def set_flavor():
if self.name.endswith("_RESIZE_ERROR"):
self._current_status = "ACTIVE"
return
if new_flavor_id is None:
# Migrations are flavorless flavor resizes.
# A resize MIGHT change the host, but a migrate
# deliberately does.
LOG.debug("Migrating fake instance.")
eventlet.spawn_after(0.75, change_host)
else:
LOG.debug("Resizing fake instance.")
self.old_flavor_ref = self.flavor_ref
flavor = self.parent.flavors.get(new_flavor_id)
self.flavor_ref = flavor.links[0]['href']
eventlet.spawn_after(1, set_to_confirm_mode)
eventlet.spawn_after(0.8, set_flavor)
def schedule_status(self, new_status, time_from_now):
"""Makes a new status take effect at the given time."""
def set_status():
self._current_status = new_status
eventlet.spawn_after(time_from_now, set_status)
@property
def status(self):
return self._current_status
@property
def created(self):
return "2012-01-25T21:55:51Z"
@property
def updated(self):
return "2012-01-25T21:55:51Z"
@property
def tenant(self): # This is on the RdServer extension type.
return self.owner.tenant
@property
def tenant_id(self):
return self.owner.tenant
# The global var contains the servers dictionary in use for the life of these
# tests.
FAKE_SERVERS_DB = {}
class FakeServers(object):
def __init__(self, context, flavors):
self.context = context
self.db = FAKE_SERVERS_DB
self.flavors = flavors
def can_see(self, id):
"""Can this FakeServers, with its context, see some resource?"""
server = self.db[id]
return (self.context.is_admin or
server.owner.tenant == self.context.tenant)
def create(self, name, image_id, flavor_ref, files=None, userdata=None,
block_device_mapping=None, volume=None, security_groups=None,
availability_zone=None, nics=None, config_drive=False):
id = "FAKE_%s" % uuid.uuid4()
if volume:
volume = self.volumes.create(volume['size'], volume['name'],
volume['description'])
while volume.status == "BUILD":
eventlet.sleep(0.1)
if volume.status != "available":
LOG.info(_("volume status = %s") % volume.status)
raise nova_exceptions.ClientException("Volume was bad!")
mapping = "%s::%s:%s" % (volume.id, volume.size, 1)
block_device_mapping = {'vdb': mapping}
volumes = [volume]
LOG.debug("Fake Volume Create %(volumeid)s with "
"status %(volumestatus)s" %
{'volumeid': volume.id, 'volumestatus': volume.status})
else:
volumes = self._get_volumes_from_bdm(block_device_mapping)
for volume in volumes:
volume.schedule_status('in-use', 1)
server = FakeServer(self, self.context, id, name, image_id, flavor_ref,
block_device_mapping, volumes)
self.db[id] = server
if name.endswith('SERVER_ERROR'):
raise nova_exceptions.ClientException("Fake server create error.")
if availability_zone == 'BAD_ZONE':
raise nova_exceptions.ClientException("The requested availability "
"zone is not available.")
if nics:
if 'port-id' in nics[0] and nics[0]['port-id'] == "UNKNOWN":
raise nova_exceptions.ClientException("The requested "
"port-id is not "
"available.")
server.schedule_status("ACTIVE", 1)
LOG.info("FAKE_SERVERS_DB : %s" % str(FAKE_SERVERS_DB))
return server
def _get_volumes_from_bdm(self, block_device_mapping):
volumes = []
if block_device_mapping is not None:
# block_device_mapping is a dictionary, where the key is the
# device name on the compute instance and the mapping info is a
# set of fields in a string, separated by colons.
# For each device, find the volume, and record the mapping info
# to another fake object and attach it to the volume
# so that the fake API can later retrieve this.
for device in block_device_mapping:
mapping = block_device_mapping[device]
(id, _type, size, delete_on_terminate) = mapping.split(":")
volume = self.volumes.get(id)
volume.mapping = FakeBlockDeviceMappingInfo(
id, device, _type, size, delete_on_terminate)
volumes.append(volume)
return volumes
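    # Worked example of the mapping format parsed above: create() builds
    # {'vdb': "<volume_id>::<size>:1"}, which splits into id="<volume_id>",
    # _type="", size="<size>", delete_on_terminate="1".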
def get(self, id):
if id not in self.db:
LOG.error(_("Couldn't find server id %(id)s, collection=%(db)s") %
{'id': id, 'db': self.db})
raise nova_exceptions.NotFound(404, "Not found")
else:
if self.can_see(id):
return self.db[id]
else:
raise nova_exceptions.NotFound(404, "Bad permissions")
def get_server_volumes(self, server_id):
"""Fake method we've added to grab servers from the volume."""
return [volume.mapping
for volume in self.get(server_id).volumes
if volume.mapping is not None]
def list(self):
return [v for (k, v) in self.db.items() if self.can_see(v.id)]
def schedule_delete(self, id, time_from_now):
def delete_server():
LOG.info(_("Simulated event ended, deleting server %s.") % id)
del self.db[id]
eventlet.spawn_after(time_from_now, delete_server)
def schedule_simulate_running_server(self, id, time_from_now):
from trove.instance.models import DBInstance
from trove.instance.models import InstanceServiceStatus
def set_server_running():
instance = DBInstance.find_by(compute_instance_id=id)
LOG.debug("Setting server %s to running" % instance.id)
status = InstanceServiceStatus.find_by(instance_id=instance.id)
status.status = rd_instance.ServiceStatuses.RUNNING
status.save()
eventlet.spawn_after(time_from_now, set_server_running)
class FakeRdServer(object):
def __init__(self, server):
self.server = server
self.deleted = False
self.deleted_at = None # Not sure how to simulate "True" for this.
self.local_id = server._local_id
def __getattr__(self, name):
return getattr(self.server, name)
class FakeRdServers(object):
def __init__(self, servers):
self.servers = servers
def get(self, id):
return FakeRdServer(self.servers.get(id))
def list(self):
# Attach the extra Rd Server stuff to the normal server.
return [FakeRdServer(server) for server in self.servers.list()]
class FakeServerVolumes(object):
def __init__(self, context):
self.context = context
def get_server_volumes(self, server_id):
class ServerVolumes(object):
def __init__(self, block_device_mapping):
LOG.debug("block_device_mapping = %s" %
block_device_mapping)
device = block_device_mapping['vdb']
(self.volumeId,
self.type,
self.size,
self.delete_on_terminate) = device.split(":")
fake_servers = FakeServers(self.context, FLAVORS)
server = fake_servers.get(server_id)
return [ServerVolumes(server.block_device_mapping)]
class FakeVolume(object):
def __init__(self, parent, owner, id, size, name,
description, volume_type):
self.attachments = []
self.parent = parent
self.owner = owner # This is a context.
self.id = id
self.size = size
self.name = name
self.description = description
self._current_status = "BUILD"
# For some reason we grab this thing from device then call it mount
# point.
self.device = "vdb"
self.volume_type = volume_type
def __repr__(self):
msg = ("FakeVolume(id=%s, size=%s, name=%s, "
"description=%s, _current_status=%s)")
params = (self.id, self.size, self.name,
self.description, self._current_status)
return (msg % params)
@property
def availability_zone(self):
return "fake-availability-zone"
@property
def created_at(self):
return "2001-01-01-12:30:30"
def get(self, key):
return getattr(self, key)
def schedule_status(self, new_status, time_from_now):
"""Makes a new status take effect at the given time."""
def set_status():
self._current_status = new_status
eventlet.spawn_after(time_from_now, set_status)
def set_attachment(self, server_id):
"""Fake method we've added to set attachments. Idempotent."""
for attachment in self.attachments:
if attachment['server_id'] == server_id:
return # Do nothing
self.attachments.append({'server_id': server_id,
'device': self.device})
@property
def status(self):
return self._current_status
class FakeBlockDeviceMappingInfo(object):
def __init__(self, id, device, _type, size, delete_on_terminate):
self.volumeId = id
self.device = device
self.type = _type
self.size = size
self.delete_on_terminate = delete_on_terminate
FAKE_VOLUMES_DB = {}
class FakeVolumes(object):
def __init__(self, context):
self.context = context
self.db = FAKE_VOLUMES_DB
def can_see(self, id):
"""Can this FakeVolumes, with its context, see some resource?"""
server = self.db[id]
return (self.context.is_admin or
server.owner.tenant == self.context.tenant)
def get(self, id):
if id not in self.db:
LOG.error(_("Couldn't find volume id %(id)s, collection=%(db)s") %
{'id': id, 'db': self.db})
raise nova_exceptions.NotFound(404, "Not found")
else:
if self.can_see(id):
return self.db[id]
else:
raise nova_exceptions.NotFound(404, "Bad permissions")
def create(self, size, name=None, description=None, volume_type=None):
id = "FAKE_VOL_%s" % uuid.uuid4()
volume = FakeVolume(self, self.context, id, size, name,
description, volume_type)
self.db[id] = volume
if size == 9:
volume.schedule_status("error", 2)
elif size == 13:
raise Exception("No volume for you!")
else:
volume.schedule_status("available", 2)
LOG.debug("Fake volume created %(volumeid)s with "
"status %(volumestatus)s" %
{'volumeid': volume.id, 'volumestatus': volume.status})
LOG.info("FAKE_VOLUMES_DB : %s" % FAKE_VOLUMES_DB)
return volume
def list(self, detailed=True):
return [self.db[key] for key in self.db]
def extend(self, volume_id, new_size):
LOG.debug("Resize volume id (%(volumeid)s) to size (%(size)s)" %
{'volumeid': volume_id, 'size': new_size})
volume = self.get(volume_id)
if volume._current_status != 'available':
raise Exception("Invalid volume status: "
"expected 'in-use' but was '%s'" %
volume._current_status)
def finish_resize():
volume.size = new_size
eventlet.spawn_after(1.0, finish_resize)
def delete_server_volume(self, server_id, volume_id):
volume = self.get(volume_id)
if volume._current_status != 'in-use':
raise Exception("Invalid volume status: "
"expected 'in-use' but was '%s'" %
volume._current_status)
def finish_detach():
volume._current_status = "available"
eventlet.spawn_after(1.0, finish_detach)
def create_server_volume(self, server_id, volume_id, device_path):
volume = self.get(volume_id)
if volume._current_status != "available":
raise Exception("Invalid volume status: "
"expected 'available' but was '%s'" %
volume._current_status)
def finish_attach():
volume._current_status = "in-use"
eventlet.spawn_after(1.0, finish_attach)
class FakeAccount(object):
def __init__(self, id, servers):
self.id = id
self.servers = self._servers_to_dict(servers)
def _servers_to_dict(self, servers):
ret = []
for server in servers:
server_dict = {}
server_dict['id'] = server.id
server_dict['name'] = server.name
server_dict['status'] = server.status
server_dict['host'] = server.host
ret.append(server_dict)
return ret
class FakeAccounts(object):
def __init__(self, context, servers):
self.context = context
self.db = FAKE_SERVERS_DB
self.servers = servers
def _belongs_to_tenant(self, tenant, id):
server = self.db[id]
return server.tenant == tenant
def get_instances(self, id):
authorize(self.context)
servers = [v for (k, v) in self.db.items()
if self._belongs_to_tenant(id, v.id)]
return FakeAccount(id, servers)
FLAVORS = FakeFlavors()
class FakeHost(object):
def __init__(self, name, servers):
self.name = name
self.servers = servers
self.instances = []
self.percentUsed = 0
self.totalRAM = 0
self.usedRAM = 0
@property
def instanceCount(self):
return len(self.instances)
def recalc(self):
"""
This fake-mode exclusive method recalculates the fake data this
object passes back.
"""
self.instances = []
self.percentUsed = 0
self.totalRAM = 32000 # 16384
self.usedRAM = 0
for server in self.servers.list():
print(server)
if server.host != self.name:
print("\t...not on this host.")
continue
self.instances.append({
'uuid': server.id,
'name': server.name,
'status': server.status
})
if (str(server.flavor_ref).startswith('http:') or
str(server.flavor_ref).startswith('https:')):
flavor = FLAVORS.get_by_href(server.flavor_ref)
else:
flavor = FLAVORS.get(server.flavor_ref)
ram = flavor.ram
self.usedRAM += ram
decimal = float(self.usedRAM) / float(self.totalRAM)
self.percentUsed = int(decimal * 100)
class FakeHosts(object):
def __init__(self, servers):
# Use an ordered dict to make the results of the fake api call
# return in the same order for the example generator.
self.hosts = collections.OrderedDict()
for host in FAKE_HOSTS:
self.add_host(FakeHost(host, servers))
def add_host(self, host):
self.hosts[host.name] = host
return host
def get(self, name):
try:
self.hosts[name].recalc()
return self.hosts[name]
except KeyError:
raise nova_exceptions.NotFound(404, "Host not found %s" % name)
def list(self):
for name in self.hosts:
self.hosts[name].recalc()
return [self.hosts[name] for name in self.hosts]
class FakeRdStorage(object):
def __init__(self, name):
self.name = name
self.type = ""
self.used = 0
self.capacity = {}
self.provision = {}
def recalc(self):
self.type = "test_type"
self.used = 10
self.capacity['total'] = 100
self.capacity['available'] = 90
self.provision['total'] = 50
self.provision['available'] = 40
self.provision['percent'] = 10
class FakeRdStorages(object):
def __init__(self):
self.storages = {}
self.add_storage(FakeRdStorage("fake_storage"))
def add_storage(self, storage):
self.storages[storage.name] = storage
return storage
def list(self):
for name in self.storages:
self.storages[name].recalc()
return [self.storages[name] for name in self.storages]
class FakeSecurityGroup(object):
def __init__(self, name=None, description=None, context=None):
self.name = name
self.description = description
self.id = "FAKE_SECGRP_%s" % uuid.uuid4()
        self.rules = []  # add_rule() appends, so this must be a list.
def get_id(self):
return self.id
def add_rule(self, fakeSecGroupRule):
self.rules.append(fakeSecGroupRule)
return self.rules
    def get_rules(self):
        # data() returns a dict, so stringify each rule before concatenating.
        result = ""
        for rule in self.rules:
            result = result + str(rule.data())
        return result
def data(self):
return {
'id': self.id,
'name': self.name,
'description': self.description
}
class FakeSecurityGroups(object):
def __init__(self, context=None):
self.context = context
self.securityGroups = {}
def create(self, name=None, description=None):
secGrp = FakeSecurityGroup(name, description)
self.securityGroups[secGrp.get_id()] = secGrp
return secGrp
def delete(self, group_id):
pass
def list(self):
pass
class FakeSecurityGroupRule(object):
def __init__(self, ip_protocol=None, from_port=None, to_port=None,
cidr=None, parent_group_id=None, context=None):
self.group_id = parent_group_id
self.protocol = ip_protocol
self.from_port = from_port
self.to_port = to_port
self.cidr = cidr
self.context = context
self.id = "FAKE_SECGRP_RULE_%s" % uuid.uuid4()
def get_id(self):
return self.id
def data(self):
return {
'id': self.id,
'group_id': self.group_id,
'protocol': self.protocol,
'from_port': self.from_port,
'to_port': self.to_port,
'cidr': self.cidr
}
class FakeSecurityGroupRules(object):
def __init__(self, context=None):
self.context = context
self.securityGroupRules = {}
def create(self, parent_group_id, ip_protocol, from_port, to_port, cidr):
secGrpRule = FakeSecurityGroupRule(ip_protocol, from_port, to_port,
cidr, parent_group_id)
self.securityGroupRules[secGrpRule.get_id()] = secGrpRule
return secGrpRule
def delete(self, id):
if id in self.securityGroupRules:
del self.securityGroupRules[id]
class FakeClient(object):
def __init__(self, context):
self.context = context
self.flavors = FLAVORS
self.servers = FakeServers(context, self.flavors)
self.volumes = FakeVolumes(context)
self.servers.volumes = self.volumes
self.accounts = FakeAccounts(context, self.servers)
self.rdhosts = FakeHosts(self.servers)
self.rdstorage = FakeRdStorages()
self.rdservers = FakeRdServers(self.servers)
self.security_groups = FakeSecurityGroups(context)
self.security_group_rules = FakeSecurityGroupRules(context)
def get_server_volumes(self, server_id):
return self.servers.get_server_volumes(server_id)
def rescan_server_volume(self, server, volume_id):
LOG.info("FAKE rescanning volume.")
CLIENT_DATA = {}
def get_client_data(context):
if context not in CLIENT_DATA:
nova_client = FakeClient(context)
volume_client = FakeClient(context)
volume_client.servers = nova_client
CLIENT_DATA[context] = {
'nova': nova_client,
'volume': volume_client
}
return CLIENT_DATA[context]
def fake_create_nova_client(context):
return get_client_data(context)['nova']
def fake_create_nova_volume_client(context):
return get_client_data(context)['volume']
def fake_create_cinder_client(context):
return get_client_data(context)['volume']
|
|
import os
import uuid
import shutil
import sys
import time
import datetime
import traceback
from email.MIMEText import MIMEText
from email.MIMEMultipart import MIMEMultipart
from sideloader.worker import task_db
# NOTE: `settings`, `slack` and `specter` are referenced throughout this
# plugin but were not imported in this excerpt; the module paths below are
# assumptions, not taken from the original source.
from sideloader import settings
from sideloader.worker import slack, specter
from twisted.mail.smtp import sendmail
from twisted.internet import defer, reactor, protocol
from twisted.python import log
from twisted.enterprise import adbapi
from rhumba.plugin import RhumbaPlugin, fork
from rhumba import cron
class BuildProcess(protocol.ProcessProtocol):
def __init__(self, id, prjid, idhash, db, callback):
self.id = id
self.project_id = prjid
self.idhash = idhash
self.db = db
self.data = ""
self.callback = callback
def log(self, msg):
log.msg('[%s] %s' % (self.id, msg))
@defer.inlineCallbacks
def outReceived(self, data):
self.log(data)
self.data = self.data + data
yield self.db.updateBuildLog(self.id, self.data)
@defer.inlineCallbacks
def errReceived(self, data):
self.log(data)
self.data = self.data + data
yield self.db.updateBuildLog(self.id, self.data)
def processEnded(self, reason):
reactor.callLater(0, self.callback, reason.value.exitCode,
self.project_id, self.id, self.idhash)
class Plugin(RhumbaPlugin):
def __init__(self, *a):
RhumbaPlugin.__init__(self, *a)
self.db = task_db.SideloaderDB()
self.build_locks = {}
@defer.inlineCallbacks
def sendEmail(self, to, content, subject):
start = '<html><head></head><body style="font-family:arial,sans-serif;">'
end = '</body></html>'
cont = MIMEText(start+content+end, 'html')
msg = MIMEMultipart('related')
msg['Subject'] = subject
msg['From'] = settings.SIDELOADER_FROM
msg['To'] = to
msg.attach(cont)
fr = settings.SIDELOADER_FROM.split('<')[-1].strip('>')
yield sendmail('localhost', fr, msg['To'], msg.as_string())
@defer.inlineCallbacks
def sendNotification(self, message, project_id):
(name, notify, slack_channel
) = yield self.db.getProjectNotificationSettings(project_id)
if notify:
self.log("Sending notification %s" % repr(message))
if settings.SLACK_TOKEN:
if slack_channel:
channel = slack_channel
else:
channel=settings.SLACK_CHANNEL
sc = slack.SlackClient(settings.SLACK_HOST,
settings.SLACK_TOKEN, channel)
yield sc.message(name + ": " + message)
def sendSignEmail(self, to, name, release, h):
cont = 'A build release has been requested for "%s" to release stream "%s".<br/><br/>' % (name, release)
cont += "You are listed as a contact to approve this release. "
cont += "If you would like to do so please click the link below,"
cont += " if you do not agree then simply ignore this mail.<br/><br/>"
cont += "http://%s/api/rap/%s" % (settings.SIDELOADER_DOMAIN, h)
return self.sendEmail(to, cont, '%s release approval - action required' % name)
def sendScheduleNotification(self, to, release, flow, project):
cont = 'A %s release for %s has been scheduled for %s UTC' % (
flow['name'],
project['name'],
str(release['scheduled'])
)
return self.sendEmail(to, cont, '%s %s release scheduled - %s UTC' % (
project['name'],
flow['name'],
release['scheduled']
))
@defer.inlineCallbacks
def sendBuildEmail(self, to, flow, release):
build = yield self.db.getBuild(release['build_id'])
cont = 'Release %s deployed to %s' % (
build['build_file'], flow['name']
)
yield self.sendEmail(to, cont, cont)
def call_release(self, params):
return self.doRelease(
params['build_id'],
params['flow_id'],
scheduled=params.get('schedule', None)
)
@defer.inlineCallbacks
def doRelease(self, build_id, flow_id, scheduled=None):
build = yield self.db.getBuild(build_id)
flow = yield self.db.getFlow(flow_id)
release_id = yield self.db.createRelease({
'flow_id': flow_id,
'build_id': build_id,
'waiting': True,
'scheduled': scheduled,
'release_date': datetime.datetime.now(),
'lock': False
})
if scheduled:
release = yield self.db.getRelease(release_id)
reactor.callLater(0, self.sendNotification,
'Deployment scheduled for build %s at %s UTC to %s' % (
build['build_file'],
release['scheduled'],
flow['name']
), flow['project_id'])
project = yield self.db.getProject(flow['project_id'])
for name, email in settings.ADMINS:
reactor.callLater(
0, self.sendScheduleNotification, email, release, flow, project)
if flow['require_signoff']:
# Create a signoff release
# Turn whatever junk is in the email text into a list
users = self.db.getFlowSignoffList(flow)
project = yield self.db.getProject(flow['project_id'])
for email in users:
h = uuid.uuid1().get_hex()
so_id = yield self.db.createReleaseSignoff({
'release_id': release_id,
'signature': email,
'idhash': h,
'signed': False
})
reactor.callLater(0, self.sendSignEmail,
email, project['name'], flow['name'], h)
@defer.inlineCallbacks
def pushTargets(self, release, flow):
"""
Pushes a release using Specter
"""
targets = yield self.db.getFlowTargets(flow['id'])
project = yield self.db.getProject(flow['project_id'])
for target in targets:
server = yield self.db.getServer(target['server_id'])
self.log("Deploing release %s to target %s" % (repr(release), server['name']))
build = yield self.db.getBuild(release['build_id'])
yield self.sendNotification('Deployment started for build %s -> %s' % (
build['build_file'],
server['name']
), project['id'])
yield self.db.updateTargetState(target['id'], 1)
sc = specter.SpecterClient(server['name'],
settings.SPECTER_AUTHCODE, settings.SPECTER_SECRET)
if project['package_name']:
package = project['package_name']
else:
url = project['github_url']
package = url.split(':')[1].split('/')[-1][:-4]
url = "%s/%s" % (
settings.SIDELOADER_PACKAGEURL,
build['build_file']
)
stop, start, restart, puppet = "", "", "", ""
try:
if flow['service_pre_stop']:
stop = yield sc.get_all_stop()
stop = stop['stdout']
result = yield sc.post_install({
'package': package,
'url': url
})
if ('error' in result) or (result.get('code',2) > 0) or (
result.get('stderr') and not result.get('stdout')):
# Errors during deployment
yield self.db.updateTargetState(target['id'], 3)
if 'error' in result:
yield self.db.updateTargetLog(target['id'],
'\n'.join([stop, result['error']])
)
else:
yield self.db.updateTargetLog(target['id'],
'\n'.join([
stop, result['stdout'], result['stderr']
])
)
yield self.sendNotification('Deployment of build %s to %s failed!' % (
build['build_file'], server['name']
), project['id'])
# Start services back up even on failure
if flow['service_pre_stop']:
start = yield sc.get_all_start()
start = start['stdout']
else:
if flow['puppet_run']:
puppet = yield sc.get_puppet_run()
puppet = puppet['stdout']
if flow['service_pre_stop']:
start = yield sc.get_all_start()
start = start['stdout']
elif flow['service_restart']:
r1 = yield sc.get_all_stop()
r2 = yield sc.get_all_start()
restart = r1['stdout'] + r2['stdout']
yield self.db.updateTargetState(target['id'], 2)
yield self.db.updateTargetLog(target['id'],
'\n'.join([
stop, result['stdout'], result['stderr'], puppet, start, restart
])
)
yield self.db.updateTargetBuild(target['id'], build['id'])
yield self.sendNotification('Deployment of build %s to %s complete' % (
build['build_file'],
server['name']
), project['id'])
yield self.db.updateServerStatus(server['id'], "Reachable")
except Exception, e:
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
yield self.db.updateTargetLog(target['id'], ''.join(lines))
yield self.db.updateTargetState(target['id'], 3)
yield self.db.updateServerStatus(server['id'], ''.join(lines))
yield self.sendNotification('Deployment of build %s to %s failed!' % (
build['build_file'],
server['name']
), project['id'])
yield self.db.updateReleaseState(release['id'])
@defer.inlineCallbacks
def streamRelease(self, release):
build = yield self.db.getBuild(release['build_id'])
flow = yield self.db.getFlow(release['flow_id'])
stream = yield self.db.getReleaseStream(flow['stream_id'])
yield self.sendNotification('Pushing build %s to %s stream' % (
build['build_file'],
stream['name']
), flow['project_id'])
# Stream release
push_cmd = stream['push_command']
result = yield fork('/bin/sh', ('-c', push_cmd % os.path.join(
'/workspace/packages/', build['build_file'])))
yield self.db.updateReleaseState(release['id'])
@defer.inlineCallbacks
def cleanRelease(self, release):
if release['waiting']:
flow = yield self.db.getFlow(release['flow_id'])
next_release = yield self.db.getNextFlowRelease(release['flow_id'])
last_release = yield self.db.getLastFlowRelease(release['flow_id'])
# Cleanup stale releases, deprecated by request date
if next_release:
if release['release_date'] < next_release['release_date']:
yield self.db.updateReleaseState(release['id'])
if last_release:
if release['release_date'] < last_release['release_date']:
yield self.db.updateReleaseState(release['id'])
@defer.inlineCallbacks
def call_runrelease(self, params):
release = yield self.db.getRelease(params['release_id'])
if release['waiting']:
flow = yield self.db.getFlow(release['flow_id'])
signoff = yield self.db.checkReleaseSignoff(release['id'], flow)
if self.db.checkReleaseSchedule(release) and signoff:
yield self.db.updateReleaseLocks(release['id'], True)
addrs = self.db.getFlowNotifyList(flow)
for to in addrs:
reactor.callLater(
0, self.sendBuildEmail, to, flow, release)
# Release the build
if flow['stream_mode'] == 0:
# Stream only
yield self.streamRelease(release)
elif flow['stream_mode'] == 2:
# Stream and targets
yield self.streamRelease(release)
yield self.pushTargets(release, flow)
else:
# Target only
yield self.pushTargets(release, flow)
yield self.db.updateReleaseLocks(release['id'], False)
@cron(secs="*/10")
@defer.inlineCallbacks
def call_checkreleases(self, params):
releases = yield self.db.getReleases(waiting=True, lock=False)
#self.log("Release queue is at %s" % len(releases))
skip = []
# Clean old releases
for release in releases:
yield self.cleanRelease(release)
current = yield self.db.countReleases(
release['flow_id'], waiting=True, lock=True)
if current > 0:
self.log("Skipping release %s on this run - %s in queue" % (
repr(release), current))
skip.append(release['id'])
# Lock all the release objects we now see
r = yield self.db.getReleases(waiting=True, lock=False)
releases = [i for i in r if i['id'] not in skip]
#for release in releases:
# yield self.db.updateReleaseLocks(release['id'], True)
for release in releases:
self.log("Running release %s" % repr(release))
# XXX Use client queue
reactor.callLater(0, self.call_runrelease,
{'release_id': release['id']})
@defer.inlineCallbacks
def endBuild(self, code, project_id, build_id, idhash):
workspace = os.path.join('/workspace', idhash)
package = os.path.join(workspace, 'package')
packages = '/workspace/packages'
del self.build_locks[project_id]
if code != 0:
yield self.db.setBuildState(build_id, 2)
reactor.callLater(0, self.sendNotification,
'Build <http://%s/projects/build/view/%s|#%s> failed' % (
settings.SIDELOADER_DOMAIN, build_id, build_id
), project_id)
else:
if not os.path.exists(packages):
os.makedirs(packages)
debs = [i for i in os.listdir(package) if ((i[-4:]=='.deb') or (i[-4:]=='.rpm'))]
if not debs:
                # No package was produced, so the build actually failed.
yield self.db.setBuildState(build_id, 2)
reactor.callLater(0, self.sendNotification,
'Build <http://%s/projects/build/view/%s|#%s> failed' % (
settings.SIDELOADER_DOMAIN, build_id, build_id
), project_id)
else:
deb = debs[0]
yield self.db.setBuildState(build_id, 1)
yield self.db.setBuildFile(build_id, deb)
reactor.callLater(0, self.sendNotification,
'Build <http://%s/projects/build/view/%s|#%s> successful' % (
settings.SIDELOADER_DOMAIN, build_id, build_id
),
project_id)
# Relocate the package to our archive
shutil.move(os.path.join(package, deb), os.path.join(packages, deb))
# Find any auto-release streams
# XXX Implement auto flow XXX
flows = yield self.db.getAutoFlows(project_id)
if flows:
for flow in flows:
reactor.callLater(0, self.doRelease, build_id, flow['id'])
@defer.inlineCallbacks
def call_build(self, params):
"""
Use subprocess to execute a build, update the db with results along the way
"""
build_id = params['build_id']
build = yield self.db.getBuild(build_id)
project_id = build['project_id']
if project_id in self.build_locks:
if (time.time() - self.build_locks[project_id]) < 1800:
# Don't build
defer.returnValue(None)
self.build_locks[project_id] = time.time()
project = yield self.db.getProject(project_id)
chunks = project['github_url'].split(':')[1].split('/')
repo = chunks[-1][:-4]
# Get a build number
build_num = yield self.db.getBuildNumber(repo)
build_num += 1
# Increment the project build number
yield self.db.setBuildNumber(repo, build_num)
local = self.config.get('localdir',
os.path.join(os.path.dirname(sys.argv[0]), '../..'))
buildpack = os.path.join(local, 'bin/build_package')
# Figure out some directory paths
if settings.DEBUG:
print "Executing build %s %s" % (project['github_url'], project['branch'])
reactor.callLater(0, self.sendNotification,
'Build <http://%s/projects/build/view/%s|#%s> started for branch %s' % (
settings.SIDELOADER_DOMAIN, build_id, build_id, project['branch']
), project_id)
args = ['build_package', '--branch', project['branch'], '--build', str(build_num), '--id', project['idhash']]
if project['deploy_file']:
args.extend(['--deploy-file', project['deploy_file']])
if project['package_name']:
args.extend(['--name', project['package_name']])
if project['build_script']:
args.extend(['--build-script', project['build_script']])
if project['postinstall_script']:
args.extend(['--postinst-script', project['postinstall_script']])
if project['package_manager']:
args.extend(['--packman', project['package_manager']])
if project['deploy_type']:
args.extend(['--dtype', project['deploy_type']])
args.append(project['github_url'])
self.log('Spawning build %s: %s :: %s %s' % (build_id, local, buildpack, repr(args)))
buildProcess = BuildProcess(build_id, project_id, project['idhash'], self.db, self.endBuild)
proc = reactor.spawnProcess(buildProcess, buildpack, args=args, path=local, env=os.environ)
|
|
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import heapq
from neutron_lib import constants
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
from sqlalchemy import sql
from neutron._i18n import _LI, _LW
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import api as db_api
from neutron.extensions import availability_zone as az_ext
from neutron.scheduler import base_resource_filter
from neutron.scheduler import base_scheduler
LOG = logging.getLogger(__name__)
class AutoScheduler(object):
def auto_schedule_networks(self, plugin, context, host):
"""Schedule non-hosted networks to the DHCP agent on the specified
host.
"""
agents_per_network = cfg.CONF.dhcp_agents_per_network
# a list of (agent, net_ids) tuples
bindings_to_add = []
with context.session.begin(subtransactions=True):
fields = ['network_id', 'enable_dhcp']
subnets = plugin.get_subnets(context, fields=fields)
net_ids = set(s['network_id'] for s in subnets
if s['enable_dhcp'])
if not net_ids:
LOG.debug('No non-hosted networks')
return False
query = context.session.query(agents_db.Agent)
query = query.filter(agents_db.Agent.agent_type ==
constants.AGENT_TYPE_DHCP,
agents_db.Agent.host == host,
agents_db.Agent.admin_state_up == sql.true())
dhcp_agents = query.all()
for dhcp_agent in dhcp_agents:
if agents_db.AgentDbMixin.is_agent_down(
dhcp_agent.heartbeat_timestamp):
LOG.warning(_LW('DHCP agent %s is not active'),
dhcp_agent.id)
continue
for net_id in net_ids:
agents = plugin.get_dhcp_agents_hosting_networks(
context, [net_id])
if len(agents) >= agents_per_network:
continue
if any(dhcp_agent.id == agent.id for agent in agents):
continue
net = plugin.get_network(context, net_id)
az_hints = (net.get(az_ext.AZ_HINTS) or
cfg.CONF.default_availability_zones)
if (az_hints and
dhcp_agent['availability_zone'] not in az_hints):
continue
bindings_to_add.append((dhcp_agent, net_id))
        # Do the binding outside the transaction so one failed scheduling
        # result doesn't make the others fail.
for agent, net_id in bindings_to_add:
self.resource_filter.bind(context, [agent], net_id)
return True
class ChanceScheduler(base_scheduler.BaseChanceScheduler, AutoScheduler):
def __init__(self):
super(ChanceScheduler, self).__init__(DhcpFilter())
class WeightScheduler(base_scheduler.BaseWeightScheduler, AutoScheduler):
def __init__(self):
super(WeightScheduler, self).__init__(DhcpFilter())
class AZAwareWeightScheduler(WeightScheduler):
def select(self, plugin, context, resource_hostable_agents,
resource_hosted_agents, num_agents_needed):
"""AZ aware scheduling
If the network has multiple AZs, agents are scheduled as
follows:
- select AZ with least agents scheduled for the network
(nondeterministic for AZs with same amount of agents scheduled)
- choose agent in the AZ with WeightScheduler
"""
hostable_az_agents = collections.defaultdict(list)
num_az_agents = {}
for agent in resource_hostable_agents:
az_agent = agent['availability_zone']
hostable_az_agents[az_agent].append(agent)
if az_agent not in num_az_agents:
num_az_agents[az_agent] = 0
if num_agents_needed <= 0:
return []
for agent in resource_hosted_agents:
az_agent = agent['availability_zone']
if az_agent in num_az_agents:
num_az_agents[az_agent] += 1
num_az_q = [(value, key) for key, value in num_az_agents.items()]
heapq.heapify(num_az_q)
chosen_agents = []
while num_agents_needed > 0:
num, select_az = heapq.heappop(num_az_q)
select_agent = super(AZAwareWeightScheduler, self).select(
plugin, context, hostable_az_agents[select_az], [], 1)
chosen_agents.append(select_agent[0])
hostable_az_agents[select_az].remove(select_agent[0])
if hostable_az_agents[select_az]:
heapq.heappush(num_az_q, (num + 1, select_az))
num_agents_needed -= 1
return chosen_agents
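# Worked example for AZAwareWeightScheduler.select above: with hosted counts
# {az1: 2, az2: 0} and two agents needed, (0, az2) pops first and is pushed
# back as (1, az2), which still beats (2, az1) on the second pop, so both new
# agents come from the least-loaded AZ (assuming az2 has two hostable agents).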
class DhcpFilter(base_resource_filter.BaseResourceFilter):
def bind(self, context, agents, network_id):
"""Bind the network to the agents."""
# customize the bind logic
bound_agents = agents[:]
for agent in agents:
# saving agent_id to use it after rollback to avoid
# DetachedInstanceError
agent_id = agent.id
binding = agentschedulers_db.NetworkDhcpAgentBinding()
binding.dhcp_agent_id = agent_id
binding.network_id = network_id
try:
with db_api.autonested_transaction(context.session):
context.session.add(binding)
# try to actually write the changes and catch integrity
# DBDuplicateEntry
except db_exc.DBDuplicateEntry:
# it's totally ok, someone just did our job!
bound_agents.remove(agent)
LOG.info(_LI('Agent %s already present'), agent_id)
LOG.debug('Network %(network_id)s is scheduled to be '
'hosted by DHCP agent %(agent_id)s',
{'network_id': network_id,
'agent_id': agent_id})
super(DhcpFilter, self).bind(context, bound_agents, network_id)
def filter_agents(self, plugin, context, network):
"""Return the agents that can host the network.
This function returns a dictionary which has 3 keys.
        n_agents: The number of agents that should be scheduled. If
        n_agents=0, the network is already fully scheduled or no more
        agents can host it.
hostable_agents: A list of agents which can host the network.
hosted_agents: A list of agents which already hosts the network.
"""
agents_dict = self._get_network_hostable_dhcp_agents(
plugin, context, network)
if not agents_dict['hostable_agents'] or agents_dict['n_agents'] <= 0:
return {'n_agents': 0, 'hostable_agents': [],
'hosted_agents': agents_dict['hosted_agents']}
return agents_dict
def _get_dhcp_agents_hosting_network(self, plugin, context, network):
"""Return dhcp agents hosting the given network or None if a given
network is already hosted by enough number of agents.
"""
agents_per_network = cfg.CONF.dhcp_agents_per_network
#TODO(gongysh) don't schedule the networks with only
# subnets whose enable_dhcp is false
with context.session.begin(subtransactions=True):
network_hosted_agents = plugin.get_dhcp_agents_hosting_networks(
context, [network['id']])
if len(network_hosted_agents) >= agents_per_network:
LOG.debug('Network %s is already hosted by enough agents.',
network['id'])
return
return network_hosted_agents
def _get_active_agents(self, plugin, context, az_hints):
"""Return a list of active dhcp agents."""
with context.session.begin(subtransactions=True):
filters = {'agent_type': [constants.AGENT_TYPE_DHCP],
'admin_state_up': [True]}
if az_hints:
filters['availability_zone'] = az_hints
active_dhcp_agents = plugin.get_agents_db(
context, filters=filters)
if not active_dhcp_agents:
LOG.warning(_LW('No more DHCP agents'))
return []
return active_dhcp_agents
def _get_network_hostable_dhcp_agents(self, plugin, context, network):
"""Provide information on hostable DHCP agents for network.
The returned value includes the number of agents that will actually
host the given network, a list of DHCP agents that can host the given
network, and a list of DHCP agents currently hosting the network.
"""
hosted_agents = self._get_dhcp_agents_hosting_network(plugin,
context, network)
if hosted_agents is None:
return {'n_agents': 0, 'hostable_agents': [], 'hosted_agents': []}
n_agents = cfg.CONF.dhcp_agents_per_network - len(hosted_agents)
az_hints = (network.get(az_ext.AZ_HINTS) or
cfg.CONF.default_availability_zones)
active_dhcp_agents = self._get_active_agents(plugin, context, az_hints)
if not active_dhcp_agents:
return {'n_agents': 0, 'hostable_agents': [],
'hosted_agents': hosted_agents}
hostable_dhcp_agents = [
agent for agent in set(active_dhcp_agents)
if agent not in hosted_agents and plugin.is_eligible_agent(
context, True, agent)
]
hostable_dhcp_hosts = plugin.filter_hosts_with_network_access(
context, network['id'],
[agent['host'] for agent in hostable_dhcp_agents])
hostable_dhcp_agents = [agent for agent in hostable_dhcp_agents
if agent['host'] in hostable_dhcp_hosts]
if not hostable_dhcp_agents:
return {'n_agents': 0, 'hostable_agents': [],
'hosted_agents': hosted_agents}
n_agents = min(len(hostable_dhcp_agents), n_agents)
return {'n_agents': n_agents, 'hostable_agents': hostable_dhcp_agents,
'hosted_agents': hosted_agents}
|
|
#!/usr/bin/env python
from types import StringTypes
from lxml import etree
from StringIO import StringIO
# helper functions to help build xpaths
class XpathFilter:
@staticmethod
def filter_value(key, value):
xpath = ""
if isinstance(value, str):
if '*' in value:
value = value.replace('*', '')
xpath = 'contains(%s, "%s")' % (key, value)
else:
xpath = '%s="%s"' % (key, value)
return xpath
@staticmethod
def xpath(filter={}):
xpath = ""
if filter:
filter_list = []
for (key, value) in filter.items():
if key == 'text':
key = 'text()'
else:
key = '@'+key
if isinstance(value, str):
filter_list.append(XpathFilter.filter_value(key, value))
elif isinstance(value, list):
stmt = ' or '.join([XpathFilter.filter_value(key, str(val)) for val in value])
filter_list.append(stmt)
if filter_list:
xpath = ' and '.join(filter_list)
xpath = '[' + xpath + ']'
return xpath
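# Usage sketch (clause order follows dict iteration order, so it can vary):
#
#   XpathFilter.xpath({'name': 'node*'})  # -> '[contains(@name, "node")]'
#   XpathFilter.xpath({'text': 'foo'})    # -> '[text()="foo"]'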
# a wrapper class around lxml.etree._Element
# the reason why we need this one is because of the limitations
# we've found in xpath to address documents with multiple namespaces defined
# in a nutshell, we deal with xml documents that have
# a default namespace defined (xmlns="http://default.com/") and specific prefixes defined
# (xmlns:foo="http://foo.com")
# according to the documentation instead of writing
# element.xpath ( "//node/foo:subnode" )
# we'd then need to write xpaths like
# element.xpath ( "//{http://default.com/}node/{http://foo.com}subnode" )
# which is a real pain..
# So, to keep a reasonable programming style, we need to manage the
# namespace map that goes with the _Element (its internal .nsmap is immutable)
class XmlElement:
def __init__(self, element, namespaces):
self.element = element
self.namespaces = namespaces
# redefine as few methods as possible
def xpath(self, xpath, namespaces=None):
if not namespaces:
namespaces = self.namespaces
elems = self.element.xpath(xpath, namespaces=namespaces)
return [XmlElement(elem, namespaces) for elem in elems]
def add_element(self, tagname, **kwds):
element = etree.SubElement(self.element, tagname, **kwds)
return XmlElement(element, self.namespaces)
def append(self, elem):
if isinstance(elem, XmlElement):
self.element.append(elem.element)
else:
self.element.append(elem)
def getparent(self):
return XmlElement(self.element.getparent(), self.namespaces)
def get_instance(self, instance_class=None, fields=[]):
"""
Returns an instance (dict) of this xml element. The instance
holds a reference to this xml element.
"""
if not instance_class:
instance_class = Object
if not fields and hasattr(instance_class, 'fields'):
fields = instance_class.fields
if not fields:
instance = instance_class(self.attrib, self)
else:
instance = instance_class({}, self)
for field in fields:
if field in self.attrib:
instance[field] = self.attrib[field]
return instance
def add_instance(self, name, instance, fields=[]):
"""
Adds the specifed instance(s) as a child element of this xml
element.
"""
if not fields and hasattr(instance, 'keys'):
fields = instance.keys()
elem = self.add_element(name)
for field in fields:
if field in instance and instance[field]:
elem.set(field, unicode(instance[field]))
return elem
    def remove_elements(self, name):
        """
        Removes all occurrences of the named element from the tree, starting
        at this element.
        """
        if not name.startswith('//'):
            name = '//' + name
        elements = self.element.xpath(name, namespaces=self.namespaces)
for element in elements:
parent = element.getparent()
parent.remove(element)
def delete(self):
parent = self.getparent()
parent.remove(self)
def remove(self, element):
if isinstance(element, XmlElement):
self.element.remove(element.element)
else:
self.element.remove(element)
def set_text(self, text):
self.element.text = text
# Element does not have unset ?!?
def unset(self, key):
del self.element.attrib[key]
def toxml(self):
return etree.tostring(self.element, encoding='UTF-8', pretty_print=True)
def __str__(self):
return self.toxml()
# are redirected on self.element
def __getattr__ (self, name):
if not hasattr(self.element, name):
raise AttributeError, name
return getattr(self.element, name)
class Xml:
def __init__(self, xml=None, namespaces=None):
self.root = None
self.namespaces = namespaces
self.default_namespace = None
self.schema = None
if isinstance(xml, basestring):
self.parse_xml(xml)
if isinstance(xml, XmlElement):
self.root = xml
self.namespaces = xml.namespaces
elif isinstance(xml, etree._ElementTree) or isinstance(xml, etree._Element):
self.parse_xml(etree.tostring(xml))
def parse_xml(self, xml):
"""
parse rspec into etree
"""
parser = etree.XMLParser(remove_blank_text=True)
try:
tree = etree.parse(xml, parser)
except IOError:
            # The 'rspec' file doesn't exist; 'rspec' is probably an XML string.
try:
tree = etree.parse(StringIO(xml), parser)
except Exception, e:
raise Exception, str(e)
root = tree.getroot()
self.namespaces = dict(root.nsmap)
# set namespaces map
if 'default' not in self.namespaces and None in self.namespaces:
            # A None key points at the default namespace. That makes writing
            # xpath queries for the default namespace hard, because lxml
            # won't accept a None prefix, so re-key the default namespace
            # under 'default'.
self.namespaces['default'] = self.namespaces.pop(None)
else:
self.namespaces['default'] = 'default'
self.root = XmlElement(root, self.namespaces)
# set schema
for key in self.root.attrib.keys():
if key.endswith('schemaLocation'):
# schemaLocation is a whitespace-separated list of
# (namespace, schema) pairs; filter out empty strings.
schema_parts = [x for x in self.root.attrib[key].split(' ') if x]
namespace, schema = schema_parts[0], schema_parts[1]
self.schema = schema
break
def parse_dict(self, d, root_tag_name='xml', element = None):
if element is None:
if self.root is None:
self.parse_xml('<%s/>' % root_tag_name)
element = self.root.element
if 'text' in d:
text = d.pop('text')
element.text = text
# handle repeating fields
for (key, value) in d.items():
if isinstance(value, list):
value = d.pop(key)
for val in value:
if isinstance(val, dict):
child_element = etree.SubElement(element, key)
self.parse_dict(val, key, child_element)
elif isinstance(val, basestring):
child_element = etree.SubElement(element, key)
child_element.text = val
elif isinstance(value, int):
d[key] = unicode(d[key])
elif value is None:
d.pop(key)
# element.attrib.update will explode if DateTimes are in the
# dictionary.
d = d.copy()
# iterate over a copy of the keys since we delete entries below
for k in d.keys():
if not isinstance(d[k],StringTypes):
del d[k]
element.attrib.update(d)
def validate(self, schema):
"""
Validate against rng schema
"""
relaxng_doc = etree.parse(schema)
relaxng = etree.RelaxNG(relaxng_doc)
if not relaxng(self.root.element):
error = relaxng.error_log.last_error
message = "%s (line %s)" % (error.message, error.line)
raise Exception(message)
return True
def xpath(self, xpath, namespaces=None):
if not namespaces:
namespaces = self.namespaces
return self.root.xpath(xpath, namespaces=namespaces)
def set(self, key, value):
return self.root.set(key, value)
def remove_attribute(self, name, element=None):
if not element:
element = self.root
element.remove_attribute(name)
def add_element(self, *args, **kwds):
"""
Wrapper around etree.SubElement(). Adds an element under the
tree's root node.
"""
return self.root.add_element(*args, **kwds)
def remove_elements(self, name, element = None):
"""
Removes all occurrences of an element from the tree. Starts at
the specified element if given, otherwise at the tree's root.
"""
if not element:
element = self.root
element.remove_elements(name)
def add_instance(self, *args, **kwds):
return self.root.add_instance(*args, **kwds)
def get_instance(self, *args, **kwds):
return self.root.get_instance(*args, **kwds)
def get_element_attributes(self, elem=None, depth=0):
if elem is None:
elem = self.root
if not hasattr(elem, 'attrib'):
# this is probably not an element node with attributes; it could
# be just an attribute value, so return it as-is
return elem
attrs = dict(elem.attrib)
attrs['text'] = str(elem.text).strip()
attrs['parent'] = elem.getparent()
if isinstance(depth, int) and depth > 0:
for child_elem in list(elem):
key = str(child_elem.tag)
if key not in attrs:
attrs[key] = [self.get_element_attributes(child_elem, depth-1)]
else:
attrs[key].append(self.get_element_attributes(child_elem, depth-1))
else:
attrs['child_nodes'] = list(elem)
return attrs
def append(self, elem):
return self.root.append(elem)
def iterchildren(self):
return self.root.iterchildren()
def merge(self, in_xml):
pass
def __str__(self):
return self.toxml()
def toxml(self):
return etree.tostring(self.root.element, encoding='UTF-8', pretty_print=True)
# XXX smbaker, for record.load_from_string
def todict(self, elem=None):
if elem is None:
elem = self.root
d = {}
d.update(elem.attrib)
d['text'] = elem.text
for child in elem.iterchildren():
if child.tag not in d:
d[child.tag] = []
d[child.tag].append(self.todict(child))
if len(d)==1 and ("text" in d):
d = d["text"]
return d
def save(self, filename):
f = open(filename, 'w')
f.write(self.toxml())
f.close()
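# Illustrative usage sketch (not part of the original module; assumes lxml
# is installed and the Xml/XmlElement wrappers behave as defined above).
# Builds a tiny document, adds a child element and queries it back.
def _example_xml_usage():
    doc = Xml('<network name="test"/>')
    node = doc.add_element('node', component_id='node1')
    node.set_text('example text')
    found = doc.xpath('//node')
    return doc.toxml(), found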
|
|
#!/usr/bin/python3
import gym
from gym import wrappers
import sys
import argparse
import numpy as np
import sklearn.pipeline
import sklearn.preprocessing
from sklearn.linear_model import SGDRegressor
from sklearn.kernel_approximation import RBFSampler
from matplotlib import pyplot as plt
plt.style.use("ggplot")
def plot_unimetric(history, metric, save_dir):
plt.figure()
plt.plot(history[metric])
plt.title('model {}'.format(metric))
plt.ylabel(metric)
plt.xlabel('epoch')
plt.savefig("{}/{}.png".format(save_dir, metric),
format='png', dpi=300)
def make_epsilon_greedy_policy(estimator, epsilon, nA):
"""
Creates an epsilon-greedy policy based on a given Q-function approximator and epsilon.
Args:
estimator: An estimator that returns q values for a given state
epsilon: The probability of selecting a random action. Float between 0 and 1.
nA: Number of actions in the environment.
Returns:
A function that takes the observation as an argument and returns
the probabilities for each action in the form of a numpy array of length nA.
"""
def policy_fn(observation):
A = np.ones(nA, dtype=float) * epsilon / nA
q_values = estimator.predict(observation)
best_action = np.argmax(q_values)
A[best_action] += (1.0 - epsilon)
return A
return policy_fn
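# Minimal sanity check (illustrative, not part of the original script): with
# a stub estimator that always ranks action 1 highest, epsilon=0.2 and nA=4,
# each action gets a base probability of 0.2/4 = 0.05 and the greedy action
# gets 0.05 + (1 - 0.2) = 0.85.
def _demo_epsilon_greedy():
    class _StubEstimator(object):
        def predict(self, observation):
            return np.array([0.0, 1.0, 0.5, 0.2])
    policy = make_epsilon_greedy_policy(_StubEstimator(), epsilon=0.2, nA=4)
    probs = policy(None)
    assert np.isclose(probs.sum(), 1.0)
    return probs  # array([0.05, 0.85, 0.05, 0.05])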
class Estimator(object):
"""
Value Function approximator.
"""
def __init__(self, env):
self._prepare_estimator_for_env(env)
# We create a separate model for each action in the environment's
# action space. Alternatively we could somehow encode the action
# into the features, but this way it's easier to code up.
self.models = []
for _ in range(env.action_space.n):
model = SGDRegressor(learning_rate="constant")
# We need to call partial_fit once to initialize the model
# or we get a NotFittedError when trying to make a prediction
# This is quite hacky.
model.partial_fit([self.featurize_state(env.reset())], [0])
self.models.append(model)
def _prepare_estimator_for_env(self, env):
observation_examples = np.array(
[env.observation_space.sample() for _ in range(1000)])
observation_examples = self._vectorise_state(observation_examples)
scaler = sklearn.preprocessing.StandardScaler()
scaler.fit(observation_examples)
self.scaler = scaler
featurizer = sklearn.pipeline.FeatureUnion([
("rbf1", RBFSampler(gamma=5.0, n_components=100)),
("rbf2", RBFSampler(gamma=2.0, n_components=100)),
("rbf3", RBFSampler(gamma=1.0, n_components=100)),
("rbf4", RBFSampler(gamma=0.5, n_components=100))
])
featurizer.fit(scaler.transform(observation_examples))
self.featurizer = featurizer
def _vectorise_state(self, states):
obs_shape = states.shape
if len(obs_shape) > 2:
states = states.reshape((obs_shape[0], -1))
return states
def featurize_state(self, state):
"""
Returns the featurized representation for a state.
"""
state = self._vectorise_state(np.array([state]))
scaled = self.scaler.transform(state)
featurized = self.featurizer.transform(scaled)
return featurized[0]
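# The FeatureUnion above concatenates four RBFSampler maps of 100
# components each, so featurize_state() returns a 400-dimensional
# feature vector for every observation.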
def predict(self, s, a=None):
"""
Makes value function predictions.
Args:
s: state to make a prediction for
a: (Optional) action to make a prediction for
Returns
If an action a is given this returns a single number as the prediction.
If no action is given this returns a vector of predictions for all actions
in the environment where pred[i] is the prediction for action i.
"""
features = self.featurize_state(s)
return self.models[a].predict([features])[0] if a is not None \
else np.array([model.predict([features])[0] for model in self.models])
def update(self, s, a, y):
"""
Updates the estimator parameters for a given state and action towards
the target y.
"""
features = self.featurize_state(s)
self.models[a].partial_fit([features], [y])
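# Illustrative sketch (not part of the original script): one predict/update
# round trip on the Estimator above. Assumes a gym environment id such as
# 'MountainCar-v0' is available locally; predict(state, a) relies on the
# "a is not None" check so that action 0 is handled correctly.
def _demo_estimator(env_name='MountainCar-v0'):
    env = gym.make(env_name)
    estimator = Estimator(env)
    state = env.reset()
    q_values = estimator.predict(state)   # one value per action
    estimator.update(state, 0, 0.5)       # regress Q(state, 0) towards 0.5
    return q_values, estimator.predict(state, 0)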
def q_learning(env, estimator, num_episodes, discount_factor=1.0, epsilon=0.1, epsilon_decay=1.0,
verbose=False):
"""
Q-Learning algorithm for off-policy TD control using Function Approximation.
Finds the optimal greedy policy while following an epsilon-greedy policy.
Args:
env: OpenAI environment.
estimator: Action-Value function estimator
num_episodes: Number of episodes to run for.
discount_factor: Gamma, the time-discount factor.
epsilon: Chance to sample a random action. Float between 0 and 1.
epsilon_decay: Each episode, epsilon is decayed by this factor
Returns:
A dict with two numpy arrays, episode_rewards and episode_lengths.
"""
# Keeps track of useful statistics
episode_lengths = np.zeros(num_episodes)
episode_rewards = np.zeros(num_episodes)
for i_episode in range(num_episodes):
# The policy we're following
policy = make_epsilon_greedy_policy(
estimator, epsilon * epsilon_decay ** i_episode, env.action_space.n)
# Print out which episode we're on, useful for debugging.
# Also print reward for last episode
if verbose:
last_reward = episode_rewards[i_episode - 1]
print("\rEpisode {}/{} ({})".format(i_episode + 1, num_episodes, last_reward), end="")
sys.stdout.flush()
state = env.reset()
n_action = None
len_counter = 0
reward_counter = 0
done = False
while not done:
# Rendering is disabled by default; uncomment to watch the agent:
# if verbose:
#     env.render()
if n_action is None:
probs = policy(state)
action = np.random.choice(np.arange(len(probs)), p=probs)
else:
action = n_action
n_state, reward, done, info = env.step(action)
reward_counter += reward
len_counter += 1
q_val_next = estimator.predict(n_state)
td_target = reward + discount_factor * np.max(q_val_next)
estimator.update(state, action, td_target)
state = n_state
episode_rewards[i_episode] = reward_counter
episode_lengths[i_episode] = len_counter
return {"episode_rewards": episode_rewards, "episode_lengths": episode_lengths}
def _parse_args():
parser = argparse.ArgumentParser(description='Policy iteration example')
parser.add_argument(
'--env',
type=str,
default='MountainCar-v0', # CartPole-v0, MountainCar-v0
help='The environment to use')
parser.add_argument(
'--num_episodes',
type=int,
default=1000,
help='Number of episodes')
parser.add_argument(
'--gamma',
type=float,
default=0.99,
help='Gamma discount factor')
parser.add_argument(
'--verbose',
action='store_true',
default=False)
parser.add_argument(
'--plot_stats',
action='store_true',
default=False)
parser.add_argument(
'--api_key',
type=str,
default=None)
args, _ = parser.parse_known_args()
return args
def save_stats(stats, save_dir="./"):
for key in stats:
plot_unimetric(stats, key, save_dir)
def run(env, n_episodes, discount_factor, verbose=False, plot_stats=False, api_key=None):
env_name = env
env = gym.make(env)
estimator = Estimator(env)
if api_key is not None:
env = gym.wrappers.Monitor(env, "/tmp/" + env_name, force=True)
stats = q_learning(env, estimator, n_episodes,
discount_factor=discount_factor, epsilon=0.0,
verbose=verbose)
if plot_stats:
save_stats(stats)
if api_key is not None:
env.close()
gym.upload("/tmp/" + env_name, api_key=api_key)
def main():
args = _parse_args()
run(args.env, args.num_episodes, args.gamma,
args.verbose, args.plot_stats, args.api_key)
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
#-Imports----------------------------------------------------------------------
#--Python Imports.
import os
import sys
#--wxPython Imports.
import wx
from wx.lib.embeddedimage import PyEmbeddedImage
#-Globals----------------------------------------------------------------------
paperairplane_arrow_blue24 = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAACpUlEQVR42q3VC0hTYRgG4M6m"
"m8uRmGaVV8SQSrDMbJ6azpZ40qXLXM05y2GikFdKS4PUrEINUygVQ0FalmU3soiIhEohbEGS"
"RlQkRChUUAVBCL19/zBBNNDT+eBh8AO87LtsC6h+ExNRE45IWwo3d9DHBNETleQhfqEahMVa"
"WMhPEkaUkgYErtHCWNSOkAgBnEz+mZ4CiKtkAaFRBmxNPwJDbj2Cw7eA42Tv6NmHuEgSEKY1"
"wZjXgJiUYtiqLmNZcDjouZ94SxISrtsNS2kHNPG5EDKqkXe8G55LA1nIJeJJ5ER8rYszI6vC"
"Dk1CgVNqXhMKTnVjoYc3C6kjHkQmOiBSn46cym5oth2ckl1uR9FJO5QqNbuRbKIWHbIxPgP5"
"J65DY6iYpvT0Lew/2gq5q/L/boQXrDhQ34vo5GPT8Ck1qOvoQ1ZhDWRyF/E3EpOYiYqm+4je"
"UTuDztwA+53nSDJls/UVdyO67XtQ3dKH6J2NsxJsbbj75BUieT0b+vxvJN64F7XtA+BNrf+U"
"X30bjx2v4eamYiH3iNec11dItaHRPgje3DErU3EPHMMfoNE6v8E3kkaC5jwPQ5oNzVdegLdc"
"nGGztQuOkTHssu7DAo5jgzaS9fNqU1KqFed7hsBbr86QU/UQff0OtkXsHkpIFFlOFHNeWSEx"
"BeNfvuPZ8Bh6H42irWcElrIH2JR5Ew2dQzhUWft3uLFTrZlnwcvbBzp9AvJLDqOz6xrejI6j"
"7MwgLvS+hZBsZgEtJIJ4ijm2QnKDvCSfqNe/fANCMPDUgfcffyAoZBULsJKVzmsWUWriT1aT"
"tWQDKXdVKCfOnmuGr58/67+WrBD7yyojCuJOFk22YQmJI1/JBOHZm5T/1zKimhzq4GT/FxNJ"
"iyMK4kP8nK0UUX8Azg5aSnmghYAAAAAASUVORK5CYII=")
paperairplane_arrow_white24 = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAAB2ElEQVR4Aa3VsatSYRiA8Twd"
"y5Pa5SoXi0RpErQuQehQIEREi+ka5KSLew4uDaKLgy26C7bkpIHgaOlgSINIEjgo0f0HqiFo"
"eXve4UxO174PfogqPpzj971eYf3EQ/jggdnl8/mEh+84xzXjgUQiIdlsViNfEYfXeKDZbEoy"
"mdTIR0RgGw30+31pt9sSjUY18h4hXDUWGAwGsl6vpV6vSzgc1shbnMAyFtjv97JYLKRarYrf"
"79fIawRhGQuo6XQqlUpFvF7vX95+BT88xgJqPB5LsVgUy7J+85En7hkxFlD6Wj6f11v1wz0j"
"RgO73U56vd7hGTEVUNvtVrrdrqRSqcMzYiKgNpuNtFqtwzNiKqBms5luX91ZGqnjJiwjgdVq"
"JY1Gw72CL3iKW7D/O6A/dKfTcefUNzzHPZwauYLlcimlUkm//A+KeIAz2JeeprpbdOiNRiPR"
"26KByWQimUxGAx/wCHeO2ariOI7E43FJp9NSKBSkVqvJfD6X4XAosVjMnUtJBI450c/wBu/w"
"CXsNlstl0fsfCAQ08AKxY/+MHERwFwncx0vm0EUul5NgMPiL548ROXayemDjOm4giBDO8RkX"
"yCAEY8sDDd5GF0mcwPiycYozOLj0+gej7JQuh90YaAAAAABJRU5ErkJggg==")
gFileDir = os.path.dirname(os.path.abspath(__file__))
ID_PAPERAIRPLANE_ARROW_BLUE = 2001
ID_PAPERAIRPLANE_ARROW_RED = 2002
ID_PAPERAIRPLANE_ARROW_GREY = 2003
ID_PAPERAIRPLANE_ARROW_DARK = 2004
ID_PAPERAIRPLANE_ARROW_BLUE_FADEOUT80 = 2005
ID_PAPERAIRPLANE_ARROW_COLORSHIFT = 2006
ID_PAPERAIRPLANE_ARROW_WHITE = 2007
ID_PAPERAIRPLANE_ARROW_WHITE_PNG = 2008
ID_PAPERAIRPLANE_ARROW_BLUE_PY = 2009
ID_PAPERAIRPLANE_ARROW_WHITE_PY = 2010
cursors = {
# .cur, .ani loose files.
"paperairplane_arrow_blue.cur" : ID_PAPERAIRPLANE_ARROW_BLUE,
"paperairplane_arrow_red.cur" : ID_PAPERAIRPLANE_ARROW_RED,
"paperairplane_arrow_grey.cur" : ID_PAPERAIRPLANE_ARROW_GREY,
"paperairplane_arrow_dark.cur" : ID_PAPERAIRPLANE_ARROW_DARK,
"paperairplane_arrow_blue_fadeout80.cur" : ID_PAPERAIRPLANE_ARROW_BLUE_FADEOUT80,
"paperairplane_arrow_white.cur" : ID_PAPERAIRPLANE_ARROW_WHITE,
"paperairplane_arrow_colorshift.ani" : ID_PAPERAIRPLANE_ARROW_COLORSHIFT,
# .png loose files.
"paperairplane_arrow_white24.png" : ID_PAPERAIRPLANE_ARROW_WHITE_PNG,
# PyEmbeddedImages
"paperairplane_arrow_blue24 [PyEmbeddedImage]" : ID_PAPERAIRPLANE_ARROW_BLUE_PY,
"paperairplane_arrow_white24 [PyEmbeddedImage]" : ID_PAPERAIRPLANE_ARROW_WHITE_PY,
# wxPython Stock Cursors.
"wx.CURSOR_ARROW" : wx.CURSOR_ARROW,
"wx.CURSOR_RIGHT_ARROW" : wx.CURSOR_RIGHT_ARROW,
"wx.CURSOR_BULLSEYE" : wx.CURSOR_BULLSEYE,
"wx.CURSOR_CHAR" : wx.CURSOR_CHAR,
"wx.CURSOR_CROSS" : wx.CURSOR_CROSS,
"wx.CURSOR_HAND" : wx.CURSOR_HAND,
"wx.CURSOR_IBEAM" : wx.CURSOR_IBEAM,
"wx.CURSOR_LEFT_BUTTON" : wx.CURSOR_LEFT_BUTTON,
"wx.CURSOR_MAGNIFIER" : wx.CURSOR_MAGNIFIER,
"wx.CURSOR_MIDDLE_BUTTON" : wx.CURSOR_MIDDLE_BUTTON,
"wx.CURSOR_NO_ENTRY" : wx.CURSOR_NO_ENTRY,
"wx.CURSOR_PAINT_BRUSH" : wx.CURSOR_PAINT_BRUSH,
"wx.CURSOR_PENCIL" : wx.CURSOR_PENCIL,
"wx.CURSOR_POINT_LEFT" : wx.CURSOR_POINT_LEFT,
"wx.CURSOR_POINT_RIGHT" : wx.CURSOR_POINT_RIGHT,
"wx.CURSOR_QUESTION_ARROW" : wx.CURSOR_QUESTION_ARROW,
"wx.CURSOR_RIGHT_BUTTON" : wx.CURSOR_RIGHT_BUTTON,
"wx.CURSOR_SIZENESW" : wx.CURSOR_SIZENESW,
"wx.CURSOR_SIZENS" : wx.CURSOR_SIZENS,
"wx.CURSOR_SIZENWSE" : wx.CURSOR_SIZENWSE,
"wx.CURSOR_SIZEWE" : wx.CURSOR_SIZEWE,
"wx.CURSOR_SIZING" : wx.CURSOR_SIZING,
"wx.CURSOR_SPRAYCAN" : wx.CURSOR_SPRAYCAN,
"wx.CURSOR_WAIT" : wx.CURSOR_WAIT,
"wx.CURSOR_WATCH" : wx.CURSOR_WATCH,
"wx.CURSOR_BLANK" : wx.CURSOR_BLANK,
"wx.CURSOR_DEFAULT" : wx.CURSOR_DEFAULT,
"wx.CURSOR_COPY_ARROW" : wx.CURSOR_COPY_ARROW,
"wx.CURSOR_ARROWWAIT" : wx.CURSOR_ARROWWAIT,
}
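# Illustrative sketch (not part of the original demo): how a custom cursor is
# built from an embedded image when no .cur/.ani hotspot metadata is
# available. The same pattern is used in TestPanel.OnChooseCursor below.
def _cursor_from_embedded_image(embedded_image, hotspot=(0, 0)):
    image = embedded_image.GetImage()
    image.SetOption(wx.IMAGE_OPTION_CUR_HOTSPOT_X, hotspot[0])
    image.SetOption(wx.IMAGE_OPTION_CUR_HOTSPOT_Y, hotspot[1])
    return wx.Cursor(image)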
#-Classes----------------------------------------------------------------------
class DrawWindow(wx.Window):
def __init__(self, parent, log, id=wx.ID_ANY, pos=wx.DefaultPosition,
size=wx.DefaultSize, style=0):
wx.Window.__init__(self, parent, id, pos, size, style)
self.log = log
self.SetBackgroundColour(wx.WHITE)
self.lines = []
self.x = self.y = 0
self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
self.Bind(wx.EVT_MOTION, self.OnMotion)
self.Bind(wx.EVT_PAINT, self.OnPaint)
def OnPaint(self, event):
dc = wx.PaintDC(self)
self.DrawSavedLines(dc)
def DrawSavedLines(self, dc):
dc.SetPen(wx.Pen(wx.BLUE, 1))
for line in self.lines:
for coords in line:
dc.DrawLine(*coords)
def OnLeftDown(self, event):
self.curLine = []
self.x, self.y = event.GetPosition()
self.CaptureMouse()
def OnLeftUp(self, event):
if self.HasCapture():
self.lines.append(self.curLine)
self.curLine = []
self.ReleaseMouse()
def OnMotion(self, event):
if self.HasCapture() and event.Dragging():
dc = wx.ClientDC(self)
dc.SetPen(wx.Pen(wx.BLUE, 1))
evtPos = event.GetPosition()
coords = (self.x, self.y) + (evtPos.x, evtPos.y)
self.curLine.append(coords)
dc.DrawLine(*coords)
self.x, self.y = event.GetPosition()
class CursorTestPanel(wx.Panel):
"""
Cursor Test Panel inspired by AniFX cursor test panel.
"""
def __init__(self, parent, log):
self.log = log
wx.Panel.__init__(self, parent, -1, style=wx.BORDER_SUNKEN)
# Create all the widgets for the test panel.
pnl = wx.Panel(self, -1, style=wx.BORDER_SIMPLE)
pnl.SetBackgroundColour(wx.BLACK)
self.win = wx.Window(pnl, -1, size=(200, 100))
self.win.SetBackgroundColour("white")
self.win.Bind(wx.EVT_LEFT_DOWN, self.OnDrawDot)
self.drawWin = DrawWindow(pnl, log, size=(200, 100))
vbSizer0 = wx.BoxSizer(wx.VERTICAL)
vbSizer0.Add(self.win, 1, wx.EXPAND | wx.BOTTOM, 1)
vbSizer0.Add(self.drawWin, 1, wx.EXPAND)
pnl.SetSizer(vbSizer0)
b = wx.Button(self, -1, 'Button')
tc = wx.TextCtrl(self, -1, 'Text Ctrl')
rb1 = wx.RadioButton(self, -1, 'Radio Button 1')
rb2 = wx.RadioButton(self, -1, 'Radio Button 2')
cb = wx.CheckBox(self, -1, 'Check Box')
combo = wx.ComboBox(self, -1, 'One', choices=('One', 'Two', 'Three', 'Four', 'Five'))
sl = wx.Slider(self, -1)
sc = wx.SpinCtrl(self, -1)
# Add all the widgets to a tuple that we will access when changing cursors.
self.allWidgets = (self, pnl, self.win, self.drawWin, b, tc, rb1, rb2, cb, combo, sl, sc)
# Do the panel layout.
vbSizer = wx.BoxSizer(wx.VERTICAL)
hbSizer = wx.BoxSizer(wx.HORIZONTAL)
gSizer = wx.GridSizer(rows=4, cols=2, vgap=5, hgap=5)
gSizer.AddMany((b, tc, rb1, rb2, cb, combo, sl, sc))
hbSizer.Add(pnl, 0, wx.EXPAND | wx.ALL, 10)
hbSizer.Add(gSizer, 0, wx.EXPAND | wx.ALL, 10)
vbSizer.Add(hbSizer, 0, wx.ALL, 10)
self.SetSizer(vbSizer)
def OnDrawDot(self, event):
# Draw a dot so the user can see where the hotspot is.
dc = wx.ClientDC(self.win)
dc.SetPen(wx.Pen("RED"))
dc.SetBrush(wx.Brush("RED"))
pos = event.GetPosition()
dc.DrawCircle(pos.x, pos.y, 4)
class TestPanel(wx.Panel):
def __init__(self, parent, log):
self.log = log
wx.Panel.__init__(self, parent, -1)
# Create a list of choices from the dictionary above.
choices = cursors.keys()
choices = sorted(choices)
# Create the controls.
self.cb = wx.ComboBox(self, -1, "wx.CURSOR_DEFAULT", choices=choices,
style=wx.CB_READONLY)
self.tx = wx.StaticText(self, -1, """\
This sample allows you to see all the stock cursors available to wxPython,
and also custom cursors loaded from images, .cur, or .ani files. Simply
select a name from the wx.Choice and then move the mouse into the window
or the widgets below to see the cursor.
NOTE: not all stock cursors have a specific representation on all platforms.
""")
self.testPanel = CursorTestPanel(self, log)
# Bind events.
self.Bind(wx.EVT_COMBOBOX, self.OnChooseCursor, self.cb)
# Setup the layout.
vbSizer = wx.BoxSizer(wx.VERTICAL)
vbSizer.Add(self.tx, 0, wx.ALL, 10)
vbSizer.Add(self.cb, 0, wx.LEFT | wx.BOTTOM, 10)
gbs = wx.GridBagSizer(8, 8)
gbs.Add(self.testPanel, (0, 1), (1, 4), wx.ALIGN_LEFT)
vbSizer.Add(gbs, 0, wx.ALL)
self.SetSizer(vbSizer)
wx.CallAfter(self.cb.SetFocus) # Convenience start for mousewheel switching.
def OnChooseCursor(self, evt):
# Clear the dots.
self.testPanel.win.Refresh()
self.testPanel.drawWin.lines = []
self.testPanel.drawWin.Refresh()
choice = self.cb.GetStringSelection()
self.log.WriteText("Selecting the %s cursor\n" % choice)
cnum = cursors[choice]
if cnum in (ID_PAPERAIRPLANE_ARROW_BLUE,
ID_PAPERAIRPLANE_ARROW_RED,
ID_PAPERAIRPLANE_ARROW_GREY,
ID_PAPERAIRPLANE_ARROW_DARK,
ID_PAPERAIRPLANE_ARROW_BLUE_FADEOUT80,
ID_PAPERAIRPLANE_ARROW_WHITE,
ID_PAPERAIRPLANE_ARROW_COLORSHIFT): # .cur or .ani loose files.
if choice.endswith('.ani'):
cursor = wx.Cursor(gFileDir + os.sep + 'cursors' + os.sep + choice, wx.BITMAP_TYPE_ANI)
elif choice.endswith('.cur'):
cursor = wx.Cursor(gFileDir + os.sep + 'cursors' + os.sep + choice, wx.BITMAP_TYPE_CUR)
elif cnum == ID_PAPERAIRPLANE_ARROW_WHITE_PNG: # .png loose files.
image = wx.Image(gFileDir + os.sep + 'cursors' + os.sep + choice, wx.BITMAP_TYPE_PNG)
# Since this image didn't come from a .cur or .ani file,
# tell it where the hotspot is.
image.SetOption(wx.IMAGE_OPTION_CUR_HOTSPOT_X, 0)
image.SetOption(wx.IMAGE_OPTION_CUR_HOTSPOT_Y, 0)
# Make the image into a cursor.
cursor = wx.Cursor(image)
elif cnum in (ID_PAPERAIRPLANE_ARROW_BLUE_PY,
ID_PAPERAIRPLANE_ARROW_WHITE_PY): # PyEmbeddedImages
if cnum == ID_PAPERAIRPLANE_ARROW_BLUE_PY:
image = paperairplane_arrow_blue24.GetImage()
elif cnum == ID_PAPERAIRPLANE_ARROW_WHITE_PY:
image = paperairplane_arrow_white24.GetImage()
# Since this image didn't come from a .cur or .ani file,
# tell it where the hotspot is.
image.SetOption(wx.IMAGE_OPTION_CUR_HOTSPOT_X, 0)
image.SetOption(wx.IMAGE_OPTION_CUR_HOTSPOT_Y, 0)
# Make the image into a cursor.
cursor = wx.Cursor(image)
else:
# Create one of the stock (built-in) cursors.
cursor = wx.Cursor(cnum)
# Set the cursors for all the testPanels widgets.
[widget.SetCursor(cursor) for widget in self.testPanel.allWidgets]
def OnDrawDot(self, evt):
# Draw a dot so the user can see where the hotspot is.
dc = wx.ClientDC(self.testPanel.win)
dc.SetPen(wx.Pen("RED"))
dc.SetBrush(wx.Brush("RED"))
pos = evt.GetPosition()
dc.DrawCircle(pos.x, pos.y, 3)
#-wxPython Demo----------------------------------------------------------------
def runTest(frame, nb, log):
win = TestPanel(nb, log)
return win
overview = """<html><body>
<h2><center>wx.Cursor</center></h2>
This demo shows the stock mouse cursors that are available to wxPython.
</body></html>
"""
if __name__ == '__main__':
import os
import sys
import run
run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
|
|
#!/usr/bin/env python
# coding: utf-8
# Copyright 2013 The Font Bakery Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See AUTHORS.txt for the list of Authors and LICENSE.txt for the License.
from __future__ import print_function
import argparse
import logging
import os
from fontTools.ttLib import TTFont
from fontTools.ttLib.tables._n_a_m_e import NameRecord
from bakery_cli.logger import logger
from bakery_cli.utils import NameTableNamingRule
description = ''
parser = argparse.ArgumentParser(description=description)
parser.add_argument('ttf_font', nargs='+',
help="Font in OpenType (TTF/OTF) format")
parser.add_argument('--autofix', action="store_true",
help="Autofix font metrics")
parser.add_argument('--verbose', action='store_true',
help='Print output in verbose')
args = parser.parse_args()
if args.verbose:
logger.setLevel(logging.DEBUG)
class FixNotApplied(Exception):
def __init__(self, message):
self.message = message
def fontStyleIsBold(fontStyle):
return 'Bold' in fontStyle
def setMacStyle(font, newvalue):
font['head'].macStyle = newvalue
def setFsSelection(font, newvalue):
font['OS/2'].fsSelection = newvalue
def setValidNameRecord(font, nameId, val):
result_namerec = None
for k, p in [[1, 0], [3, 1]]:
result_namerec = font['name'].getName(nameId, k, p)
if result_namerec:
result_namerec.string = (val or '').encode(result_namerec.getEncoding())
if result_namerec:
return
ot_namerecord = NameRecord()
ot_namerecord.nameID = nameId
ot_namerecord.platformID = 3
ot_namerecord.langID = 0x409
# When building a Unicode font for Windows, the platform ID
# should be 3 and the encoding ID should be 1
ot_namerecord.platEncID = 1
ot_namerecord.string = (val or '').encode(ot_namerecord.getEncoding())
font['name'].names.append(ot_namerecord)
return
def setValidNames(font, isBold):
name = font['name'].getName(1, 3, 1)
familyname = name.string.decode(name.getEncoding())
rules = NameTableNamingRule({'isBold': isBold,
'isItalic': True,
'familyName': familyname,
'weight': font['OS/2'].usWeightClass})
names = []
passedNamesId = []
for rec in font['name'].names:
string = rec.string.decode(rec.getEncoding())
if rec.nameID in [1, 2, 4, 6, 16, 17, 18]:
string = rules.apply(rec.nameID)
passedNamesId.append(rec.nameID)
names.append({'nameID': rec.nameID, 'string': string})
difference = set([1, 2, 4, 6, 16, 17, 18]).difference(set(passedNamesId))
if difference:
for nameID in difference:
string = rules.apply(nameID)
names.append({'nameID': nameID, 'string': string})
for field in names:
setValidNameRecord(font, field['nameID'], field['string'])
for name in font['name'].names:
logger.debug(u'{}: {}'.format(name.nameID, name.string.decode(name.getEncoding())))
def italicAngle(font, newvalue):
font['post'].italicAngle = newvalue
def getSuggestedItalicAngle(font):
return -10
def validate(font, fontStyle):
errors = []
f = '{:#09b}'.format(font['head'].macStyle)
if fontStyle.endswith('Italic'):
if not fontStyleIsBold(fontStyle):
if not bool(font['head'].macStyle & 0b10):
errors.append(('ER: HEAD macStyle is {} should be 00000010'.format(f),
setMacStyle, [font, font['head'].macStyle | 0b10]))
elif (font['head'].macStyle & 0b11) != 0b11:
errors.append(('ER: HEAD macStyle is {} should be 00000011'.format(f),
setMacStyle, [font, font['head'].macStyle | 0b11]))
else:
if not fontStyleIsBold(fontStyle):
if bool(font['head'].macStyle & 0b10):
# AND with the mask to clear the stray bold/italic bits.
newvalue = font['head'].macStyle & 0b1111111111111100
errors.append(('ER: HEAD macStyle is {} should be {:#09b}'.format(f, newvalue),
setMacStyle, [font, newvalue]))
elif bool(font['head'].macStyle & 0b01):
newvalue = font['head'].macStyle & 0b1111111111111110
errors.append(('ER: HEAD macStyle is {} should be {:#09b}'.format(f, newvalue),
setMacStyle, [font, newvalue]))
if font['post'].italicAngle != 0 and not fontStyle.endswith('Italic'):
errors.append(('ER: POST italicAngle is {} should be 0'.format(font['post'].italicAngle),
italicAngle, [font, 0]))
if font['post'].italicAngle == 0 and fontStyle.endswith('Italic'):
newvalue = getSuggestedItalicAngle(font)
errors.append(('ER: POST italicAngle is 0 should be {}'.format(newvalue),
italicAngle, [font, newvalue]))
# Check NAME table contains correct names for Italic
if fontStyle.endswith('Italic'):
if not fontStyleIsBold(fontStyle):
if font['OS/2'].fsSelection & 0b000001:
logger.info('OK: OS/2 fsSelection')
else:
newvalue = font['OS/2'].fsSelection | 0b1
msg = 'ER: OS/2 fsSelection is {:#06b} should be {:#06b}'
errors.append((msg.format(font['OS/2'].fsSelection, newvalue),
setFsSelection, [font, newvalue]))
else:
if font['OS/2'].fsSelection & 0b100001:
logger.info('OK: OS/2 fsSelection')
else:
newvalue = font['OS/2'].fsSelection | 0b100001
msg = 'ER: OS/2 fsSelection is {:#06b} should be {:#06b}'
errors.append((msg.format(font['OS/2'].fsSelection, newvalue),
setFsSelection, [font, newvalue]))
elif fontStyleIsBold(fontStyle):
if font['OS/2'].fsSelection & 0b100000:
logger.info('OK: OS/2 fsSelection')
else:
newvalue = font['OS/2'].fsSelection | 0b100000
msg = 'ER: OS/2 fsSelection is {:#06b} should be {:#06b}'
errors.append((msg.format(font['OS/2'].fsSelection, newvalue),
setFsSelection, [font, newvalue]))
for name in font['name'].names:
if name.nameID not in [2, 4, 6, 17]:
continue
string = name.string.decode(name.getEncoding())
if fontStyle.endswith('Italic'):
if string.endswith('Italic'):
logger.info('OK: NAME ID{}:\t{}'.format(name.nameID, string))
else:
errors.append(('ER: NAME ID{}:\t{}'.format(name.nameID, string),
setValidNames, [font, fontStyleIsBold(fontStyle)]))
elif fontStyleIsBold(fontStyle):
if fontStyleIsBold(string):
logger.info('OK: NAME ID{}:\t{}'.format(name.nameID, string))
else:
errors.append(('ER: NAME ID{}:\t{}'.format(name.nameID, string),
setValidNames, [font, fontStyleIsBold(fontStyle)]))
return errors
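# Bit layout used by the checks above (per the OpenType spec): 'head'
# macStyle bit 0 = Bold, bit 1 = Italic; 'OS/2' fsSelection bit 0 = ITALIC,
# bit 5 = BOLD. Illustrative helper (not part of the original script):
def _set_flag(value, mask, enabled):
    # OR the mask in to set a flag; AND with its complement to clear it.
    return value | mask if enabled else value & ~mask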
for path in args.ttf_font:
if not os.path.exists(path):
continue
font = TTFont(path)
name = font['name'].getName(2, 3, 1)
if not name:
continue
fontStyle = name.string.decode(name.getEncoding())
errors = validate(font, fontStyle)
if errors:
for error, function, arguments in errors:
logger.error(error)
if args.autofix and function:
try:
function(*arguments)
except FixNotApplied as ex:
logger.error('ER: Fix cannot be applied. See details below')
logger.error('\t{}'.format(ex.message))
continue
font.save(path + '.fix')
|
|
from __future__ import print_function, division
from sympy.core import Add, S, sympify, oo, pi, Dummy
from sympy.core.function import Function, ArgumentIndexError
from sympy.core.numbers import Rational
from sympy.core.power import Pow
from sympy.core.compatibility import range
from .zeta_functions import zeta
from .error_functions import erf
from sympy.functions.elementary.exponential import exp, log
from sympy.functions.elementary.integers import ceiling, floor
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.combinatorial.numbers import bernoulli, harmonic
from sympy.functions.combinatorial.factorials import factorial, rf, RisingFactorial
###############################################################################
############################ COMPLETE GAMMA FUNCTION ##########################
###############################################################################
class gamma(Function):
r"""
The gamma function
.. math::
\Gamma(x) := \int^{\infty}_{0} t^{x-1} e^{-t} \mathrm{d}t.
The ``gamma`` function implements the function which passes through the
values of the factorial function, i.e. `\Gamma(n) = (n - 1)!` when n is
an integer. More generally, `\Gamma(z)` is defined in the whole complex
plane except at the nonpositive integers, where it has simple poles.
Examples
========
>>> from sympy import S, I, pi, oo, gamma
>>> from sympy.abc import x
Several special values are known:
>>> gamma(1)
1
>>> gamma(4)
6
>>> gamma(S(3)/2)
sqrt(pi)/2
The Gamma function obeys the mirror symmetry:
>>> from sympy import conjugate
>>> conjugate(gamma(x))
gamma(conjugate(x))
Differentiation with respect to x is supported:
>>> from sympy import diff
>>> diff(gamma(x), x)
gamma(x)*polygamma(0, x)
Series expansion is also supported:
>>> from sympy import series
>>> series(gamma(x), x, 0, 3)
1/x - EulerGamma + x*(EulerGamma**2/2 + pi**2/12) + x**2*(-EulerGamma*pi**2/12 + polygamma(2, 1)/6 - EulerGamma**3/6) + O(x**3)
We can numerically evaluate the gamma function to arbitrary precision
on the whole complex plane:
>>> gamma(pi).evalf(40)
2.288037795340032417959588909060233922890
>>> gamma(1+I).evalf(20)
0.49801566811835604271 - 0.15494982830181068512*I
See Also
========
lowergamma: Lower incomplete gamma function.
uppergamma: Upper incomplete gamma function.
polygamma: Polygamma function.
loggamma: Log Gamma function.
digamma: Digamma function.
trigamma: Trigamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] http://en.wikipedia.org/wiki/Gamma_function
.. [2] http://dlmf.nist.gov/5
.. [3] http://mathworld.wolfram.com/GammaFunction.html
.. [4] http://functions.wolfram.com/GammaBetaErf/Gamma/
"""
unbranched = True
def fdiff(self, argindex=1):
if argindex == 1:
return gamma(self.args[0])*polygamma(0, self.args[0])
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Infinity
elif arg.is_Integer:
if arg.is_positive:
return factorial(arg - 1)
else:
return S.ComplexInfinity
elif arg.is_Rational:
if arg.q == 2:
n = abs(arg.p) // arg.q
if arg.is_positive:
k, coeff = n, S.One
else:
n = k = n + 1
if n & 1 == 0:
coeff = S.One
else:
coeff = S.NegativeOne
for i in range(3, 2*k, 2):
coeff *= i
if arg.is_positive:
return coeff*sqrt(S.Pi) / 2**n
else:
return 2**n*sqrt(S.Pi) / coeff
if arg.is_integer and arg.is_nonpositive:
return S.ComplexInfinity
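# The half-integer branch above implements
# Gamma(n + 1/2) = (2*n - 1)!! * sqrt(pi) / 2**n; for example,
# gamma(S(7)/2) evaluates to 15*sqrt(pi)/8, since 5!! = 15 and 2**3 = 8.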
def _eval_expand_func(self, **hints):
arg = self.args[0]
if arg.is_Rational:
if abs(arg.p) > arg.q:
x = Dummy('x')
n = arg.p // arg.q
p = arg.p - n*arg.q
return gamma(x + n)._eval_expand_func().subs(x, Rational(p, arg.q))
if arg.is_Add:
coeff, tail = arg.as_coeff_add()
if coeff and coeff.q != 1:
intpart = floor(coeff)
tail = (coeff - intpart,) + tail
coeff = intpart
tail = arg._new_rawargs(*tail, reeval=False)
return gamma(tail)*RisingFactorial(tail, coeff)
return self.func(*self.args)
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def _eval_is_real(self):
x = self.args[0]
if x.is_positive or x.is_noninteger:
return True
def _eval_is_positive(self):
x = self.args[0]
if x.is_positive:
return True
elif x.is_noninteger:
return floor(x).is_even
def _eval_rewrite_as_tractable(self, z):
return exp(loggamma(z))
def _eval_rewrite_as_factorial(self, z):
return factorial(z - 1)
def _eval_nseries(self, x, n, logx):
x0 = self.args[0].limit(x, 0)
if not (x0.is_Integer and x0 <= 0):
return super(gamma, self)._eval_nseries(x, n, logx)
t = self.args[0] - x0
return (gamma(t + 1)/rf(self.args[0], -x0 + 1))._eval_nseries(x, n, logx)
def _latex(self, printer, exp=None):
if len(self.args) != 1:
raise ValueError("Args length should be 1")
aa = printer._print(self.args[0])
if exp:
return r'\Gamma^{%s}{\left(%s \right)}' % (printer._print(exp), aa)
else:
return r'\Gamma{\left(%s \right)}' % aa
@staticmethod
def _latex_no_arg(printer):
return r'\Gamma'
###############################################################################
################## LOWER and UPPER INCOMPLETE GAMMA FUNCTIONS #################
###############################################################################
class lowergamma(Function):
r"""
The lower incomplete gamma function.
It can be defined as the meromorphic continuation of
.. math::
\gamma(s, x) := \int_0^x t^{s-1} e^{-t} \mathrm{d}t = \Gamma(s) - \Gamma(s, x).
This can be shown to be the same as
.. math::
\gamma(s, x) = \frac{x^s}{s} {}_1F_1\left({s \atop s+1} \middle| -x\right),
where :math:`{}_1F_1` is the (confluent) hypergeometric function.
Examples
========
>>> from sympy import lowergamma, S
>>> from sympy.abc import s, x
>>> lowergamma(s, x)
lowergamma(s, x)
>>> lowergamma(3, x)
-x**2*exp(-x) - 2*x*exp(-x) + 2 - 2*exp(-x)
>>> lowergamma(-S(1)/2, x)
-2*sqrt(pi)*erf(sqrt(x)) - 2*exp(-x)/sqrt(x)
See Also
========
gamma: Gamma function.
uppergamma: Upper incomplete gamma function.
polygamma: Polygamma function.
loggamma: Log Gamma function.
digamma: Digamma function.
trigamma: Trigamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] http://en.wikipedia.org/wiki/Incomplete_gamma_function#Lower_incomplete_Gamma_function
.. [2] Abramowitz, Milton; Stegun, Irene A., eds. (1965), Chapter 6, Section 5,
Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables
.. [3] http://dlmf.nist.gov/8
.. [4] http://functions.wolfram.com/GammaBetaErf/Gamma2/
.. [5] http://functions.wolfram.com/GammaBetaErf/Gamma3/
"""
def fdiff(self, argindex=2):
from sympy import meijerg, unpolarify
if argindex == 2:
a, z = self.args
return exp(-unpolarify(z))*z**(a - 1)
elif argindex == 1:
a, z = self.args
return gamma(a)*digamma(a) - log(z)*uppergamma(a, z) \
- meijerg([], [1, 1], [0, 0, a], [], z)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, a, x):
# For lack of a better place, we use this one to extract branching
# information. The following can be
# found in the literature (c/f references given above), albeit scattered:
# 1) For fixed x != 0, lowergamma(s, x) is an entire function of s
# 2) For fixed positive integers s, lowergamma(s, x) is an entire
# function of x.
# 3) For fixed non-positive integers s,
# lowergamma(s, exp(I*2*pi*n)*x) =
# 2*pi*I*n*(-1)**(-s)/factorial(-s) + lowergamma(s, x)
# (this follows from lowergamma(s, x).diff(x) = x**(s-1)*exp(-x)).
# 4) For fixed non-integral s,
# lowergamma(s, x) = x**s*gamma(s)*lowergamma_unbranched(s, x),
# where lowergamma_unbranched(s, x) is an entire function (in fact
# of both s and x), i.e.
# lowergamma(s, exp(2*I*pi*n)*x) = exp(2*pi*I*n*a)*lowergamma(a, x)
from sympy import unpolarify, I
nx, n = x.extract_branch_factor()
if a.is_integer and a.is_positive:
nx = unpolarify(x)
if nx != x:
return lowergamma(a, nx)
elif a.is_integer and a.is_nonpositive:
if n != 0:
return 2*pi*I*n*(-1)**(-a)/factorial(-a) + lowergamma(a, nx)
elif n != 0:
return exp(2*pi*I*n*a)*lowergamma(a, nx)
# Special values.
if a.is_Number:
# TODO this should be non-recursive
if a is S.One:
return S.One - exp(-x)
elif a is S.Half:
return sqrt(pi)*erf(sqrt(x))
elif a.is_Integer or (2*a).is_Integer:
b = a - 1
if b.is_positive:
return b*cls(b, x) - x**b * exp(-x)
if not a.is_Integer:
return (cls(a + 1, x) + x**a * exp(-x))/a
def _eval_evalf(self, prec):
from mpmath import mp, workprec
from sympy import Expr
a = self.args[0]._to_mpmath(prec)
z = self.args[1]._to_mpmath(prec)
with workprec(prec):
res = mp.gammainc(a, 0, z)
return Expr._from_mpmath(res, prec)
def _eval_conjugate(self):
z = self.args[1]
if z not in (S.Zero, S.NegativeInfinity):
return self.func(self.args[0].conjugate(), z.conjugate())
def _eval_rewrite_as_uppergamma(self, s, x):
return gamma(s) - uppergamma(s, x)
def _eval_rewrite_as_expint(self, s, x):
from sympy import expint
if s.is_integer and s.is_nonpositive:
return self
return self.rewrite(uppergamma).rewrite(expint)
@staticmethod
def _latex_no_arg(printer):
return r'\gamma'
class uppergamma(Function):
r"""
The upper incomplete gamma function.
It can be defined as the meromorphic continuation of
.. math::
\Gamma(s, x) := \int_x^\infty t^{s-1} e^{-t} \mathrm{d}t = \Gamma(s) - \gamma(s, x).
where `\gamma(s, x)` is the lower incomplete gamma function,
:class:`lowergamma`. This can be shown to be the same as
.. math::
\Gamma(s, x) = \Gamma(s) - \frac{x^s}{s} {}_1F_1\left({s \atop s+1} \middle| -x\right),
where :math:`{}_1F_1` is the (confluent) hypergeometric function.
The upper incomplete gamma function is also essentially equivalent to the
generalized exponential integral:
.. math::
\operatorname{E}_{n}(x) = \int_{1}^{\infty}{\frac{e^{-xt}}{t^n} \, dt} = x^{n-1}\Gamma(1-n,x).
Examples
========
>>> from sympy import uppergamma, S
>>> from sympy.abc import s, x
>>> uppergamma(s, x)
uppergamma(s, x)
>>> uppergamma(3, x)
x**2*exp(-x) + 2*x*exp(-x) + 2*exp(-x)
>>> uppergamma(-S(1)/2, x)
-2*sqrt(pi)*(-erf(sqrt(x)) + 1) + 2*exp(-x)/sqrt(x)
>>> uppergamma(-2, x)
expint(3, x)/x**2
See Also
========
gamma: Gamma function.
lowergamma: Lower incomplete gamma function.
polygamma: Polygamma function.
loggamma: Log Gamma function.
digamma: Digamma function.
trigamma: Trigamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] http://en.wikipedia.org/wiki/Incomplete_gamma_function#Upper_incomplete_Gamma_function
.. [2] Abramowitz, Milton; Stegun, Irene A., eds. (1965), Chapter 6, Section 5,
Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables
.. [3] http://dlmf.nist.gov/8
.. [4] http://functions.wolfram.com/GammaBetaErf/Gamma2/
.. [5] http://functions.wolfram.com/GammaBetaErf/Gamma3/
.. [6] http://en.wikipedia.org/wiki/Exponential_integral#Relation_with_other_functions
"""
def fdiff(self, argindex=2):
from sympy import meijerg, unpolarify
if argindex == 2:
a, z = self.args
return -exp(-unpolarify(z))*z**(a - 1)
elif argindex == 1:
a, z = self.args
return uppergamma(a, z)*log(z) + meijerg([], [1, 1], [0, 0, a], [], z)
else:
raise ArgumentIndexError(self, argindex)
def _eval_evalf(self, prec):
from mpmath import mp, workprec
from sympy import Expr
a = self.args[0]._to_mpmath(prec)
z = self.args[1]._to_mpmath(prec)
with workprec(prec):
res = mp.gammainc(a, z, mp.inf)
return Expr._from_mpmath(res, prec)
@classmethod
def eval(cls, a, z):
from sympy import unpolarify, I, expint
if z.is_Number:
if z is S.NaN:
return S.NaN
elif z is S.Infinity:
return S.Zero
elif z is S.Zero:
# TODO: Holds only for Re(a) > 0:
return gamma(a)
# We extract branching information here. C/f lowergamma.
nx, n = z.extract_branch_factor()
if a.is_integer and (a > 0) == True:
nx = unpolarify(z)
if z != nx:
return uppergamma(a, nx)
elif a.is_integer and (a <= 0) == True:
if n != 0:
return -2*pi*I*n*(-1)**(-a)/factorial(-a) + uppergamma(a, nx)
elif n != 0:
return gamma(a)*(1 - exp(2*pi*I*n*a)) + exp(2*pi*I*n*a)*uppergamma(a, nx)
# Special values.
if a.is_Number:
# TODO this should be non-recursive
if a is S.One:
return exp(-z)
elif a is S.Half:
return sqrt(pi)*(1 - erf(sqrt(z))) # TODO could use erfc...
elif a.is_Integer or (2*a).is_Integer:
b = a - 1
if b.is_positive:
return b*cls(b, z) + z**b * exp(-z)
elif b.is_Integer:
return expint(-b, z)*unpolarify(z)**(b + 1)
if not a.is_Integer:
return (cls(a + 1, z) - z**a * exp(-z))/a
def _eval_conjugate(self):
z = self.args[1]
if z not in (S.Zero, S.NegativeInfinity):
return self.func(self.args[0].conjugate(), z.conjugate())
def _eval_rewrite_as_lowergamma(self, s, x):
return gamma(s) - lowergamma(s, x)
def _eval_rewrite_as_expint(self, s, x):
from sympy import expint
return expint(1 - s, x)*x**s
###############################################################################
###################### POLYGAMMA and LOGGAMMA FUNCTIONS #######################
###############################################################################
class polygamma(Function):
r"""
The function ``polygamma(n, z)`` returns ``log(gamma(z)).diff(n + 1)``.
It is a meromorphic function on `\mathbb{C}` and defined as the (n+1)-th
derivative of the logarithm of the gamma function:
.. math::
\psi^{(n)} (z) := \frac{\mathrm{d}^{n+1}}{\mathrm{d} z^{n+1}} \log\Gamma(z).
Examples
========
Several special values are known:
>>> from sympy import S, polygamma
>>> polygamma(0, 1)
-EulerGamma
>>> polygamma(0, 1/S(2))
-2*log(2) - EulerGamma
>>> polygamma(0, 1/S(3))
-3*log(3)/2 - sqrt(3)*pi/6 - EulerGamma
>>> polygamma(0, 1/S(4))
-3*log(2) - pi/2 - EulerGamma
>>> polygamma(0, 2)
-EulerGamma + 1
>>> polygamma(0, 23)
-EulerGamma + 19093197/5173168
>>> from sympy import oo, I
>>> polygamma(0, oo)
oo
>>> polygamma(0, -oo)
oo
>>> polygamma(0, I*oo)
oo
>>> polygamma(0, -I*oo)
oo
Differentiation with respect to x is supported:
>>> from sympy import Symbol, diff
>>> x = Symbol("x")
>>> diff(polygamma(0, x), x)
polygamma(1, x)
>>> diff(polygamma(0, x), x, 2)
polygamma(2, x)
>>> diff(polygamma(0, x), x, 3)
polygamma(3, x)
>>> diff(polygamma(1, x), x)
polygamma(2, x)
>>> diff(polygamma(1, x), x, 2)
polygamma(3, x)
>>> diff(polygamma(2, x), x)
polygamma(3, x)
>>> diff(polygamma(2, x), x, 2)
polygamma(4, x)
>>> n = Symbol("n")
>>> diff(polygamma(n, x), x)
polygamma(n + 1, x)
>>> diff(polygamma(n, x), x, 2)
polygamma(n + 2, x)
We can rewrite polygamma functions in terms of harmonic numbers:
>>> from sympy import harmonic
>>> polygamma(0, x).rewrite(harmonic)
harmonic(x - 1) - EulerGamma
>>> polygamma(2, x).rewrite(harmonic)
2*harmonic(x - 1, 3) - 2*zeta(3)
>>> ni = Symbol("n", integer=True)
>>> polygamma(ni, x).rewrite(harmonic)
(-1)**(n + 1)*(-harmonic(x - 1, n + 1) + zeta(n + 1))*factorial(n)
See Also
========
gamma: Gamma function.
lowergamma: Lower incomplete gamma function.
uppergamma: Upper incomplete gamma function.
loggamma: Log Gamma function.
digamma: Digamma function.
trigamma: Trigamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] http://en.wikipedia.org/wiki/Polygamma_function
.. [2] http://mathworld.wolfram.com/PolygammaFunction.html
.. [3] http://functions.wolfram.com/GammaBetaErf/PolyGamma/
.. [4] http://functions.wolfram.com/GammaBetaErf/PolyGamma2/
"""
def fdiff(self, argindex=2):
if argindex == 2:
n, z = self.args[:2]
return polygamma(n + 1, z)
else:
raise ArgumentIndexError(self, argindex)
def _eval_is_positive(self):
if self.args[1].is_positive and (self.args[0] > 0) == True:
return self.args[0].is_odd
def _eval_is_negative(self):
if self.args[1].is_positive and (self.args[0] > 0) == True:
return self.args[0].is_even
def _eval_is_real(self):
return self.args[0].is_real
def _eval_aseries(self, n, args0, x, logx):
from sympy import Order
if args0[1] != oo or not \
(self.args[0].is_Integer and self.args[0].is_nonnegative):
return super(polygamma, self)._eval_aseries(n, args0, x, logx)
z = self.args[1]
N = self.args[0]
if N == 0:
# digamma function series
# Abramowitz & Stegun, p. 259, 6.3.18
r = log(z) - 1/(2*z)
o = None
if n < 2:
o = Order(1/z, x)
else:
m = ceiling((n + 1)//2)
l = [bernoulli(2*k) / (2*k*z**(2*k)) for k in range(1, m)]
r -= Add(*l)
o = Order(1/z**(2*m), x)
return r._eval_nseries(x, n, logx) + o
else:
# proper polygamma function
# Abramowitz & Stegun, p. 260, 6.4.10
# We return terms to order higher than O(x**n) on purpose
# -- otherwise we would not be able to return any terms for
# quite a long time!
fac = gamma(N)
e0 = fac + N*fac/(2*z)
m = ceiling((n + 1)//2)
for k in range(1, m):
fac = fac*(2*k + N - 1)*(2*k + N - 2) / ((2*k)*(2*k - 1))
e0 += bernoulli(2*k)*fac/z**(2*k)
o = Order(1/z**(2*m), x)
if n == 0:
o = Order(1/z, x)
elif n == 1:
o = Order(1/z**2, x)
r = e0._eval_nseries(z, n, logx) + o
return (-1 * (-1/z)**N * r)._eval_nseries(x, n, logx)
@classmethod
def eval(cls, n, z):
n, z = list(map(sympify, (n, z)))
from sympy import unpolarify
if n.is_integer:
if n.is_nonnegative:
nz = unpolarify(z)
if z != nz:
return polygamma(n, nz)
if n == -1:
return loggamma(z)
else:
if z.is_Number:
if z is S.NaN:
return S.NaN
elif z is S.Infinity:
if n.is_Number:
if n is S.Zero:
return S.Infinity
else:
return S.Zero
elif z.is_Integer:
if z.is_nonpositive:
return S.ComplexInfinity
else:
if n is S.Zero:
return -S.EulerGamma + harmonic(z - 1, 1)
elif n.is_odd:
return (-1)**(n + 1)*factorial(n)*zeta(n + 1, z)
if n == 0:
if z is S.NaN:
return S.NaN
elif z.is_Rational:
# TODO actually *any* n/m can be done, but that is messy
lookup = {S(1)/2: -2*log(2) - S.EulerGamma,
S(1)/3: -S.Pi/2/sqrt(3) - 3*log(3)/2 - S.EulerGamma,
S(1)/4: -S.Pi/2 - 3*log(2) - S.EulerGamma,
S(3)/4: -3*log(2) - S.EulerGamma + S.Pi/2,
S(2)/3: -3*log(3)/2 + S.Pi/2/sqrt(3) - S.EulerGamma}
if z > 0:
n = floor(z)
z0 = z - n
if z0 in lookup:
return lookup[z0] + Add(*[1/(z0 + k) for k in range(n)])
elif z < 0:
n = floor(1 - z)
z0 = z + n
if z0 in lookup:
return lookup[z0] - Add(*[1/(z0 - 1 - k) for k in range(n)])
elif z in (S.Infinity, S.NegativeInfinity):
return S.Infinity
else:
t = z.extract_multiplicatively(S.ImaginaryUnit)
if t in (S.Infinity, S.NegativeInfinity):
return S.Infinity
# TODO n == 1 also can do some rational z
def _eval_expand_func(self, **hints):
n, z = self.args
if n.is_Integer and n.is_nonnegative:
if z.is_Add:
coeff = z.args[0]
if coeff.is_Integer:
e = -(n + 1)
if coeff > 0:
tail = Add(*[Pow(
z - i, e) for i in range(1, int(coeff) + 1)])
else:
tail = -Add(*[Pow(
z + i, e) for i in range(0, int(-coeff))])
return polygamma(n, z - coeff) + (-1)**n*factorial(n)*tail
elif z.is_Mul:
coeff, z = z.as_two_terms()
if coeff.is_Integer and coeff.is_positive:
tail = [ polygamma(n, z + Rational(
i, coeff)) for i in range(0, int(coeff)) ]
if n == 0:
return Add(*tail)/coeff + log(coeff)
else:
return Add(*tail)/coeff**(n + 1)
z *= coeff
return polygamma(n, z)
def _eval_rewrite_as_zeta(self, n, z):
if n >= S.One:
return (-1)**(n + 1)*factorial(n)*zeta(n + 1, z)
else:
return self
def _eval_rewrite_as_harmonic(self, n, z):
if n.is_integer:
if n == S.Zero:
return harmonic(z - 1) - S.EulerGamma
else:
return S.NegativeOne**(n+1) * factorial(n) * (zeta(n+1) - harmonic(z-1, n+1))
def _eval_as_leading_term(self, x):
from sympy import Order
n, z = [a.as_leading_term(x) for a in self.args]
o = Order(z, x)
if n == 0 and o.contains(1/x):
return o.getn() * log(x)
else:
return self.func(n, z)
class loggamma(Function):
r"""
The ``loggamma`` function implements the logarithm of the
gamma function i.e, `\log\Gamma(x)`.
Examples
========
Several special values are known. For numerical integral
arguments we have:
>>> from sympy import loggamma
>>> loggamma(-2)
oo
>>> loggamma(0)
oo
>>> loggamma(1)
0
>>> loggamma(2)
0
>>> loggamma(3)
log(2)
and for symbolic values:
>>> from sympy import Symbol
>>> n = Symbol("n", integer=True, positive=True)
>>> loggamma(n)
log(gamma(n))
>>> loggamma(-n)
oo
for half-integral values:
>>> from sympy import S, pi
>>> loggamma(S(5)/2)
log(3*sqrt(pi)/4)
>>> loggamma(n/2)
log(2**(-n + 1)*sqrt(pi)*gamma(n)/gamma(n/2 + 1/2))
and general rational arguments:
>>> from sympy import expand_func
>>> L = loggamma(S(16)/3)
>>> expand_func(L).doit()
-5*log(3) + loggamma(1/3) + log(4) + log(7) + log(10) + log(13)
>>> L = loggamma(S(19)/4)
>>> expand_func(L).doit()
-4*log(4) + loggamma(3/4) + log(3) + log(7) + log(11) + log(15)
>>> L = loggamma(S(23)/7)
>>> expand_func(L).doit()
-3*log(7) + log(2) + loggamma(2/7) + log(9) + log(16)
The loggamma function has the following limits towards infinity:
>>> from sympy import oo
>>> loggamma(oo)
oo
>>> loggamma(-oo)
zoo
The loggamma function obeys the mirror symmetry
if `x \in \mathbb{C} \setminus \{-\infty, 0\}`:
>>> from sympy.abc import x
>>> from sympy import conjugate
>>> conjugate(loggamma(x))
loggamma(conjugate(x))
Differentiation with respect to x is supported:
>>> from sympy import diff
>>> diff(loggamma(x), x)
polygamma(0, x)
Series expansion is also supported:
>>> from sympy import series
>>> series(loggamma(x), x, 0, 4)
-log(x) - EulerGamma*x + pi**2*x**2/12 + x**3*polygamma(2, 1)/6 + O(x**4)
We can numerically evaluate the gamma function to arbitrary precision
on the whole complex plane:
>>> from sympy import I
>>> loggamma(5).evalf(30)
3.17805383034794561964694160130
>>> loggamma(I).evalf(20)
-0.65092319930185633889 - 1.8724366472624298171*I
See Also
========
gamma: Gamma function.
lowergamma: Lower incomplete gamma function.
uppergamma: Upper incomplete gamma function.
polygamma: Polygamma function.
digamma: Digamma function.
trigamma: Trigamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] http://en.wikipedia.org/wiki/Gamma_function
.. [2] http://dlmf.nist.gov/5
.. [3] http://mathworld.wolfram.com/LogGammaFunction.html
.. [4] http://functions.wolfram.com/GammaBetaErf/LogGamma/
"""
@classmethod
def eval(cls, z):
z = sympify(z)
if z.is_integer:
if z.is_nonpositive:
return S.Infinity
elif z.is_positive:
return log(gamma(z))
elif z.is_rational:
p, q = z.as_numer_denom()
# Half-integral values:
if p.is_positive and q == 2:
return log(sqrt(S.Pi) * 2**(1 - p) * gamma(p) / gamma((p + 1)*S.Half))
if z is S.Infinity:
return S.Infinity
elif abs(z) is S.Infinity:
return S.ComplexInfinity
if z is S.NaN:
return S.NaN
def _eval_expand_func(self, **hints):
from sympy import Sum
z = self.args[0]
if z.is_Rational:
p, q = z.as_numer_denom()
# General rational arguments (u + p/q)
# Split z as n + p/q with p < q
n = p // q
p = p - n*q
if p.is_positive and q.is_positive and p < q:
k = Dummy("k")
if n.is_positive:
return loggamma(p / q) - n*log(q) + Sum(log((k - 1)*q + p), (k, 1, n))
elif n.is_negative:
return loggamma(p / q) - n*log(q) + S.Pi*S.ImaginaryUnit*n - Sum(log(k*q - p), (k, 1, -n))
elif n.is_zero:
return loggamma(p / q)
return self
def _eval_nseries(self, x, n, logx=None):
x0 = self.args[0].limit(x, 0)
if x0 is S.Zero:
f = self._eval_rewrite_as_intractable(*self.args)
return f._eval_nseries(x, n, logx)
return super(loggamma, self)._eval_nseries(x, n, logx)
def _eval_aseries(self, n, args0, x, logx):
from sympy import Order
if args0[0] != oo:
return super(loggamma, self)._eval_aseries(n, args0, x, logx)
z = self.args[0]
m = min(n, ceiling((n + S(1))/2))
r = log(z)*(z - S(1)/2) - z + log(2*pi)/2
l = [bernoulli(2*k) / (2*k*(2*k - 1)*z**(2*k - 1)) for k in range(1, m)]
o = None
if m == 0:
o = Order(1, x)
else:
o = Order(1/z**(2*m - 1), x)
# It is very inefficient to first add the order and then do the nseries
return (r + Add(*l))._eval_nseries(x, n, logx) + o
def _eval_rewrite_as_intractable(self, z):
return log(gamma(z))
def _eval_is_real(self):
return self.args[0].is_real
def _eval_conjugate(self):
z = self.args[0]
if z not in (S.Zero, S.NegativeInfinity):
return self.func(z.conjugate())
def fdiff(self, argindex=1):
if argindex == 1:
return polygamma(0, self.args[0])
else:
raise ArgumentIndexError(self, argindex)
def _sage_(self):
import sage.all as sage
return sage.log_gamma(self.args[0]._sage_())
def digamma(x):
r"""
The digamma function is the first derivative of the loggamma function, i.e.,
.. math::
\psi(z) := \frac{\mathrm{d}}{\mathrm{d} z} \log\Gamma(z)
= \frac{\Gamma'(z)}{\Gamma(z)}
In this case, ``digamma(z) = polygamma(0, z)``.
See Also
========
gamma: Gamma function.
lowergamma: Lower incomplete gamma function.
uppergamma: Upper incomplete gamma function.
polygamma: Polygamma function.
loggamma: Log Gamma function.
trigamma: Trigamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] http://en.wikipedia.org/wiki/Digamma_function
.. [2] http://mathworld.wolfram.com/DigammaFunction.html
.. [3] http://functions.wolfram.com/GammaBetaErf/PolyGamma2/
"""
return polygamma(0, x)
def trigamma(x):
r"""
The trigamma function is the second derivative of the loggamma function, i.e.,
.. math::
\psi^{(1)}(z) := \frac{\mathrm{d}^{2}}{\mathrm{d} z^{2}} \log\Gamma(z).
In this case, ``trigamma(z) = polygamma(1, z)``.
See Also
========
gamma: Gamma function.
lowergamma: Lower incomplete gamma function.
uppergamma: Upper incomplete gamma function.
polygamma: Polygamma function.
loggamma: Log Gamma function.
digamma: Digamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] http://en.wikipedia.org/wiki/Trigamma_function
.. [2] http://mathworld.wolfram.com/TrigammaFunction.html
.. [3] http://functions.wolfram.com/GammaBetaErf/PolyGamma2/
"""
return polygamma(1, x)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent.linux import dhcp
from neutron.common import config as base_config
from neutron.common import constants
from neutron.openstack.common import log as logging
from neutron.tests import base
LOG = logging.getLogger(__name__)
class FakeIPAllocation:
def __init__(self, address, subnet_id=None):
self.ip_address = address
self.subnet_id = subnet_id
class DhcpOpt(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __str__(self):
return str(self.__dict__)
class FakePort1:
id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
admin_state_up = True
device_owner = 'foo1'
fixed_ips = [FakeIPAllocation('192.168.0.2')]
mac_address = '00:00:80:aa:bb:cc'
def __init__(self):
self.extra_dhcp_opts = []
class FakePort2:
id = 'ffffffff-ffff-ffff-ffff-ffffffffffff'
admin_state_up = False
device_owner = 'foo2'
fixed_ips = [FakeIPAllocation('fdca:3ba5:a17a:4ba3::2')]
mac_address = '00:00:f3:aa:bb:cc'
def __init__(self):
self.extra_dhcp_opts = []
class FakePort3:
id = '44444444-4444-4444-4444-444444444444'
admin_state_up = True
device_owner = 'foo3'
fixed_ips = [FakeIPAllocation('192.168.0.3'),
FakeIPAllocation('fdca:3ba5:a17a:4ba3::3')]
mac_address = '00:00:0f:aa:bb:cc'
def __init__(self):
self.extra_dhcp_opts = []
class FakeRouterPort:
id = 'rrrrrrrr-rrrr-rrrr-rrrr-rrrrrrrrrrrr'
admin_state_up = True
device_owner = constants.DEVICE_OWNER_ROUTER_INTF
fixed_ips = [FakeIPAllocation('192.168.0.1',
'dddddddd-dddd-dddd-dddd-dddddddddddd')]
mac_address = '00:00:0f:rr:rr:rr'
def __init__(self):
self.extra_dhcp_opts = []
class FakePortMultipleAgents1:
id = 'rrrrrrrr-rrrr-rrrr-rrrr-rrrrrrrrrrrr'
admin_state_up = True
device_owner = constants.DEVICE_OWNER_DHCP
fixed_ips = [FakeIPAllocation('192.168.0.5',
'dddddddd-dddd-dddd-dddd-dddddddddddd')]
mac_address = '00:00:0f:dd:dd:dd'
def __init__(self):
self.extra_dhcp_opts = []
class FakePortMultipleAgents2:
id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
admin_state_up = True
device_owner = constants.DEVICE_OWNER_DHCP
fixed_ips = [FakeIPAllocation('192.168.0.6',
'dddddddd-dddd-dddd-dddd-dddddddddddd')]
mac_address = '00:00:0f:ee:ee:ee'
def __init__(self):
self.extra_dhcp_opts = []
class FakeV4HostRoute:
destination = '20.0.0.1/24'
nexthop = '20.0.0.1'
class FakeV4HostRouteGateway:
destination = '0.0.0.0/0'
nexthop = '10.0.0.1'
class FakeV6HostRoute:
destination = 'gdca:3ba5:a17a:4ba3::/64'
nexthop = 'gdca:3ba5:a17a:4ba3::1'
class FakeV4Subnet:
id = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
ip_version = 4
cidr = '192.168.0.0/24'
gateway_ip = '192.168.0.1'
enable_dhcp = True
host_routes = [FakeV4HostRoute]
dns_nameservers = ['8.8.8.8']
class FakeV4SubnetGatewayRoute:
id = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
ip_version = 4
cidr = '192.168.0.0/24'
gateway_ip = '192.168.0.1'
enable_dhcp = True
host_routes = [FakeV4HostRouteGateway]
dns_nameservers = ['8.8.8.8']
class FakeV4SubnetMultipleAgentsWithoutDnsProvided:
id = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
ip_version = 4
cidr = '192.168.0.0/24'
gateway_ip = '192.168.0.1'
enable_dhcp = True
dns_nameservers = []
host_routes = []
class FakeV4MultipleAgentsWithoutDnsProvided:
id = 'ffffffff-ffff-ffff-ffff-ffffffffffff'
subnets = [FakeV4SubnetMultipleAgentsWithoutDnsProvided()]
ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort(),
FakePortMultipleAgents1(), FakePortMultipleAgents2()]
namespace = 'qdhcp-ns'
class FakeV4SubnetMultipleAgentsWithDnsProvided:
id = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
ip_version = 4
cidr = '192.168.0.0/24'
gateway_ip = '192.168.0.1'
enable_dhcp = True
dns_nameservers = ['8.8.8.8']
host_routes = []
class FakeV4MultipleAgentsWithDnsProvided:
id = 'ffffffff-ffff-ffff-ffff-ffffffffffff'
subnets = [FakeV4SubnetMultipleAgentsWithDnsProvided()]
ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort(),
FakePortMultipleAgents1(), FakePortMultipleAgents2()]
namespace = 'qdhcp-ns'
class FakeV6Subnet:
id = 'ffffffff-ffff-ffff-ffff-ffffffffffff'
ip_version = 6
cidr = 'fdca:3ba5:a17a:4ba3::/64'
gateway_ip = 'fdca:3ba5:a17a:4ba3::1'
enable_dhcp = True
host_routes = [FakeV6HostRoute]
dns_nameservers = ['gdca:3ba5:a17a:4ba3::1']
class FakeV4SubnetNoDHCP:
id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
ip_version = 4
cidr = '192.168.1.0/24'
gateway_ip = '192.168.1.1'
enable_dhcp = False
host_routes = []
dns_nameservers = []
class FakeV4SubnetNoGateway:
id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
ip_version = 4
cidr = '192.168.1.0/24'
gateway_ip = None
enable_dhcp = True
host_routes = []
dns_nameservers = []
class FakeV4SubnetNoRouter:
id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
ip_version = 4
cidr = '192.168.1.0/24'
gateway_ip = '192.168.1.1'
enable_dhcp = True
host_routes = []
dns_nameservers = []
class FakeV4Network:
id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
subnets = [FakeV4Subnet()]
ports = [FakePort1()]
namespace = 'qdhcp-ns'
class FakeV6Network:
id = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
subnets = [FakeV6Subnet()]
ports = [FakePort2()]
namespace = 'qdhcp-ns'
class FakeDualNetwork:
id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
subnets = [FakeV4Subnet(), FakeV6Subnet()]
ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort()]
namespace = 'qdhcp-ns'
class FakeDualNetworkGatewayRoute:
id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
subnets = [FakeV4SubnetGatewayRoute(), FakeV6Subnet()]
ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort()]
namespace = 'qdhcp-ns'
class FakeDualNetworkSingleDHCP:
id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
subnets = [FakeV4Subnet(), FakeV4SubnetNoDHCP()]
ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort()]
namespace = 'qdhcp-ns'
class FakeV4NoGatewayNetwork:
id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
subnets = [FakeV4SubnetNoGateway()]
ports = [FakePort1()]
class FakeV4NetworkNoRouter:
id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
subnets = [FakeV4SubnetNoRouter()]
ports = [FakePort1()]
class FakeDualV4Pxe3Ports:
id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
subnets = [FakeV4Subnet(), FakeV4SubnetNoDHCP()]
ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort()]
namespace = 'qdhcp-ns'
def __init__(self, port_detail="portsSame"):
if port_detail == "portsSame":
self.ports[0].extra_dhcp_opts = [
DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'),
DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'),
DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
self.ports[1].extra_dhcp_opts = [
DhcpOpt(opt_name='tftp-server', opt_value='192.168.1.3'),
DhcpOpt(opt_name='server-ip-address', opt_value='192.168.1.2'),
DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux2.0')]
self.ports[2].extra_dhcp_opts = [
DhcpOpt(opt_name='tftp-server', opt_value='192.168.1.3'),
DhcpOpt(opt_name='server-ip-address', opt_value='192.168.1.2'),
DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux3.0')]
else:
self.ports[0].extra_dhcp_opts = [
DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.2'),
DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'),
DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
self.ports[1].extra_dhcp_opts = [
DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.5'),
DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.5'),
DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux2.0')]
self.ports[2].extra_dhcp_opts = [
DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.7'),
DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.7'),
DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux3.0')]
class FakeV4NetworkPxe2Ports:
id = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
subnets = [FakeV4Subnet()]
ports = [FakePort1(), FakePort2(), FakeRouterPort()]
namespace = 'qdhcp-ns'
def __init__(self, port_detail="portsSame"):
if port_detail == "portsSame":
self.ports[0].extra_dhcp_opts = [
DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'),
DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'),
DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
self.ports[1].extra_dhcp_opts = [
DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'),
DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'),
DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
else:
self.ports[0].extra_dhcp_opts = [
DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'),
DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'),
DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
self.ports[1].extra_dhcp_opts = [
DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.5'),
DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.5'),
DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
class FakeV4NetworkPxe3Ports:
id = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
subnets = [FakeV4Subnet()]
ports = [FakePort1(), FakePort2(), FakePort3(), FakeRouterPort()]
namespace = 'qdhcp-ns'
def __init__(self, port_detail="portsSame"):
if port_detail == "portsSame":
self.ports[0].extra_dhcp_opts = [
DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'),
DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'),
DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
self.ports[1].extra_dhcp_opts = [
DhcpOpt(opt_name='tftp-server', opt_value='192.168.1.3'),
DhcpOpt(opt_name='server-ip-address', opt_value='192.168.1.2'),
DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
self.ports[2].extra_dhcp_opts = [
DhcpOpt(opt_name='tftp-server', opt_value='192.168.1.3'),
DhcpOpt(opt_name='server-ip-address', opt_value='192.168.1.2'),
DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
else:
self.ports[0].extra_dhcp_opts = [
DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.3'),
DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.2'),
DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux.0')]
self.ports[1].extra_dhcp_opts = [
DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.5'),
DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.5'),
DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux2.0')]
self.ports[2].extra_dhcp_opts = [
DhcpOpt(opt_name='tftp-server', opt_value='192.168.0.7'),
DhcpOpt(opt_name='server-ip-address', opt_value='192.168.0.7'),
DhcpOpt(opt_name='bootfile-name', opt_value='pxelinux3.0')]
class LocalChild(dhcp.DhcpLocalProcess):
PORTS = {4: [4], 6: [6]}
def __init__(self, *args, **kwargs):
super(LocalChild, self).__init__(*args, **kwargs)
self.called = []
def reload_allocations(self):
self.called.append('reload')
def restart(self):
self.called.append('restart')
def spawn_process(self):
self.called.append('spawn')
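# LocalChild records which lifecycle hook ran instead of doing real work,
# so the tests below can assert on the ``called`` list rather than on
# spawned processes.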
class TestBase(base.BaseTestCase):
def setUp(self):
super(TestBase, self).setUp()
root = os.path.dirname(os.path.dirname(__file__))
args = ['--config-file',
os.path.join(root, 'etc', 'neutron.conf.test')]
self.conf = config.setup_conf()
self.conf.register_opts(base_config.core_opts)
self.conf.register_opts(dhcp.OPTS)
config.register_interface_driver_opts_helper(self.conf)
instance = mock.patch("neutron.agent.linux.dhcp.DeviceManager")
self.mock_mgr = instance.start()
self.conf.register_opt(cfg.BoolOpt('enable_isolated_metadata',
default=True))
self.conf(args=args)
self.conf.set_override('state_path', '')
self.conf.use_namespaces = True
self.replace_p = mock.patch('neutron.agent.linux.utils.replace_file')
self.execute_p = mock.patch('neutron.agent.linux.utils.execute')
self.safe = self.replace_p.start()
self.execute = self.execute_p.start()
class TestDhcpBase(TestBase):
def test_existing_dhcp_networks_abstract_error(self):
self.assertRaises(NotImplementedError,
dhcp.DhcpBase.existing_dhcp_networks,
None, None)
def test_check_version_abstract_error(self):
self.assertRaises(NotImplementedError,
dhcp.DhcpBase.check_version)
def test_base_abc_error(self):
self.assertRaises(TypeError, dhcp.DhcpBase, None)
def test_restart(self):
class SubClass(dhcp.DhcpBase):
def __init__(self):
dhcp.DhcpBase.__init__(self, cfg.CONF, FakeV4Network(), None)
self.called = []
def enable(self):
self.called.append('enable')
def disable(self, retain_port=False):
self.called.append('disable %s' % retain_port)
def reload_allocations(self):
pass
@property
def active(self):
return True
c = SubClass()
c.restart()
self.assertEqual(c.called, ['disable True', 'enable'])
class TestDhcpLocalProcess(TestBase):
def test_active(self):
with mock.patch('__builtin__.open') as mock_open:
mock_open.return_value.__enter__ = lambda s: s
mock_open.return_value.__exit__ = mock.Mock()
mock_open.return_value.readline.return_value = \
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
with mock.patch.object(LocalChild, 'pid') as pid:
pid.__get__ = mock.Mock(return_value=4)
lp = LocalChild(self.conf, FakeV4Network())
self.assertTrue(lp.active)
mock_open.assert_called_once_with('/proc/4/cmdline', 'r')
def test_active_none(self):
dummy_cmd_line = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
self.execute.return_value = (dummy_cmd_line, '')
with mock.patch.object(LocalChild, 'pid') as pid:
pid.__get__ = mock.Mock(return_value=None)
lp = LocalChild(self.conf, FakeV4Network())
self.assertFalse(lp.active)
def test_active_cmd_mismatch(self):
with mock.patch('__builtin__.open') as mock_open:
mock_open.return_value.__enter__ = lambda s: s
mock_open.return_value.__exit__ = mock.Mock()
mock_open.return_value.readline.return_value = \
'bbbbbbbb-bbbb-bbbb-aaaa-aaaaaaaaaaaa'
with mock.patch.object(LocalChild, 'pid') as pid:
pid.__get__ = mock.Mock(return_value=4)
lp = LocalChild(self.conf, FakeV4Network())
self.assertFalse(lp.active)
mock_open.assert_called_once_with('/proc/4/cmdline', 'r')
def test_get_conf_file_name(self):
tpl = '/dhcp/aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa/dev'
with mock.patch('os.path.isdir') as isdir:
isdir.return_value = False
with mock.patch('os.makedirs') as makedirs:
lp = LocalChild(self.conf, FakeV4Network())
self.assertEqual(lp.get_conf_file_name('dev'), tpl)
self.assertFalse(makedirs.called)
def test_get_conf_file_name_ensure_dir(self):
tpl = '/dhcp/aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa/dev'
with mock.patch('os.path.isdir') as isdir:
isdir.return_value = False
with mock.patch('os.makedirs') as makedirs:
lp = LocalChild(self.conf, FakeV4Network())
self.assertEqual(lp.get_conf_file_name('dev', True), tpl)
self.assertTrue(makedirs.called)
def test_enable_already_active(self):
with mock.patch.object(LocalChild, 'active') as patched:
patched.__get__ = mock.Mock(return_value=True)
lp = LocalChild(self.conf, FakeV4Network())
lp.enable()
self.assertEqual(lp.called, ['restart'])
def test_enable(self):
attrs_to_mock = dict(
[(a, mock.DEFAULT) for a in
['active', 'get_conf_file_name', 'interface_name']]
)
with mock.patch.multiple(LocalChild, **attrs_to_mock) as mocks:
mocks['active'].__get__ = mock.Mock(return_value=False)
mocks['get_conf_file_name'].return_value = '/dir'
mocks['interface_name'].__set__ = mock.Mock()
lp = LocalChild(self.conf,
FakeDualNetwork())
lp.enable()
self.mock_mgr.assert_has_calls(
[mock.call(self.conf, 'sudo', None),
mock.call().setup(mock.ANY, reuse_existing=True)])
self.assertEqual(lp.called, ['spawn'])
self.assertTrue(mocks['interface_name'].__set__.called)
def test_disable_not_active(self):
attrs_to_mock = dict([(a, mock.DEFAULT) for a in
['active', 'interface_name', 'pid']])
with mock.patch.multiple(LocalChild, **attrs_to_mock) as mocks:
mocks['active'].__get__ = mock.Mock(return_value=False)
mocks['pid'].__get__ = mock.Mock(return_value=5)
mocks['interface_name'].__get__ = mock.Mock(return_value='tap0')
with mock.patch.object(dhcp.LOG, 'debug') as log:
lp = LocalChild(self.conf, FakeDualNetwork())
lp.disable()
msg = log.call_args[0][0]
self.assertIn('stale', msg)
def test_disable_unknown_network(self):
attrs_to_mock = dict([(a, mock.DEFAULT) for a in
['active', 'interface_name', 'pid']])
with mock.patch.multiple(LocalChild, **attrs_to_mock) as mocks:
mocks['active'].__get__ = mock.Mock(return_value=False)
mocks['pid'].__get__ = mock.Mock(return_value=None)
mocks['interface_name'].__get__ = mock.Mock(return_value='tap0')
with mock.patch.object(dhcp.LOG, 'debug') as log:
lp = LocalChild(self.conf, FakeDualNetwork())
lp.disable()
msg = log.call_args[0][0]
self.assertIn('No DHCP', msg)
def test_disable_retain_port(self):
attrs_to_mock = dict([(a, mock.DEFAULT) for a in
['active', 'interface_name', 'pid']])
network = FakeDualNetwork()
with mock.patch.multiple(LocalChild, **attrs_to_mock) as mocks:
mocks['active'].__get__ = mock.Mock(return_value=True)
mocks['pid'].__get__ = mock.Mock(return_value=5)
mocks['interface_name'].__get__ = mock.Mock(return_value='tap0')
lp = LocalChild(self.conf, network)
lp.disable(retain_port=True)
exp_args = ['kill', '-9', 5]
self.execute.assert_called_once_with(exp_args, 'sudo')
def test_disable(self):
attrs_to_mock = dict([(a, mock.DEFAULT) for a in
['active', 'interface_name', 'pid']])
network = FakeDualNetwork()
with mock.patch.multiple(LocalChild, **attrs_to_mock) as mocks:
mocks['active'].__get__ = mock.Mock(return_value=True)
mocks['pid'].__get__ = mock.Mock(return_value=5)
mocks['interface_name'].__get__ = mock.Mock(return_value='tap0')
lp = LocalChild(self.conf, network)
with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip:
lp.disable()
self.mock_mgr.assert_has_calls([mock.call(self.conf, 'sudo', None),
mock.call().destroy(network, 'tap0')])
exp_args = ['kill', '-9', 5]
self.execute.assert_called_once_with(exp_args, 'sudo')
self.assertEqual(ip.return_value.netns.delete.call_count, 0)
def test_disable_delete_ns(self):
self.conf.set_override('dhcp_delete_namespaces', True)
attrs_to_mock = dict([(a, mock.DEFAULT) for a in ['active', 'pid']])
with mock.patch.multiple(LocalChild, **attrs_to_mock) as mocks:
mocks['active'].__get__ = mock.Mock(return_value=False)
mocks['pid'].__get__ = mock.Mock(return_value=False)
lp = LocalChild(self.conf, FakeDualNetwork())
with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip:
lp.disable()
ip.return_value.netns.delete.assert_called_with('qdhcp-ns')
def test_pid(self):
with mock.patch('__builtin__.open') as mock_open:
mock_open.return_value.__enter__ = lambda s: s
mock_open.return_value.__exit__ = mock.Mock()
mock_open.return_value.read.return_value = '5'
lp = LocalChild(self.conf, FakeDualNetwork())
self.assertEqual(lp.pid, 5)
def test_pid_not_an_int(self):
with mock.patch('__builtin__.open') as mock_open:
mock_open.return_value.__enter__ = lambda s: s
mock_open.return_value.__exit__ = mock.Mock()
mock_open.return_value.read.return_value = 'foo'
lp = LocalChild(self.conf, FakeDualNetwork())
self.assertIsNone(lp.pid)
def test_pid_invalid_file(self):
with mock.patch.object(LocalChild, 'get_conf_file_name') as conf_file:
conf_file.return_value = '.doesnotexist/pid'
lp = LocalChild(self.conf, FakeDualNetwork())
self.assertIsNone(lp.pid)
def test_get_interface_name(self):
with mock.patch('__builtin__.open') as mock_open:
mock_open.return_value.__enter__ = lambda s: s
mock_open.return_value.__exit__ = mock.Mock()
mock_open.return_value.read.return_value = 'tap0'
lp = LocalChild(self.conf, FakeDualNetwork())
self.assertEqual(lp.interface_name, 'tap0')
def test_set_interface_name(self):
with mock.patch('neutron.agent.linux.utils.replace_file') as replace:
lp = LocalChild(self.conf, FakeDualNetwork())
with mock.patch.object(lp, 'get_conf_file_name') as conf_file:
conf_file.return_value = '/interface'
lp.interface_name = 'tap0'
conf_file.assert_called_once_with('interface',
ensure_conf_dir=True)
replace.assert_called_once_with(mock.ANY, 'tap0')
class TestDnsmasq(TestBase):
def _test_spawn(self, extra_options, network=FakeDualNetwork(),
max_leases=16777216):
def mock_get_conf_file_name(kind, ensure_conf_dir=False):
return '/dhcp/%s/%s' % (network.id, kind)
def fake_argv(index):
if index == 0:
return '/usr/local/bin/neutron-dhcp-agent'
else:
raise IndexError
expected = [
'ip',
'netns',
'exec',
'qdhcp-ns',
'env',
'NEUTRON_NETWORK_ID=%s' % network.id,
'dnsmasq',
'--no-hosts',
'--no-resolv',
'--strict-order',
'--bind-interfaces',
'--interface=tap0',
'--except-interface=lo',
'--pid-file=/dhcp/%s/pid' % network.id,
'--dhcp-hostsfile=/dhcp/%s/host' % network.id,
'--addn-hosts=/dhcp/%s/addn_hosts' % network.id,
'--dhcp-optsfile=/dhcp/%s/opts' % network.id,
'--leasefile-ro']
expected.extend(
'--dhcp-range=set:tag%d,%s,static,86400s' %
(i, s.cidr.split('/')[0])
for i, s in enumerate(network.subnets)
)
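# For FakeDualNetwork this generator yields, for instance,
# '--dhcp-range=set:tag0,192.168.0.0,static,86400s' and
# '--dhcp-range=set:tag1,fdca:3ba5:a17a:4ba3::,static,86400s'.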
expected.append('--dhcp-lease-max=%d' % max_leases)
expected.extend(extra_options)
self.execute.return_value = ('', '')
attrs_to_mock = dict(
[(a, mock.DEFAULT) for a in
['_output_opts_file', 'get_conf_file_name', 'interface_name']]
)
with mock.patch.multiple(dhcp.Dnsmasq, **attrs_to_mock) as mocks:
mocks['get_conf_file_name'].side_effect = mock_get_conf_file_name
mocks['_output_opts_file'].return_value = (
'/dhcp/%s/opts' % network.id
)
mocks['interface_name'].__get__ = mock.Mock(return_value='tap0')
with mock.patch.object(dhcp.sys, 'argv') as argv:
argv.__getitem__.side_effect = fake_argv
dm = dhcp.Dnsmasq(self.conf, network, version=float(2.59))
dm.spawn_process()
self.assertTrue(mocks['_output_opts_file'].called)
self.execute.assert_called_once_with(expected,
root_helper='sudo',
check_exit_code=True)
def test_spawn(self):
self._test_spawn(['--conf-file=', '--domain=openstacklocal'])
def test_spawn_cfg_config_file(self):
self.conf.set_override('dnsmasq_config_file', '/foo')
self._test_spawn(['--conf-file=/foo', '--domain=openstacklocal'])
def test_spawn_no_dhcp_domain(self):
self.conf.set_override('dhcp_domain', '')
self._test_spawn(['--conf-file='])
def test_spawn_cfg_dns_server(self):
self.conf.set_override('dnsmasq_dns_servers', ['8.8.8.8'])
self._test_spawn(['--conf-file=',
'--server=8.8.8.8',
'--domain=openstacklocal'])
def test_spawn_cfg_multiple_dns_server(self):
self.conf.set_override('dnsmasq_dns_servers', ['8.8.8.8',
'9.9.9.9'])
self._test_spawn(['--conf-file=',
'--server=8.8.8.8',
'--server=9.9.9.9',
'--domain=openstacklocal'])
def test_spawn_max_leases_is_smaller_than_cap(self):
self._test_spawn(
['--conf-file=', '--domain=openstacklocal'],
network=FakeV4Network(),
max_leases=256)
def test_output_opts_file(self):
fake_v6 = 'gdca:3ba5:a17a:4ba3::1'
fake_v6_cidr = 'gdca:3ba5:a17a:4ba3::/64'
expected = """
tag:tag0,option:dns-server,8.8.8.8
tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1
tag:tag0,249,20.0.0.1/24,20.0.0.1
tag:tag0,option:router,192.168.0.1
tag:tag1,option:dns-server,%s
tag:tag1,option:classless-static-route,%s,%s
tag:tag1,249,%s,%s""".lstrip() % (fake_v6,
fake_v6_cidr, fake_v6,
fake_v6_cidr, fake_v6)
with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn:
conf_fn.return_value = '/foo/opts'
dm = dhcp.Dnsmasq(self.conf, FakeDualNetwork(),
version=float(2.59))
dm._output_opts_file()
self.safe.assert_called_once_with('/foo/opts', expected)
def test_output_opts_file_gateway_route(self):
fake_v6 = 'gdca:3ba5:a17a:4ba3::1'
fake_v6_cidr = 'gdca:3ba5:a17a:4ba3::/64'
expected = """
tag:tag0,option:dns-server,8.8.8.8
tag:tag0,option:router,10.0.0.1
tag:tag1,option:dns-server,%s
tag:tag1,option:classless-static-route,%s,%s
tag:tag1,249,%s,%s""".lstrip() % (fake_v6,
fake_v6_cidr, fake_v6,
fake_v6_cidr, fake_v6)
with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn:
conf_fn.return_value = '/foo/opts'
dm = dhcp.Dnsmasq(self.conf, FakeDualNetworkGatewayRoute(),
version=float(2.59))
dm._output_opts_file()
self.safe.assert_called_once_with('/foo/opts', expected)
def test_output_opts_file_multiple_agents_without_dns_provided(self):
expected = """
tag:tag0,option:router,192.168.0.1
tag:tag0,option:dns-server,192.168.0.5,192.168.0.6""".lstrip()
with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn:
conf_fn.return_value = '/foo/opts'
dm = dhcp.Dnsmasq(self.conf,
FakeV4MultipleAgentsWithoutDnsProvided(),
version=float(2.59))
dm._output_opts_file()
self.safe.assert_called_once_with('/foo/opts', expected)
def test_output_opts_file_multiple_agents_with_dns_provided(self):
expected = """
tag:tag0,option:dns-server,8.8.8.8
tag:tag0,option:router,192.168.0.1""".lstrip()
with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn:
conf_fn.return_value = '/foo/opts'
dm = dhcp.Dnsmasq(self.conf,
FakeV4MultipleAgentsWithDnsProvided(),
version=float(2.59))
dm._output_opts_file()
self.safe.assert_called_once_with('/foo/opts', expected)
def test_output_opts_file_single_dhcp(self):
expected = """
tag:tag0,option:dns-server,8.8.8.8
tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1
tag:tag0,249,20.0.0.1/24,20.0.0.1
tag:tag0,option:router,192.168.0.1""".lstrip()
with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn:
conf_fn.return_value = '/foo/opts'
dm = dhcp.Dnsmasq(self.conf, FakeDualNetworkSingleDHCP(),
version=float(2.59))
dm._output_opts_file()
self.safe.assert_called_once_with('/foo/opts', expected)
def test_output_opts_file_single_dhcp_ver2_48(self):
expected = """
tag0,option:dns-server,8.8.8.8
tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1
tag0,249,20.0.0.1/24,20.0.0.1
tag0,option:router,192.168.0.1""".lstrip()
with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn:
conf_fn.return_value = '/foo/opts'
dm = dhcp.Dnsmasq(self.conf, FakeDualNetworkSingleDHCP(),
version=float(2.48))
dm._output_opts_file()
self.safe.assert_called_once_with('/foo/opts', expected)
def test_output_opts_file_no_gateway(self):
expected = """
tag:tag0,option:classless-static-route,169.254.169.254/32,192.168.1.1
tag:tag0,249,169.254.169.254/32,192.168.1.1
tag:tag0,option:router""".lstrip()
with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn:
conf_fn.return_value = '/foo/opts'
dm = dhcp.Dnsmasq(self.conf, FakeV4NoGatewayNetwork(),
version=float(2.59))
with mock.patch.object(dm, '_make_subnet_interface_ip_map') as ipm:
ipm.return_value = {FakeV4SubnetNoGateway.id: '192.168.1.1'}
dm._output_opts_file()
self.assertTrue(ipm.called)
self.safe.assert_called_once_with('/foo/opts', expected)
def test_output_opts_file_no_neutron_router_on_subnet(self):
expected = """
tag:tag0,option:classless-static-route,169.254.169.254/32,192.168.1.2
tag:tag0,249,169.254.169.254/32,192.168.1.2
tag:tag0,option:router,192.168.1.1""".lstrip()
with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn:
conf_fn.return_value = '/foo/opts'
dm = dhcp.Dnsmasq(self.conf, FakeV4NetworkNoRouter(),
version=float(2.59))
with mock.patch.object(dm, '_make_subnet_interface_ip_map') as ipm:
ipm.return_value = {FakeV4SubnetNoRouter.id: '192.168.1.2'}
dm._output_opts_file()
self.assertTrue(ipm.called)
self.safe.assert_called_once_with('/foo/opts', expected)
def test_output_opts_file_pxe_2port_1net(self):
expected = """
tag:tag0,option:dns-server,8.8.8.8
tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1
tag:tag0,249,20.0.0.1/24,20.0.0.1
tag:tag0,option:router,192.168.0.1
tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,option:tftp-server,192.168.0.3
tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,option:server-ip-address,192.168.0.2
tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,option:bootfile-name,pxelinux.0
tag:ffffffff-ffff-ffff-ffff-ffffffffffff,option:tftp-server,192.168.0.3
tag:ffffffff-ffff-ffff-ffff-ffffffffffff,option:server-ip-address,192.168.0.2
tag:ffffffff-ffff-ffff-ffff-ffffffffffff,option:bootfile-name,pxelinux.0"""
expected = expected.lstrip()
with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn:
conf_fn.return_value = '/foo/opts'
fp = FakeV4NetworkPxe2Ports()
dm = dhcp.Dnsmasq(self.conf, fp, version=float(2.59))
dm._output_opts_file()
self.safe.assert_called_once_with('/foo/opts', expected)
def test_output_opts_file_pxe_2port_1net_diff_details(self):
expected = """
tag:tag0,option:dns-server,8.8.8.8
tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1
tag:tag0,249,20.0.0.1/24,20.0.0.1
tag:tag0,option:router,192.168.0.1
tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,option:tftp-server,192.168.0.3
tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,option:server-ip-address,192.168.0.2
tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,option:bootfile-name,pxelinux.0
tag:ffffffff-ffff-ffff-ffff-ffffffffffff,option:tftp-server,192.168.0.5
tag:ffffffff-ffff-ffff-ffff-ffffffffffff,option:server-ip-address,192.168.0.5
tag:ffffffff-ffff-ffff-ffff-ffffffffffff,option:bootfile-name,pxelinux.0"""
expected = expected.lstrip()
with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn:
conf_fn.return_value = '/foo/opts'
dm = dhcp.Dnsmasq(self.conf, FakeV4NetworkPxe2Ports("portsDiff"),
version=float(2.59))
dm._output_opts_file()
self.safe.assert_called_once_with('/foo/opts', expected)
def test_output_opts_file_pxe_3port_1net_diff_details(self):
expected = """
tag:tag0,option:dns-server,8.8.8.8
tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1
tag:tag0,249,20.0.0.1/24,20.0.0.1
tag:tag0,option:router,192.168.0.1
tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,option:tftp-server,192.168.0.3
tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,option:server-ip-address,192.168.0.2
tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,option:bootfile-name,pxelinux.0
tag:ffffffff-ffff-ffff-ffff-ffffffffffff,option:tftp-server,192.168.0.5
tag:ffffffff-ffff-ffff-ffff-ffffffffffff,option:server-ip-address,192.168.0.5
tag:ffffffff-ffff-ffff-ffff-ffffffffffff,option:bootfile-name,pxelinux2.0
tag:44444444-4444-4444-4444-444444444444,option:tftp-server,192.168.0.7
tag:44444444-4444-4444-4444-444444444444,option:server-ip-address,192.168.0.7
tag:44444444-4444-4444-4444-444444444444,option:bootfile-name,pxelinux3.0"""
expected = expected.lstrip()
with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn:
conf_fn.return_value = '/foo/opts'
dm = dhcp.Dnsmasq(self.conf,
FakeV4NetworkPxe3Ports("portsDifferent"),
version=float(2.59))
dm._output_opts_file()
self.safe.assert_called_once_with('/foo/opts', expected)
def test_output_opts_file_pxe_3port_2net(self):
expected = """
tag:tag0,option:dns-server,8.8.8.8
tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1
tag:tag0,249,20.0.0.1/24,20.0.0.1
tag:tag0,option:router,192.168.0.1
tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,option:tftp-server,192.168.0.3
tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,option:server-ip-address,192.168.0.2
tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,option:bootfile-name,pxelinux.0
tag:ffffffff-ffff-ffff-ffff-ffffffffffff,option:tftp-server,192.168.1.3
tag:ffffffff-ffff-ffff-ffff-ffffffffffff,option:server-ip-address,192.168.1.2
tag:ffffffff-ffff-ffff-ffff-ffffffffffff,option:bootfile-name,pxelinux2.0
tag:44444444-4444-4444-4444-444444444444,option:tftp-server,192.168.1.3
tag:44444444-4444-4444-4444-444444444444,option:server-ip-address,192.168.1.2
tag:44444444-4444-4444-4444-444444444444,option:bootfile-name,pxelinux3.0"""
expected = expected.lstrip()
with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn:
conf_fn.return_value = '/foo/opts'
dm = dhcp.Dnsmasq(self.conf, FakeDualV4Pxe3Ports(),
version=float(2.59))
dm._output_opts_file()
self.safe.assert_called_once_with('/foo/opts', expected)
@property
def _test_reload_allocation_data(self):
exp_host_name = '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc/host'
exp_host_data = ('00:00:80:aa:bb:cc,host-192-168-0-2.openstacklocal,'
'192.168.0.2\n'
'00:00:f3:aa:bb:cc,host-fdca-3ba5-a17a-4ba3--2.'
'openstacklocal,[fdca:3ba5:a17a:4ba3::2]\n'
'00:00:0f:aa:bb:cc,host-192-168-0-3.openstacklocal,'
'192.168.0.3\n'
'00:00:0f:aa:bb:cc,host-fdca-3ba5-a17a-4ba3--3.'
'openstacklocal,[fdca:3ba5:a17a:4ba3::3]\n'
'00:00:0f:rr:rr:rr,host-192-168-0-1.openstacklocal,'
'192.168.0.1\n').lstrip()
exp_addn_name = '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc/addn_hosts'
exp_addn_data = (
'192.168.0.2\t'
'host-192-168-0-2.openstacklocal host-192-168-0-2\n'
'fdca:3ba5:a17a:4ba3::2\t'
'host-fdca-3ba5-a17a-4ba3--2.openstacklocal '
'host-fdca-3ba5-a17a-4ba3--2\n'
'192.168.0.3\thost-192-168-0-3.openstacklocal '
'host-192-168-0-3\n'
'fdca:3ba5:a17a:4ba3::3\t'
'host-fdca-3ba5-a17a-4ba3--3.openstacklocal '
'host-fdca-3ba5-a17a-4ba3--3\n'
'192.168.0.1\t'
'host-192-168-0-1.openstacklocal '
'host-192-168-0-1\n'
).lstrip()
exp_opt_name = '/dhcp/cccccccc-cccc-cccc-cccc-cccccccccccc/opts'
fake_v6 = 'gdca:3ba5:a17a:4ba3::1'
fake_v6_cidr = 'gdca:3ba5:a17a:4ba3::/64'
exp_opt_data = """
tag:tag0,option:dns-server,8.8.8.8
tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1
tag:tag0,249,20.0.0.1/24,20.0.0.1
tag:tag0,option:router,192.168.0.1
tag:tag1,option:dns-server,%s
tag:tag1,option:classless-static-route,%s,%s
tag:tag1,249,%s,%s""".lstrip() % (fake_v6,
fake_v6_cidr, fake_v6,
fake_v6_cidr, fake_v6)
return (exp_host_name, exp_host_data,
exp_addn_name, exp_addn_data,
exp_opt_name, exp_opt_data,)
def test_reload_allocations(self):
(exp_host_name, exp_host_data,
exp_addn_name, exp_addn_data,
exp_opt_name, exp_opt_data,) = self._test_reload_allocation_data
exp_args = ['kill', '-HUP', 5]
with mock.patch('os.path.isdir') as isdir:
isdir.return_value = True
with mock.patch.object(dhcp.Dnsmasq, 'active') as active:
active.__get__ = mock.Mock(return_value=True)
with mock.patch.object(dhcp.Dnsmasq, 'pid') as pid:
pid.__get__ = mock.Mock(return_value=5)
dm = dhcp.Dnsmasq(self.conf, FakeDualNetwork(),
version=float(2.59))
method_name = '_make_subnet_interface_ip_map'
with mock.patch.object(dhcp.Dnsmasq,
method_name) as ip_map:
ip_map.return_value = {}
dm.reload_allocations()
self.assertTrue(ip_map.called)
self.safe.assert_has_calls([mock.call(exp_host_name, exp_host_data),
mock.call(exp_addn_name, exp_addn_data),
mock.call(exp_opt_name, exp_opt_data)])
self.execute.assert_called_once_with(exp_args, 'sudo')
def test_reload_allocations_stale_pid(self):
(exp_host_name, exp_host_data,
exp_addn_name, exp_addn_data,
exp_opt_name, exp_opt_data,) = self._test_reload_allocation_data
with mock.patch('__builtin__.open') as mock_open:
mock_open.return_value.__enter__ = lambda s: s
mock_open.return_value.__exit__ = mock.Mock()
mock_open.return_value.readline.return_value = None
with mock.patch('os.path.isdir') as isdir:
isdir.return_value = True
with mock.patch.object(dhcp.Dnsmasq, 'pid') as pid:
pid.__get__ = mock.Mock(return_value=5)
dm = dhcp.Dnsmasq(self.conf, FakeDualNetwork(),
version=float(2.59))
method_name = '_make_subnet_interface_ip_map'
with mock.patch.object(dhcp.Dnsmasq, method_name) as ipmap:
ipmap.return_value = {}
dm.reload_allocations()
self.assertTrue(ipmap.called)
self.safe.assert_has_calls([
mock.call(exp_host_name, exp_host_data),
mock.call(exp_addn_name, exp_addn_data),
mock.call(exp_opt_name, exp_opt_data),
])
mock_open.assert_called_once_with('/proc/5/cmdline', 'r')
def test_release_unused_leases(self):
dnsmasq = dhcp.Dnsmasq(self.conf, FakeDualNetwork())
ip1 = '192.168.1.2'
mac1 = '00:00:80:aa:bb:cc'
ip2 = '192.168.1.3'
mac2 = '00:00:80:cc:bb:aa'
old_leases = set([(ip1, mac1), (ip2, mac2)])
dnsmasq._read_hosts_file_leases = mock.Mock(return_value=old_leases)
dnsmasq._output_hosts_file = mock.Mock()
dnsmasq._release_lease = mock.Mock()
dnsmasq.network.ports = []
dnsmasq._release_unused_leases()
dnsmasq._release_lease.assert_has_calls([mock.call(mac1, ip1),
mock.call(mac2, ip2)],
any_order=True)
def test_release_unused_leases_one_lease(self):
dnsmasq = dhcp.Dnsmasq(self.conf, FakeDualNetwork())
ip1 = '192.168.0.2'
mac1 = '00:00:80:aa:bb:cc'
ip2 = '192.168.0.3'
mac2 = '00:00:80:cc:bb:aa'
old_leases = set([(ip1, mac1), (ip2, mac2)])
dnsmasq._read_hosts_file_leases = mock.Mock(return_value=old_leases)
dnsmasq._output_hosts_file = mock.Mock()
dnsmasq._release_lease = mock.Mock()
dnsmasq.network.ports = [FakePort1()]
dnsmasq._release_unused_leases()
dnsmasq._release_lease.assert_has_calls([mock.call(mac2, ip2)],
any_order=True)
def test_read_hosts_file_leases(self):
filename = '/path/to/file'
with mock.patch('os.path.exists') as mock_exists:
mock_exists.return_value = True
with mock.patch('__builtin__.open') as mock_open:
mock_open.return_value.__enter__ = lambda s: s
mock_open.return_value.__exit__ = mock.Mock()
lines = ["00:00:80:aa:bb:cc,inst-name,192.168.0.1"]
mock_open.return_value.readlines.return_value = lines
dnsmasq = dhcp.Dnsmasq(self.conf, FakeDualNetwork())
leases = dnsmasq._read_hosts_file_leases(filename)
self.assertEqual(set([("192.168.0.1", "00:00:80:aa:bb:cc")]), leases)
mock_exists.assert_called_once_with(filename)
mock_open.assert_called_once_with(filename)
def test_make_subnet_interface_ip_map(self):
with mock.patch('neutron.agent.linux.ip_lib.IPDevice') as ip_dev:
ip_dev.return_value.addr.list.return_value = [
{'cidr': '192.168.0.1/24'}
]
dm = dhcp.Dnsmasq(self.conf,
FakeDualNetwork())
self.assertEqual(
dm._make_subnet_interface_ip_map(),
{FakeV4Subnet.id: '192.168.0.1'}
)
def test_remove_config_files(self):
net = FakeV4Network()
path = '/opt/data/neutron/dhcp'
self.conf.dhcp_confs = path
with mock.patch('shutil.rmtree') as rmtree:
lp = LocalChild(self.conf, net)
lp._remove_config_files()
rmtree.assert_called_once_with(os.path.join(path, net.id),
ignore_errors=True)
def test_existing_dhcp_networks(self):
path = '/opt/data/neutron/dhcp'
self.conf.dhcp_confs = path
cases = {
# network_uuid --> is_dhcp_alive?
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa': True,
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb': False,
'not_uuid_like_name': True
}
def active_fake(self, instance, cls):
return cases[instance.network.id]
with mock.patch('os.listdir') as mock_listdir:
with mock.patch.object(dhcp.Dnsmasq, 'active') as mock_active:
mock_active.__get__ = active_fake
mock_listdir.return_value = cases.keys()
result = dhcp.Dnsmasq.existing_dhcp_networks(self.conf, 'sudo')
mock_listdir.assert_called_once_with(path)
self.assertEqual(['aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'],
result)
def _check_version(self, cmd_out, expected_value):
with mock.patch('neutron.agent.linux.utils.execute') as cmd:
cmd.return_value = cmd_out
result = dhcp.Dnsmasq.check_version()
self.assertEqual(result, expected_value)
def test_check_minimum_version(self):
self._check_version('Dnsmasq version 2.59 Copyright (c)...',
float(2.59))
def test_check_future_version(self):
self._check_version('Dnsmasq version 2.65 Copyright (c)...',
float(2.65))
def test_check_fail_version(self):
self._check_version('Dnsmasq version 2.48 Copyright (c)...',
float(2.48))
def test_check_version_failed_cmd_execution(self):
self._check_version('Error while executing command', 0)
from importlib import import_module
import itertools
import re
from django.apps import apps
from django.conf import settings
from django.contrib.sites.requests import RequestSite
from django.contrib.admin.models import LogEntry
from django.contrib.auth import SESSION_KEY, REDIRECT_FIELD_NAME
from django.contrib.auth.forms import (AuthenticationForm, PasswordChangeForm,
SetPasswordForm)
from django.contrib.auth.models import User
from django.contrib.auth.views import login as login_view
from django.core import mail
from django.core.urlresolvers import reverse, NoReverseMatch
from django.http import QueryDict, HttpRequest
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
from django.utils.http import urlquote
from django.utils.six.moves.urllib.parse import urlparse, ParseResult
from django.utils.translation import LANGUAGE_SESSION_KEY
from django.test import TestCase, ignore_warnings, override_settings
from django.test.utils import patch_logger
from django.middleware.csrf import CsrfViewMiddleware
from django.contrib.sessions.middleware import SessionMiddleware
# Needed so model is installed when tests are run independently:
from .custom_user import CustomUser # NOQA
from .settings import AUTH_TEMPLATES
from .utils import skipIfCustomUser
@override_settings(
LANGUAGES=(
('en', 'English'),
),
LANGUAGE_CODE='en',
TEMPLATES=AUTH_TEMPLATES,
USE_TZ=False,
PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF='django.contrib.auth.tests.urls',
)
class AuthViewsTestCase(TestCase):
"""
Helper base class for all the following test cases.
"""
fixtures = ['authtestdata.json']
def login(self, username='testclient', password='password'):
response = self.client.post('/login/', {
'username': username,
'password': password,
})
self.assertIn(SESSION_KEY, self.client.session)
return response
def logout(self):
response = self.client.get('/admin/logout/')
self.assertEqual(response.status_code, 200)
self.assertNotIn(SESSION_KEY, self.client.session)
def assertFormError(self, response, error):
"""Assert that error is found in response.context['form'] errors"""
form_errors = list(itertools.chain(*response.context['form'].errors.values()))
self.assertIn(force_text(error), form_errors)
def assertURLEqual(self, url, expected, parse_qs=False):
"""
Given two URLs, make sure all their components (the ones given by
urlparse) are equal, only comparing components that are present in both
URLs.
If `parse_qs` is True, then the querystrings are parsed with QueryDict.
This is useful if you don't want the order of parameters to matter.
Otherwise, the query strings are compared as-is.
"""
fields = ParseResult._fields
for attr, x, y in zip(fields, urlparse(url), urlparse(expected)):
if parse_qs and attr == 'query':
x, y = QueryDict(x), QueryDict(y)
if x and y and x != y:
self.fail("%r != %r (%s doesn't match)" % (url, expected, attr))
@skipIfCustomUser
@override_settings(ROOT_URLCONF='django.contrib.auth.urls')
class AuthViewNamedURLTests(AuthViewsTestCase):
def test_named_urls(self):
"Named URLs should be reversible"
expected_named_urls = [
('login', [], {}),
('logout', [], {}),
('password_change', [], {}),
('password_change_done', [], {}),
('password_reset', [], {}),
('password_reset_done', [], {}),
('password_reset_confirm', [], {
'uidb64': 'aaaaaaa',
'token': '1111-aaaaa',
}),
('password_reset_complete', [], {}),
]
for name, args, kwargs in expected_named_urls:
try:
reverse(name, args=args, kwargs=kwargs)
except NoReverseMatch:
self.fail("Reversal of url named '%s' failed with NoReverseMatch" % name)
@skipIfCustomUser
class PasswordResetTest(AuthViewsTestCase):
def test_email_not_found(self):
"""If the provided email is not registered, don't raise any error but
also don't send any email."""
response = self.client.get('/password_reset/')
self.assertEqual(response.status_code, 200)
response = self.client.post('/password_reset/', {'email': 'not_a_real_email@email.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 0)
def test_email_found(self):
"Email is sent if a valid email address is provided for password reset"
response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertIn("http://", mail.outbox[0].body)
self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)
# An optional multipart text/html email may be sent; make sure the
# original, default single-part behaviour stays exactly the same.
self.assertFalse(mail.outbox[0].message().is_multipart())
def test_html_mail_template(self):
"""
A multipart email with text/plain and text/html is sent
if the html_email_template parameter is passed to the view
"""
response = self.client.post('/password_reset/html_email_template/', {'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0].message()
self.assertEqual(len(message.get_payload()), 2)
self.assertTrue(message.is_multipart())
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
self.assertNotIn('<html>', message.get_payload(0).get_payload())
self.assertIn('<html>', message.get_payload(1).get_payload())
def test_email_found_custom_from(self):
"Email is sent if a valid email address is provided for password reset when a custom from_email is provided."
response = self.client.post('/password_reset_from_email/', {'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual("staffmember@example.com", mail.outbox[0].from_email)
@ignore_warnings(category=RemovedInDjango20Warning)
@override_settings(ALLOWED_HOSTS=['adminsite.com'])
def test_admin_reset(self):
"If the reset view is marked as being for admin, the HTTP_HOST header is used for a domain override."
response = self.client.post('/admin_password_reset/',
{'email': 'staffmember@example.com'},
HTTP_HOST='adminsite.com'
)
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertIn("http://adminsite.com", mail.outbox[0].body)
self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)
# Skip any 500 handler action (like sending more mail...)
@override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
def test_poisoned_http_host(self):
"Poisoned HTTP_HOST headers can't be used for reset emails"
# This attack is based on the way browsers handle URLs. The colon
# should be used to separate the port, but if the URL contains an @,
# the colon is interpreted as part of a username for login purposes,
# making 'evil.com' the request domain. Since HTTP_HOST is used to
# produce a meaningful reset URL, we need to be certain that the
# HTTP_HOST header isn't poisoned. This is done as a check when get_host()
# is invoked, but we check here as a practical consequence.
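# For instance, urlparse('http://www.example:dr.frankenstein@evil.tld')
# reports hostname 'evil.tld', since everything before the '@' is parsed
# as userinfo rather than as host and port.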
with patch_logger('django.security.DisallowedHost', 'error') as logger_calls:
response = self.client.post(
'/password_reset/',
{'email': 'staffmember@example.com'},
HTTP_HOST='www.example:dr.frankenstein@evil.tld'
)
self.assertEqual(response.status_code, 400)
self.assertEqual(len(mail.outbox), 0)
self.assertEqual(len(logger_calls), 1)
# Skip any 500 handler action (like sending more mail...)
@override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
def test_poisoned_http_host_admin_site(self):
"Poisoned HTTP_HOST headers can't be used for reset emails on admin views"
with patch_logger('django.security.DisallowedHost', 'error') as logger_calls:
response = self.client.post(
'/admin_password_reset/',
{'email': 'staffmember@example.com'},
HTTP_HOST='www.example:dr.frankenstein@evil.tld'
)
self.assertEqual(response.status_code, 400)
self.assertEqual(len(mail.outbox), 0)
self.assertEqual(len(logger_calls), 1)
def _test_confirm_start(self):
# Start by creating the email
self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
self.assertEqual(len(mail.outbox), 1)
return self._read_signup_email(mail.outbox[0])
def _read_signup_email(self, email):
urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
self.assertIsNotNone(urlmatch, "No URL found in sent email")
return urlmatch.group(), urlmatch.groups()[0]
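# For a body containing, say, 'http://testserver/reset/<uidb64>/<token>/',
# group() is the full URL and groups()[0] is the '/reset/...' path that
# the confirm tests below GET and POST.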
def test_confirm_valid(self):
url, path = self._test_confirm_start()
response = self.client.get(path)
# redirect to a 'complete' page:
self.assertContains(response, "Please enter your new password")
def test_confirm_invalid(self):
url, path = self._test_confirm_start()
# Let's munge the token in the path, but keep the same length,
# in case the URLconf rejects a different length.
path = path[:-5] + ("0" * 4) + path[-1]
response = self.client.get(path)
self.assertContains(response, "The password reset link was invalid")
def test_confirm_invalid_user(self):
# Ensure that we get a 200 response for a non-existent user, not a 404
response = self.client.get('/reset/123456/1-1/')
self.assertContains(response, "The password reset link was invalid")
def test_confirm_overflow_user(self):
# Ensure that we get a 200 response for a base36 user id that overflows int
response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/')
self.assertContains(response, "The password reset link was invalid")
def test_confirm_invalid_post(self):
# Same as test_confirm_invalid, but trying
# to do a POST instead.
url, path = self._test_confirm_start()
path = path[:-5] + ("0" * 4) + path[-1]
self.client.post(path, {
'new_password1': 'anewpassword',
'new_password2': ' anewpassword',
})
# Check the password has not been changed
u = User.objects.get(email='staffmember@example.com')
self.assertTrue(not u.check_password("anewpassword"))
def test_confirm_complete(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'anewpassword'})
# Check the password has been changed
u = User.objects.get(email='staffmember@example.com')
self.assertTrue(u.check_password("anewpassword"))
# Check we can't use the link again
response = self.client.get(path)
self.assertContains(response, "The password reset link was invalid")
def test_confirm_different_passwords(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'x'})
self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])
def test_reset_redirect_default(self):
response = self.client.post('/password_reset/',
{'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/done/')
def test_reset_custom_redirect(self):
response = self.client.post('/password_reset/custom_redirect/',
{'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/custom/')
def test_reset_custom_redirect_named(self):
response = self.client.post('/password_reset/custom_redirect/named/',
{'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/')
def test_confirm_redirect_default(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'anewpassword'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/reset/done/')
def test_confirm_redirect_custom(self):
url, path = self._test_confirm_start()
path = path.replace('/reset/', '/reset/custom/')
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'anewpassword'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/custom/')
def test_confirm_redirect_custom_named(self):
url, path = self._test_confirm_start()
path = path.replace('/reset/', '/reset/custom/named/')
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'anewpassword'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/')
def test_confirm_display_user_from_form(self):
url, path = self._test_confirm_start()
response = self.client.get(path)
# #16919 -- The ``password_reset_confirm`` view should pass the user
# object to the ``SetPasswordForm``, even on GET requests.
# For this test, we render ``{{ form.user }}`` in the template
# ``registration/password_reset_confirm.html`` so that we can test this.
username = User.objects.get(email='staffmember@example.com').username
self.assertContains(response, "Hello, %s." % username)
# However, the view should NOT pass any user object on a form if the
# password reset link was invalid.
response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/')
self.assertContains(response, "Hello, .")
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
class CustomUserPasswordResetTest(AuthViewsTestCase):
fixtures = ['custom_user.json']
def _test_confirm_start(self):
# Start by creating the email
response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
return self._read_signup_email(mail.outbox[0])
def _read_signup_email(self, email):
urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
self.assertIsNotNone(urlmatch, "No URL found in sent email")
return urlmatch.group(), urlmatch.groups()[0]
def test_confirm_valid_custom_user(self):
url, path = self._test_confirm_start()
response = self.client.get(path)
# redirect to a 'complete' page:
self.assertContains(response, "Please enter your new password")
@skipIfCustomUser
class ChangePasswordTest(AuthViewsTestCase):
def fail_login(self, password='password'):
response = self.client.post('/login/', {
'username': 'testclient',
'password': password,
})
self.assertFormError(response, AuthenticationForm.error_messages['invalid_login'] % {
'username': User._meta.get_field('username').verbose_name
})
def logout(self):
self.client.get('/logout/')
def test_password_change_fails_with_invalid_old_password(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'donuts',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertFormError(response, PasswordChangeForm.error_messages['password_incorrect'])
def test_password_change_fails_with_mismatched_passwords(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'donuts',
})
self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])
def test_password_change_succeeds(self):
self.login()
self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.fail_login()
self.login(password='password1')
def test_password_change_done_succeeds(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_change/done/')
@override_settings(LOGIN_URL='/login/')
def test_password_change_done_fails(self):
response = self.client.get('/password_change/done/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/login/?next=/password_change/done/')
def test_password_change_redirect_default(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_change/done/')
def test_password_change_redirect_custom(self):
self.login()
response = self.client.post('/password_change/custom/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/custom/')
def test_password_change_redirect_custom_named(self):
self.login()
response = self.client.post('/password_change/custom/named/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/')
@override_settings(MIDDLEWARE_CLASSES=list(settings.MIDDLEWARE_CLASSES) + [
'django.contrib.auth.middleware.SessionAuthenticationMiddleware'
])
class SessionAuthenticationTests(AuthViewsTestCase):
def test_user_password_change_updates_session(self):
"""
#21649 - Ensure contrib.auth.views.password_change updates the user's
session auth hash after a password change so the session isn't logged out.
"""
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
# If the hash isn't updated, retrieving the redirection page will fail.
self.assertRedirects(response, '/password_change/done/')
@skipIfCustomUser
class LoginTest(AuthViewsTestCase):
def test_current_site_in_context_after_login(self):
response = self.client.get(reverse('login'))
self.assertEqual(response.status_code, 200)
if apps.is_installed('django.contrib.sites'):
Site = apps.get_model('sites.Site')
site = Site.objects.get_current()
self.assertEqual(response.context['site'], site)
self.assertEqual(response.context['site_name'], site.name)
else:
self.assertIsInstance(response.context['site'], RequestSite)
self.assertIsInstance(response.context['form'], AuthenticationForm)
def test_security_check(self, password='password'):
login_url = reverse('login')
# These URLs should not pass the security check
for bad_url in ('http://example.com',
'http:///example.com',
'https://example.com',
'ftp://exampel.com',
'///example.com',
'//example.com',
'javascript:alert("XSS")'):
nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
'url': login_url,
'next': REDIRECT_FIELD_NAME,
'bad_url': urlquote(bad_url),
}
response = self.client.post(nasty_url, {
'username': 'testclient',
'password': password,
})
self.assertEqual(response.status_code, 302)
self.assertNotIn(bad_url, response.url,
"%s should be blocked" % bad_url)
# These URLs *should* still pass the security check
for good_url in ('/view/?param=http://example.com',
'/view/?param=https://example.com',
'/view?param=ftp://exampel.com',
'view/?param=//example.com',
'https://testserver/',
'HTTPS://testserver/',
'//testserver/',
'/url%20with%20spaces/'): # see ticket #12534
safe_url = '%(url)s?%(next)s=%(good_url)s' % {
'url': login_url,
'next': REDIRECT_FIELD_NAME,
'good_url': urlquote(good_url),
}
response = self.client.post(safe_url, {
'username': 'testclient',
'password': password,
})
self.assertEqual(response.status_code, 302)
self.assertIn(good_url, response.url, "%s should be allowed" % good_url)
def test_login_form_contains_request(self):
# Regression test for #15198.
self.client.post('/custom_requestauth_login/', {
'username': 'testclient',
'password': 'password',
}, follow=True)
# the custom authentication form used by this login asserts
# that a request is passed to the form successfully.
def test_login_csrf_rotate(self, password='password'):
"""
Makes sure that a login rotates the currently-used CSRF token.
"""
# Do a GET to establish a CSRF token
# TestClient isn't used here as we're testing middleware, essentially.
req = HttpRequest()
CsrfViewMiddleware().process_view(req, login_view, (), {})
req.META["CSRF_COOKIE_USED"] = True
resp = login_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
token1 = csrf_cookie.coded_value
# Prepare the POST request
req = HttpRequest()
req.COOKIES[settings.CSRF_COOKIE_NAME] = token1
req.method = "POST"
req.POST = {'username': 'testclient', 'password': password, 'csrfmiddlewaretoken': token1}
# Use POST request to log in
SessionMiddleware().process_request(req)
CsrfViewMiddleware().process_view(req, login_view, (), {})
req.META["SERVER_NAME"] = "testserver" # Required to have redirect work in login view
req.META["SERVER_PORT"] = 80
resp = login_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
token2 = csrf_cookie.coded_value
# Check the CSRF token switched
self.assertNotEqual(token1, token2)
def test_session_key_flushed_on_login(self):
"""
To avoid reusing another user's session, ensure a new, empty session is
created if the existing session corresponds to a different authenticated
user.
"""
self.login()
original_session_key = self.client.session.session_key
self.login(username='staff')
self.assertNotEqual(original_session_key, self.client.session.session_key)
def test_session_key_flushed_on_login_after_password_change(self):
"""
        As above, but the same user logging in after a password change.
"""
self.login()
original_session_key = self.client.session.session_key
# If no password change, session key should not be flushed.
self.login()
self.assertEqual(original_session_key, self.client.session.session_key)
user = User.objects.get(username='testclient')
user.set_password('foobar')
user.save()
self.login(password='foobar')
self.assertNotEqual(original_session_key, self.client.session.session_key)
def test_login_session_without_hash_session_key(self):
"""
        Session without django.contrib.auth.HASH_SESSION_KEY should log in
        without raising an exception.
"""
user = User.objects.get(username='testclient')
engine = import_module(settings.SESSION_ENGINE)
session = engine.SessionStore()
session[SESSION_KEY] = user.id
session.save()
original_session_key = session.session_key
self.client.cookies[settings.SESSION_COOKIE_NAME] = original_session_key
self.login()
self.assertNotEqual(original_session_key, self.client.session.session_key)
@skipIfCustomUser
class LoginURLSettings(AuthViewsTestCase):
"""Tests for settings.LOGIN_URL."""
def assertLoginURLEquals(self, url, parse_qs=False):
response = self.client.get('/login_required/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, url, parse_qs=parse_qs)
@override_settings(LOGIN_URL='/login/')
def test_standard_login_url(self):
self.assertLoginURLEquals('/login/?next=/login_required/')
@override_settings(LOGIN_URL='login')
def test_named_login_url(self):
self.assertLoginURLEquals('/login/?next=/login_required/')
@override_settings(LOGIN_URL='http://remote.example.com/login')
def test_remote_login_url(self):
quoted_next = urlquote('http://testserver/login_required/')
expected = 'http://remote.example.com/login?next=%s' % quoted_next
self.assertLoginURLEquals(expected)
@override_settings(LOGIN_URL='https:///login/')
def test_https_login_url(self):
quoted_next = urlquote('http://testserver/login_required/')
expected = 'https:///login/?next=%s' % quoted_next
self.assertLoginURLEquals(expected)
@override_settings(LOGIN_URL='/login/?pretty=1')
def test_login_url_with_querystring(self):
self.assertLoginURLEquals('/login/?pretty=1&next=/login_required/', parse_qs=True)
@override_settings(LOGIN_URL='http://remote.example.com/login/?next=/default/')
def test_remote_login_url_with_next_querystring(self):
quoted_next = urlquote('http://testserver/login_required/')
expected = 'http://remote.example.com/login/?next=%s' % quoted_next
self.assertLoginURLEquals(expected)
@skipIfCustomUser
class LoginRedirectUrlTest(AuthViewsTestCase):
"""Tests for settings.LOGIN_REDIRECT_URL."""
def assertLoginRedirectURLEqual(self, url):
response = self.login()
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, url)
def test_default(self):
self.assertLoginRedirectURLEqual('/accounts/profile/')
@override_settings(LOGIN_REDIRECT_URL='/custom/')
def test_custom(self):
self.assertLoginRedirectURLEqual('/custom/')
@override_settings(LOGIN_REDIRECT_URL='password_reset')
def test_named(self):
self.assertLoginRedirectURLEqual('/password_reset/')
@override_settings(LOGIN_REDIRECT_URL='http://remote.example.com/welcome/')
def test_remote(self):
self.assertLoginRedirectURLEqual('http://remote.example.com/welcome/')
@skipIfCustomUser
class LogoutTest(AuthViewsTestCase):
def confirm_logged_out(self):
self.assertNotIn(SESSION_KEY, self.client.session)
def test_logout_default(self):
"Logout without next_page option renders the default template"
self.login()
response = self.client.get('/logout/')
self.assertContains(response, 'Logged out')
self.confirm_logged_out()
def test_14377(self):
# Bug 14377
self.login()
response = self.client.get('/logout/')
self.assertIn('site', response.context)
def test_logout_with_overridden_redirect_url(self):
# Bug 11223
self.login()
response = self.client.get('/logout/next_page/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/somewhere/')
response = self.client.get('/logout/next_page/?next=/login/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/login/')
self.confirm_logged_out()
def test_logout_with_next_page_specified(self):
"Logout with next_page option given redirects to specified resource"
self.login()
response = self.client.get('/logout/next_page/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/somewhere/')
self.confirm_logged_out()
def test_logout_with_redirect_argument(self):
"Logout with query string redirects to specified resource"
self.login()
response = self.client.get('/logout/?next=/login/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/login/')
self.confirm_logged_out()
def test_logout_with_custom_redirect_argument(self):
"Logout with custom query string redirects to specified resource"
self.login()
response = self.client.get('/logout/custom_query/?follow=/somewhere/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/somewhere/')
self.confirm_logged_out()
def test_logout_with_named_redirect(self):
"Logout resolves names or URLs passed as next_page."
self.login()
response = self.client.get('/logout/next_page/named/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/')
self.confirm_logged_out()
def test_security_check(self, password='password'):
logout_url = reverse('logout')
# Those URLs should not pass the security check
for bad_url in ('http://example.com',
'http:///example.com',
'https://example.com',
'ftp://exampel.com',
'///example.com',
'//example.com',
'javascript:alert("XSS")'):
nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
'url': logout_url,
'next': REDIRECT_FIELD_NAME,
'bad_url': urlquote(bad_url),
}
self.login()
response = self.client.get(nasty_url)
self.assertEqual(response.status_code, 302)
self.assertNotIn(bad_url, response.url,
"%s should be blocked" % bad_url)
self.confirm_logged_out()
# These URLs *should* still pass the security check
for good_url in ('/view/?param=http://example.com',
'/view/?param=https://example.com',
'/view?param=ftp://exampel.com',
'view/?param=//example.com',
'https://testserver/',
'HTTPS://testserver/',
'//testserver/',
'/url%20with%20spaces/'): # see ticket #12534
safe_url = '%(url)s?%(next)s=%(good_url)s' % {
'url': logout_url,
'next': REDIRECT_FIELD_NAME,
'good_url': urlquote(good_url),
}
self.login()
response = self.client.get(safe_url)
self.assertEqual(response.status_code, 302)
self.assertIn(good_url, response.url, "%s should be allowed" % good_url)
self.confirm_logged_out()
def test_logout_preserve_language(self):
"""Check that language stored in session is preserved after logout"""
# Create a new session with language
engine = import_module(settings.SESSION_ENGINE)
session = engine.SessionStore()
session[LANGUAGE_SESSION_KEY] = 'pl'
session.save()
self.client.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
self.client.get('/logout/')
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], 'pl')
@skipIfCustomUser
@override_settings(
# Redirect in test_user_change_password will fail if session auth hash
# isn't updated after password change (#21649)
MIDDLEWARE_CLASSES=list(settings.MIDDLEWARE_CLASSES) + [
'django.contrib.auth.middleware.SessionAuthenticationMiddleware'
],
PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF='django.contrib.auth.tests.urls_admin',
)
class ChangelistTests(AuthViewsTestCase):
def setUp(self):
# Make me a superuser before logging in.
User.objects.filter(username='testclient').update(is_staff=True, is_superuser=True)
self.login()
self.admin = User.objects.get(pk=1)
def get_user_data(self, user):
return {
'username': user.username,
'password': user.password,
'email': user.email,
'is_active': user.is_active,
'is_staff': user.is_staff,
'is_superuser': user.is_superuser,
'last_login_0': user.last_login.strftime('%Y-%m-%d'),
'last_login_1': user.last_login.strftime('%H:%M:%S'),
'initial-last_login_0': user.last_login.strftime('%Y-%m-%d'),
'initial-last_login_1': user.last_login.strftime('%H:%M:%S'),
'date_joined_0': user.date_joined.strftime('%Y-%m-%d'),
'date_joined_1': user.date_joined.strftime('%H:%M:%S'),
'initial-date_joined_0': user.date_joined.strftime('%Y-%m-%d'),
'initial-date_joined_1': user.date_joined.strftime('%H:%M:%S'),
'first_name': user.first_name,
'last_name': user.last_name,
}
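    # Note: the paired *_0/*_1 keys above mirror the admin's split
    # date/time widget, which submits the date and time halves of
    # last_login/date_joined as two separate form fields.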
# #20078 - users shouldn't be allowed to guess password hashes via
# repeated password__startswith queries.
def test_changelist_disallows_password_lookups(self):
# A lookup that tries to filter on password isn't OK
with patch_logger('django.security.DisallowedModelAdminLookup', 'error') as logger_calls:
response = self.client.get('/admin/auth/user/?password__startswith=sha1$')
self.assertEqual(response.status_code, 400)
self.assertEqual(len(logger_calls), 1)
def test_user_change_email(self):
data = self.get_user_data(self.admin)
data['email'] = 'new_' + data['email']
response = self.client.post('/admin/auth/user/%s/' % self.admin.pk, data)
self.assertRedirects(response, '/admin/auth/user/')
row = LogEntry.objects.latest('id')
self.assertEqual(row.change_message, 'Changed email.')
def test_user_not_change(self):
response = self.client.post('/admin/auth/user/%s/' % self.admin.pk,
self.get_user_data(self.admin)
)
self.assertRedirects(response, '/admin/auth/user/')
row = LogEntry.objects.latest('id')
self.assertEqual(row.change_message, 'No fields changed.')
def test_user_change_password(self):
response = self.client.post('/admin/auth/user/%s/password/' % self.admin.pk, {
'password1': 'password1',
'password2': 'password1',
})
self.assertRedirects(response, '/admin/auth/user/%s/' % self.admin.pk)
row = LogEntry.objects.latest('id')
self.assertEqual(row.change_message, 'Changed password.')
self.logout()
self.login(password='password1')
def test_user_change_different_user_password(self):
u = User.objects.get(email='staffmember@example.com')
response = self.client.post('/admin/auth/user/%s/password/' % u.pk, {
'password1': 'password1',
'password2': 'password1',
})
self.assertRedirects(response, '/admin/auth/user/%s/' % u.pk)
row = LogEntry.objects.latest('id')
self.assertEqual(row.user_id, self.admin.pk)
self.assertEqual(row.object_id, str(u.pk))
self.assertEqual(row.change_message, 'Changed password.')
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import argparse
import glob
import itertools
import json
import os
import six
import string
import subprocess
import tempfile
import uuid
import numpy as np
ARROW_HOME = os.path.abspath(__file__).rsplit("/", 2)[0]
# Control for flakiness
np.random.seed(12345)
def load_version_from_pom():
import xml.etree.ElementTree as ET
tree = ET.parse(os.path.join(ARROW_HOME, 'java', 'pom.xml'))
version_tag = list(tree.getroot().findall('{http://maven.apache.org/POM/4.0.0}version'))[0]
return version_tag.text
def guid():
return uuid.uuid4().hex
# from pandas
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits),
dtype=(np.str_, 1))
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return ''.join(np.random.choice(RANDS_CHARS, nchars))
def str_from_bytes(x):
if six.PY2:
return x
else:
return x.decode('utf-8')
# from the merge_arrow_pr.py script
def run_cmd(cmd):
if isinstance(cmd, six.string_types):
cmd = cmd.split(' ')
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
# this avoids hiding the stdout / stderr of failed processes
print('Command failed: %s' % ' '.join(cmd))
print('With output:')
print('--------------')
print(str_from_bytes(e.output))
print('--------------')
raise e
return str_from_bytes(output)
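# Illustrative use of run_cmd (hypothetical command; the child's output is
# returned as a decoded string, and failures print the captured output
# before re-raising):
#
#     >>> run_cmd(['echo', 'hello'])
#     'hello\n'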
# ----------------------------------------------------------------------
# Data generation
class DataType(object):
def __init__(self, name, nullable=True):
self.name = name
self.nullable = nullable
def get_json(self):
return OrderedDict([
('name', self.name),
('type', self._get_type()),
('nullable', self.nullable),
('children', self._get_children()),
('typeLayout', self._get_type_layout())
])
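    # For example, a nullable 32-bit signed integer field serializes roughly
    # as (illustrative sketch; the field name is hypothetical):
    #
    #     {"name": "f0",
    #      "type": {"name": "int", "isSigned": true, "bitWidth": 32},
    #      "nullable": true, "children": [], "typeLayout": {...}}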
def _make_is_valid(self, size):
if self.nullable:
return np.random.randint(0, 2, size=size)
else:
return np.ones(size)
class Column(object):
def __init__(self, name, count):
self.name = name
self.count = count
def _get_children(self):
return []
def _get_buffers(self):
return []
def get_json(self):
entries = [
('name', self.name),
('count', self.count)
]
buffers = self._get_buffers()
entries.extend(buffers)
children = self._get_children()
if len(children) > 0:
entries.append(('children', children))
return OrderedDict(entries)
class PrimitiveType(DataType):
def _get_children(self):
return []
def _get_type_layout(self):
return OrderedDict([
('vectors',
[OrderedDict([('type', 'VALIDITY'),
('typeBitWidth', 1)]),
OrderedDict([('type', 'DATA'),
('typeBitWidth', self.bit_width)])])])
class PrimitiveColumn(Column):
def __init__(self, name, count, is_valid, values):
Column.__init__(self, name, count)
self.is_valid = is_valid
self.values = values
def _get_buffers(self):
return [
('VALIDITY', [int(v) for v in self.is_valid]),
('DATA', list(self.values))
]
TEST_INT_MIN = -(2**31) + 1
TEST_INT_MAX = 2**31 - 1
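# Editorial note: IntegerType.generate_column below clamps generated values
# to these bounds, presumably so that wide integer types stay within a range
# every JSON consumer can represent exactly; the rationale is inferred from
# the code, not documented here.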
class IntegerType(PrimitiveType):
def __init__(self, name, is_signed, bit_width, nullable=True):
PrimitiveType.__init__(self, name, nullable=nullable)
self.is_signed = is_signed
self.bit_width = bit_width
@property
def numpy_type(self):
return ('int' if self.is_signed else 'uint') + str(self.bit_width)
def _get_type(self):
return OrderedDict([
('name', 'int'),
('isSigned', self.is_signed),
('bitWidth', self.bit_width)
])
def generate_column(self, size):
iinfo = np.iinfo(self.numpy_type)
values = [int(x) for x in
np.random.randint(max(iinfo.min, TEST_INT_MIN),
min(iinfo.max, TEST_INT_MAX),
size=size)]
is_valid = self._make_is_valid(size)
return PrimitiveColumn(self.name, size, is_valid, values)
class FloatingPointType(PrimitiveType):
def __init__(self, name, bit_width, nullable=True):
PrimitiveType.__init__(self, name, nullable=nullable)
self.bit_width = bit_width
self.precision = {
16: 'HALF',
32: 'SINGLE',
64: 'DOUBLE'
}[self.bit_width]
@property
def numpy_type(self):
return 'float' + str(self.bit_width)
def _get_type(self):
return OrderedDict([
('name', 'floatingpoint'),
('precision', self.precision)
])
def generate_column(self, size):
values = np.random.randn(size) * 1000
values = np.round(values, 3)
is_valid = self._make_is_valid(size)
return PrimitiveColumn(self.name, size, is_valid, values)
class BooleanType(PrimitiveType):
bit_width = 1
def _get_type(self):
return OrderedDict([('name', 'bool')])
@property
def numpy_type(self):
return 'bool'
def generate_column(self, size):
values = list(map(bool, np.random.randint(0, 2, size=size)))
is_valid = self._make_is_valid(size)
return PrimitiveColumn(self.name, size, is_valid, values)
class BinaryType(PrimitiveType):
@property
def numpy_type(self):
return object
@property
def column_class(self):
return BinaryColumn
def _get_type(self):
return OrderedDict([('name', 'binary')])
def _get_type_layout(self):
return OrderedDict([
('vectors',
[OrderedDict([('type', 'VALIDITY'),
('typeBitWidth', 1)]),
OrderedDict([('type', 'OFFSET'),
('typeBitWidth', 32)]),
OrderedDict([('type', 'DATA'),
('typeBitWidth', 8)])])])
def generate_column(self, size):
K = 7
is_valid = self._make_is_valid(size)
values = []
for i in range(size):
if is_valid[i]:
draw = (np.random.randint(0, 255, size=K)
.astype(np.uint8)
.tostring())
values.append(draw)
else:
values.append("")
return self.column_class(self.name, size, is_valid, values)
class StringType(BinaryType):
@property
def column_class(self):
return StringColumn
def _get_type(self):
return OrderedDict([('name', 'utf8')])
def generate_column(self, size):
K = 7
is_valid = self._make_is_valid(size)
values = []
for i in range(size):
if is_valid[i]:
values.append(rands(K))
else:
values.append("")
return self.column_class(self.name, size, is_valid, values)
class JSONSchema(object):
def __init__(self, fields):
self.fields = fields
def get_json(self):
return OrderedDict([
('fields', [field.get_json() for field in self.fields])
])
class BinaryColumn(PrimitiveColumn):
def _encode_value(self, x):
return ''.join('{:02x}'.format(c).upper() for c in x)
def _get_buffers(self):
offset = 0
offsets = [0]
data = []
for i, v in enumerate(self.values):
if self.is_valid[i]:
offset += len(v)
else:
v = ""
offsets.append(offset)
data.append(self._encode_value(v))
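        # Worked example (hypothetical input): values [b'ab', None, b'c']
        # with is_valid [1, 0, 1] yield OFFSET [0, 2, 2, 3] (nulls occupy
        # zero-length slots) and DATA ['6162', '', '63'] (upper-case hex).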
return [
('VALIDITY', [int(x) for x in self.is_valid]),
('OFFSET', offsets),
('DATA', data)
]
class StringColumn(BinaryColumn):
def _encode_value(self, x):
return x
class ListType(DataType):
def __init__(self, name, value_type, nullable=True):
DataType.__init__(self, name, nullable=nullable)
self.value_type = value_type
def _get_type(self):
return OrderedDict([
('name', 'list')
])
def _get_children(self):
return [self.value_type.get_json()]
def _get_type_layout(self):
return OrderedDict([
('vectors',
[OrderedDict([('type', 'VALIDITY'),
('typeBitWidth', 1)]),
OrderedDict([('type', 'OFFSET'),
('typeBitWidth', 32)])])])
def generate_column(self, size):
MAX_LIST_SIZE = 4
is_valid = self._make_is_valid(size)
list_sizes = np.random.randint(0, MAX_LIST_SIZE + 1, size=size)
offsets = [0]
offset = 0
for i in range(size):
if is_valid[i]:
offset += int(list_sizes[i])
offsets.append(offset)
        # The final offset equals the total number of elements in the child array
values = self.value_type.generate_column(offset)
return ListColumn(self.name, size, is_valid, offsets, values)
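    # Sketch (hypothetical sizes): with size=3, list_sizes [2, 0, 1], and all
    # entries valid, offsets become [0, 2, 2, 3] and the child column holds 3
    # values; list i spans child[offsets[i]:offsets[i+1]].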
class ListColumn(Column):
def __init__(self, name, count, is_valid, offsets, values):
Column.__init__(self, name, count)
self.is_valid = is_valid
self.offsets = offsets
self.values = values
def _get_buffers(self):
return [
('VALIDITY', [int(v) for v in self.is_valid]),
('OFFSET', list(self.offsets))
]
def _get_children(self):
return [self.values.get_json()]
class StructType(DataType):
def __init__(self, name, field_types, nullable=True):
DataType.__init__(self, name, nullable=nullable)
self.field_types = field_types
def _get_type(self):
return OrderedDict([
('name', 'struct')
])
def _get_children(self):
return [type_.get_json() for type_ in self.field_types]
def _get_type_layout(self):
return OrderedDict([
('vectors',
[OrderedDict([('type', 'VALIDITY'),
('typeBitWidth', 1)])])])
def generate_column(self, size):
is_valid = self._make_is_valid(size)
field_values = [type_.generate_column(size)
for type_ in self.field_types]
return StructColumn(self.name, size, is_valid, field_values)
class StructColumn(Column):
def __init__(self, name, count, is_valid, field_values):
Column.__init__(self, name, count)
self.is_valid = is_valid
self.field_values = field_values
def _get_buffers(self):
return [
('VALIDITY', [int(v) for v in self.is_valid])
]
def _get_children(self):
return [field.get_json() for field in self.field_values]
class JSONRecordBatch(object):
def __init__(self, count, columns):
self.count = count
self.columns = columns
def get_json(self):
return OrderedDict([
('count', self.count),
('columns', [col.get_json() for col in self.columns])
])
class JSONFile(object):
def __init__(self, schema, batches):
self.schema = schema
self.batches = batches
def get_json(self):
return OrderedDict([
('schema', self.schema.get_json()),
('batches', [batch.get_json() for batch in self.batches])
])
def write(self, path):
with open(path, 'wb') as f:
f.write(json.dumps(self.get_json(), indent=2).encode('utf-8'))
def get_field(name, type_, nullable=True):
if type_ == 'binary':
return BinaryType(name, nullable=nullable)
elif type_ == 'utf8':
return StringType(name, nullable=nullable)
dtype = np.dtype(type_)
if dtype.kind in ('i', 'u'):
return IntegerType(name, dtype.kind == 'i', dtype.itemsize * 8,
nullable=nullable)
elif dtype.kind == 'f':
return FloatingPointType(name, dtype.itemsize * 8,
nullable=nullable)
elif dtype.kind == 'b':
return BooleanType(name, nullable=nullable)
else:
raise TypeError(dtype)
def generate_primitive_case():
types = ['bool', 'int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64',
'float32', 'float64', 'binary', 'utf8']
fields = []
for type_ in types:
fields.append(get_field(type_ + "_nullable", type_, True))
fields.append(get_field(type_ + "_nonnullable", type_, False))
schema = JSONSchema(fields)
batch_sizes = [7, 10]
batches = []
for size in batch_sizes:
columns = []
for field in fields:
col = field.generate_column(size)
columns.append(col)
batches.append(JSONRecordBatch(size, columns))
return JSONFile(schema, batches)
def generate_nested_case():
fields = [
ListType('list_nullable', get_field('item', 'int32')),
StructType('struct_nullable', [get_field('f1', 'int32'),
get_field('f2', 'utf8')]),
# TODO(wesm): this causes segfault
# ListType('list_nonnullable', get_field('item', 'int32'), False),
]
schema = JSONSchema(fields)
batch_sizes = [7, 10]
batches = []
for size in batch_sizes:
columns = []
for field in fields:
col = field.generate_column(size)
columns.append(col)
batches.append(JSONRecordBatch(size, columns))
return JSONFile(schema, batches)
def get_generated_json_files():
temp_dir = tempfile.mkdtemp()
file_objs = []
K = 10
for i in range(K):
file_objs.append(generate_primitive_case())
file_objs.append(generate_nested_case())
generated_paths = []
for file_obj in file_objs:
out_path = os.path.join(temp_dir, guid() + '.json')
file_obj.write(out_path)
generated_paths.append(out_path)
return generated_paths
# ----------------------------------------------------------------------
# Testing harness
class IntegrationRunner(object):
def __init__(self, json_files, testers, debug=False):
self.json_files = json_files
self.testers = testers
self.temp_dir = tempfile.mkdtemp()
self.debug = debug
def run(self):
for producer, consumer in itertools.product(self.testers,
self.testers):
if producer is consumer:
continue
print('-- {0} producing, {1} consuming'.format(producer.name,
consumer.name))
for json_path in self.json_files:
print('Testing file {0}'.format(json_path))
# Make the random access file
print('-- Creating binary inputs')
producer_file_path = os.path.join(self.temp_dir, guid())
producer.json_to_file(json_path, producer_file_path)
# Validate the file
print('-- Validating file')
consumer.validate(json_path, producer_file_path)
print('-- Validating stream')
producer_stream_path = os.path.join(self.temp_dir, guid())
consumer_file_path = os.path.join(self.temp_dir, guid())
producer.file_to_stream(producer_file_path,
producer_stream_path)
consumer.stream_to_file(producer_stream_path,
consumer_file_path)
consumer.validate(json_path, consumer_file_path)
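# In other words, run() exercises every ordered (producer, consumer) pair;
# with the default testers this means C++ producing for Java and Java
# producing for C++, each JSON file validated twice: once via the
# random-access file and once after a file -> stream -> file round trip.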
class Tester(object):
def __init__(self, debug=False):
self.debug = debug
def json_to_file(self, json_path, arrow_path):
raise NotImplementedError
def stream_to_file(self, stream_path, file_path):
raise NotImplementedError
def file_to_stream(self, file_path, stream_path):
raise NotImplementedError
def validate(self, json_path, arrow_path):
raise NotImplementedError
class JavaTester(Tester):
_arrow_version = load_version_from_pom()
ARROW_TOOLS_JAR = os.environ.get(
'ARROW_JAVA_INTEGRATION_JAR',
os.path.join(ARROW_HOME,
'java/tools/target/arrow-tools-{}-'
'jar-with-dependencies.jar'.format(_arrow_version)))
name = 'Java'
def _run(self, arrow_path=None, json_path=None, command='VALIDATE'):
cmd = ['java', '-cp', self.ARROW_TOOLS_JAR,
'org.apache.arrow.tools.Integration']
if arrow_path is not None:
cmd.extend(['-a', arrow_path])
if json_path is not None:
cmd.extend(['-j', json_path])
cmd.extend(['-c', command])
if self.debug:
print(' '.join(cmd))
run_cmd(cmd)
def validate(self, json_path, arrow_path):
return self._run(arrow_path, json_path, 'VALIDATE')
def json_to_file(self, json_path, arrow_path):
return self._run(arrow_path, json_path, 'JSON_TO_ARROW')
def stream_to_file(self, stream_path, file_path):
cmd = ['java', '-cp', self.ARROW_TOOLS_JAR,
'org.apache.arrow.tools.StreamToFile',
stream_path, file_path]
if self.debug:
print(' '.join(cmd))
run_cmd(cmd)
def file_to_stream(self, file_path, stream_path):
cmd = ['java', '-cp', self.ARROW_TOOLS_JAR,
'org.apache.arrow.tools.FileToStream',
file_path, stream_path]
if self.debug:
print(' '.join(cmd))
run_cmd(cmd)
class CPPTester(Tester):
EXE_PATH = os.environ.get(
'ARROW_CPP_EXE_PATH',
os.path.join(ARROW_HOME, 'cpp/test-build/debug'))
CPP_INTEGRATION_EXE = os.path.join(EXE_PATH, 'json-integration-test')
STREAM_TO_FILE = os.path.join(EXE_PATH, 'stream-to-file')
FILE_TO_STREAM = os.path.join(EXE_PATH, 'file-to-stream')
name = 'C++'
def _run(self, arrow_path=None, json_path=None, command='VALIDATE'):
cmd = [self.CPP_INTEGRATION_EXE, '--integration']
if arrow_path is not None:
cmd.append('--arrow=' + arrow_path)
if json_path is not None:
cmd.append('--json=' + json_path)
cmd.append('--mode=' + command)
if self.debug:
print(' '.join(cmd))
run_cmd(cmd)
def validate(self, json_path, arrow_path):
return self._run(arrow_path, json_path, 'VALIDATE')
def json_to_file(self, json_path, arrow_path):
return self._run(arrow_path, json_path, 'JSON_TO_ARROW')
def stream_to_file(self, stream_path, file_path):
cmd = ['cat', stream_path, '|', self.STREAM_TO_FILE, '>', file_path]
cmd = ' '.join(cmd)
if self.debug:
print(cmd)
os.system(cmd)
def file_to_stream(self, file_path, stream_path):
cmd = [self.FILE_TO_STREAM, file_path, '>', stream_path]
cmd = ' '.join(cmd)
if self.debug:
print(cmd)
os.system(cmd)
def get_static_json_files():
glob_pattern = os.path.join(ARROW_HOME, 'integration', 'data', '*.json')
return glob.glob(glob_pattern)
def run_all_tests(debug=False):
testers = [CPPTester(debug=debug), JavaTester(debug=debug)]
static_json_files = get_static_json_files()
generated_json_files = get_generated_json_files()
json_files = static_json_files + generated_json_files
runner = IntegrationRunner(json_files, testers, debug=debug)
runner.run()
print('-- All tests passed!')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Arrow integration test CLI')
parser.add_argument('--debug', dest='debug', action='store_true',
default=False,
help='Run executables in debug mode as relevant')
args = parser.parse_args()
run_all_tests(debug=args.debug)
|
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manager for key bindings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import curses
import curses.ascii
import app.config
import app.curses_util
import app.log
import app.selectable
# import app.window
class Controller:
"""A Controller is a keyboard mapping from keyboard/mouse events to editor
commands."""
def __init__(self, view, name):
if app.config.strict_debug:
assert issubclass(self.__class__, Controller)
assert issubclass(view.__class__, app.window.Window)
self.view = view
self.commandDefault = None
self.commandSet = None
self.textBuffer = None
self.name = name
def parent_controller(self):
view = self.view.parent
while view is not None:
if view.controller is not None:
return view.controller
view = view.parent
def change_to_confirm_close(self):
self.find_and_change_to("confirmClose")
def change_to_confirm_overwrite(self):
self.find_and_change_to("confirmOverwrite")
def change_to_file_manager_window(self, *args):
self.find_and_change_to("fileManagerWindow")
def change_to_confirm_quit(self):
self.find_and_change_to("interactiveQuit")
def change_to_host_window(self, *args):
host = self.get_named_window("inputWindow")
if app.config.strict_debug:
assert issubclass(self.view.__class__, app.window.Window), self.view
assert issubclass(host.__class__, app.window.Window), host
self.view.change_focus_to(host)
def change_to_input_window(self, *args):
self.find_and_change_to("inputWindow")
def change_to_find(self):
self.find_and_change_to("interactiveFind")
def change_to_find_prior(self):
curses.ungetch(self.savedCh)
self.find_and_change_to("interactiveFind")
def change_to_goto(self):
self.find_and_change_to("interactiveGoto")
def change_to_palette_window(self):
self.find_and_change_to("paletteWindow")
def change_to_popup(self):
self.find_and_change_to("popupWindow")
def change_to_prediction(self):
self.find_and_change_to("predictionWindow")
# self.find_and_change_to('interactivePrediction')
def change_to_prompt(self):
self.find_and_change_to("interactivePrompt")
def change_to_quit(self):
self.find_and_change_to("interactiveQuit")
def change_to_save_as(self):
view = self.get_named_window("fileManagerWindow")
view.set_mode("saveAs")
view.bring_to_front()
view.change_focus_to(view)
def create_new_text_buffer(self):
bufferManager = self.view.program.bufferManager
self.view.set_text_buffer(bufferManager.new_text_buffer())
def do_command(self, ch, meta):
# Check the commandSet for the input with both its string and integer
# representation.
self.savedCh = ch
cmd = self.commandSet.get(ch) or self.commandSet.get(
app.curses_util.curses_key_name(ch)
)
if cmd:
cmd()
else:
self.commandDefault(ch, meta)
self.textBuffer.compound_change_push()
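    # Dispatch order in do_command: the raw key code is tried against
    # commandSet first; failing that, the name returned by
    # app.curses_util.curses_key_name(ch); anything still unmatched falls
    # through to commandDefault. (A restatement of the code above, not new
    # behavior.)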
def get_named_window(self, windowName):
view = self.view
while view is not None:
if hasattr(view, windowName):
return getattr(view, windowName)
view = view.parent
app.log.fatal(windowName + " not found")
return None
def current_input_window(self):
return self.get_named_window("inputWindow")
def find_and_change_to(self, windowName):
window = self.get_named_window(windowName)
window.bring_to_front()
self.view.change_focus_to(window)
return window
def change_to(self, window):
window.bring_to_front()
self.view.change_focus_to(window)
def focus(self):
pass
def confirmation_prompt_finish(self, *args):
window = self.get_named_window("inputWindow")
window.userIntent = "edit"
window.bring_to_front()
self.view.change_focus_to(window)
def __close_host_file(self, host):
"""Close the current file and switch to another or create an empty
file."""
bufferManager = host.program.bufferManager
bufferManager.close_text_buffer(host.textBuffer)
host.userIntent = "edit"
tb = bufferManager.get_unsaved_buffer()
if not tb:
tb = bufferManager.next_buffer()
if not tb:
tb = bufferManager.new_text_buffer()
host.set_text_buffer(tb)
def close_file(self):
app.log.info()
host = self.get_named_window("inputWindow")
self.__close_host_file(host)
self.confirmation_prompt_finish()
def close_or_confirm_close(self):
"""If the file is clean, close it. If it is dirty, prompt the user
about whether to lose unsaved changes."""
host = self.get_named_window("inputWindow")
tb = host.textBuffer
if not tb.is_dirty():
self.__close_host_file(host)
return
if host.userIntent == "edit":
host.userIntent = "close"
self.change_to_confirm_close()
def initiate_close(self):
"""Called from input window controller."""
self.view.userIntent = "close"
tb = self.view.textBuffer
if not tb.is_dirty():
self.__close_host_file(self.view)
return
self.view.change_focus_to(self.view.confirmClose)
def initiate_quit(self):
"""Called from input window controller."""
self.view.userIntent = "quit"
tb = self.view.textBuffer
if tb.is_dirty():
self.view.change_focus_to(self.view.interactiveQuit)
return
bufferManager = self.view.program.bufferManager
tb = bufferManager.get_unsaved_buffer()
if tb:
self.view.set_text_buffer(tb)
self.view.change_focus_to(self.view.interactiveQuit)
return
bufferManager.debug_log()
self.view.quit_now()
def initiate_save(self):
"""Called from input window controller."""
self.view.userIntent = "edit"
tb = self.view.textBuffer
if tb.fullPath:
if not tb.is_safe_to_write():
self.view.change_focus_to(self.view.confirmOverwrite)
return
tb.file_write()
return
self.change_to_save_as()
def overwrite_host_file(self):
"""Close the current file and switch to another or create an empty
file.
"""
host = self.get_named_window("inputWindow")
host.textBuffer.file_write()
if host.userIntent == "quit":
self.quit_or_switch_to_confirm_quit()
return
if host.userIntent == "close":
self.__close_host_file(host)
self.change_to_host_window()
def next_focusable_window(self):
window = self.view.next_focusable_window(self.view)
if window is not None:
self.view.change_focus_to(window)
return window is not None
def prior_focusable_window(self):
window = self.view.prior_focusable_window(self.view)
if window is not None:
self.view.change_focus_to(window)
return window is not None
def write_or_confirm_overwrite(self):
"""Ask whether the file should be overwritten."""
app.log.debug()
host = self.get_named_window("inputWindow")
tb = host.textBuffer
if not tb.is_safe_to_write():
self.change_to_confirm_overwrite()
return
tb.file_write()
# TODO(dschuyler): Is there a deeper issue here that necessitates saving
# the message? Does this only need to wrap the change_to_host_window()?
# Store the save message so it is not overwritten.
saveMessage = tb.message
if host.userIntent == "quit":
self.quit_or_switch_to_confirm_quit()
return
if host.userIntent == "close":
self.__close_host_file(host)
self.change_to_host_window()
tb.message = saveMessage # Restore the save message.
def quit_or_switch_to_confirm_quit(self):
app.log.debug(self, self.view)
host = self.get_named_window("inputWindow")
tb = host.textBuffer
host.userIntent = "quit"
if tb.is_dirty():
self.change_to_confirm_quit()
return
bufferManager = self.view.program.bufferManager
tb = bufferManager.get_unsaved_buffer()
if tb:
host.set_text_buffer(tb)
self.change_to_confirm_quit()
return
bufferManager.debug_log()
host.quit_now()
def save_or_change_to_save_as(self):
app.log.debug()
host = self.get_named_window("inputWindow")
if app.config.strict_debug:
assert issubclass(self.__class__, Controller), self
assert issubclass(self.view.__class__, app.window.Window), self
assert issubclass(host.__class__, app.window.Window), self
assert self.view.textBuffer is self.textBuffer
assert self.view.textBuffer is not host.textBuffer
if host.textBuffer.fullPath:
self.write_or_confirm_overwrite()
return
self.change_to_save_as()
def on_change(self):
pass
def save_event_change_to_host_window(self, *args):
curses.ungetch(self.savedCh)
host = self.get_named_window("inputWindow")
host.bring_to_front()
self.view.change_focus_to(host)
def set_text_buffer(self, textBuffer):
if app.config.strict_debug:
assert issubclass(
textBuffer.__class__, app.text_buffer.TextBuffer
), textBuffer
assert self.view.textBuffer is textBuffer
self.textBuffer = textBuffer
def unfocus(self):
pass
class MainController:
"""The different keyboard mappings are different controllers. This class
manages a collection of keyboard mappings and allows the user to switch
between them."""
def __init__(self, view):
if app.config.strict_debug:
assert issubclass(view.__class__, app.window.Window)
self.view = view
self.commandDefault = None
self.commandSet = None
self.controllers = {}
self.controller = None
def add(self, controller):
self.controllers[controller.name] = controller
self.controller = controller
def current_input_window(self):
return self.controller.current_input_window()
def do_command(self, ch, meta):
self.controller.do_command(ch, meta)
def focus(self):
app.log.info("MainController.focus")
self.controller.focus()
if 0:
self.commandDefault = self.controller.commandDefault
commandSet = self.controller.commandSet.copy()
commandSet.update(
{
app.curses_util.KEY_F2: self.next_controller,
}
)
self.controller.commandSet = commandSet
def on_change(self):
tb = self.view.textBuffer
if tb.message is None and tb.selectionMode != app.selectable.kSelectionNone:
charCount, lineCount = tb.count_selected()
tb.set_message(
u"%d characters (%d lines) selected" % (charCount, lineCount)
)
self.controller.on_change()
def next_controller(self):
app.log.info("next_controller")
if 0:
if self.controller is self.controllers["cuaPlus"]:
app.log.info("MainController.next_controller cua")
self.controller = self.controllers["cua"]
elif self.controller is self.controllers["cua"]:
app.log.info("MainController.next_controller emacs")
self.controller = self.controllers["emacs"]
elif self.controller is self.controllers["emacs"]:
app.log.info("MainController.next_controller vi")
self.controller = self.controllers["vi"]
else:
app.log.info("MainController.next_controller cua")
self.controller = self.controllers["cua"]
self.controller.set_text_buffer(self.textBuffer)
self.focus()
def set_text_buffer(self, textBuffer):
app.log.info("MainController.set_text_buffer", self.controller)
if app.config.strict_debug:
assert issubclass(textBuffer.__class__, app.text_buffer.TextBuffer)
self.textBuffer = textBuffer
self.controller.set_text_buffer(textBuffer)
def unfocus(self):
self.controller.unfocus()
|
|
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
import unittest
import numpy as np
from numpy import nan
import array
import datetime as dt
from ..data_structures import image
from .. import SArray
import os
from ..cython.cy_flexible_type import _translate_through_flexible_type as _flexible_type
from ..cython.cy_flexible_type import _translate_through_flex_list as _tr_flex_list
from ..cython.cy_flexible_type import infer_type_of_list
from ..cython.cy_flexible_type import _get_inferred_column_type, _all_convertable
from ..util.timezone import GMT
import datetime
from itertools import product
from copy import copy
NoneType = type(None)
current_file_dir = os.path.dirname(os.path.realpath(__file__))
def from_lambda(v):
from ..connect import main as glconnect
u = glconnect.get_unity()
return u.eval_lambda(lambda x: x, v)
special_types = set()
IntegerValue = (
[int(0), long(1)]
+ [_dt(0) for _dt in (np.sctypes['int'] + np.sctypes['uint']
+ [np.bool, bool, np.bool_])])
special_types.add(id(IntegerValue))
FloatValue = [float(0)] + [_dt(0) for _dt in np.sctypes['float']]
special_types.add(id(FloatValue))
StringValue = ([str('bork'), unicode('bork')]
+ [_dt('bork') for _dt in
[np.unicode, np.unicode_, str, unicode, np.str,
np.str_, np.string_]])
special_types.add(id(StringValue))
DictValue = [{'a' : 12}, dict()]
special_types.add(id(DictValue))
DatetimeValue = [datetime.date(2000, 6, 12),
datetime.date(1100, 1, 1),
datetime.datetime(2000, 6, 12)]
special_types.add(id(DatetimeValue))
AnyValue = IntegerValue + FloatValue + StringValue + DatetimeValue + DictValue
special_types.add(id(AnyValue))
# All the different types of float sequences we support
FloatSequence = (
[[0.5, 1.5, 2.5], (0.5, 1.5, 2.5),
{0.5, 1.5, 2.5}, frozenset([0.5, 1.5, 2.5])]
+ [array.array(c, [0.5, 1.5, 2.5]) for c in 'fd']
+ [np.array([0.5, 1.5, 2.5], dtype= _dt) for _dt in np.sctypes['float']])
special_types.add(id(FloatSequence))
# Float sequences we support that also contain a NaN
FloatSequenceWithNAN = (
[[0.5, 1.5, 2.5, nan], (0.5, 1.5, 2.5, nan),
{0.5, 1.5, 2.5, nan}, frozenset([0.5, 1.5, 2.5, nan])]
+ [array.array(c, [0.5, 1.5, 2.5, nan]) for c in 'fd']
+ [np.array([0.5, 1.5, 2.5, nan], dtype= _dt) for _dt in np.sctypes['float']])
special_types.add(id(FloatSequenceWithNAN))
# Float sequences we support that also contain a None
FloatSequenceWithNone = (
[[0.5, 1.5, 2.5, None], (0.5, 1.5, 2.5, None)])
special_types.add(id(FloatSequenceWithNone))
# All the different integer sequences we support
IntegerSequence = (
[[int(i) for i in range(3)]
, [long(i) for i in range(3)]
, tuple(range(3))
, tuple(long(i) for i in range(3))
, set(range(3))
, frozenset(range(3))
]
+ [array.array(c, range(3)) for c in 'bBhHiIlL']
+ [np.array(range(3), dtype = _dt) for _dt in np.sctypes['int']]
+ [np.array(range(3), dtype = _dt) for _dt in np.sctypes['uint']])
special_types.add(id(IntegerSequence))
# All the different integer sequences we support, with a Nan
IntegerSequenceWithNAN = (
[[int(i) for i in range(3)] + [nan]
, [long(i) for i in range(3)] + [nan]
, tuple(range(3)) + (nan,)
, tuple(long(i) for i in range(3)) + (nan,)
, set([long(i) for i in range(3)] + [nan])
, frozenset([long(i) for i in range(3)] + [nan])])
special_types.add(id(IntegerSequenceWithNAN))
# All the different integer sequences we support, with a None
IntegerSequenceWithNone = (
[[int(i) for i in range(3)] + [None]
, [long(i) for i in range(3)] + [None]
, tuple(range(3)) + (None,)
, tuple(long(i) for i in range(3)) + (None,)
, set([long(i) for i in range(3)] + [None])
, frozenset([long(i) for i in range(3)] + [None])])
special_types.add(id(IntegerSequenceWithNone))
# Empty but typed float arrays
EmptyFloatArray = (
[array.array(c, []) for c in 'fd']
+ [np.array([], dtype= _dt) for _dt in np.sctypes['float']])
special_types.add(id(EmptyFloatArray))
# Empty but typed integer arrays
EmptyIntegerArray = (
[array.array(c, []) for c in 'cbBhHiIlL']
+ [np.array([], dtype= _dt) for _dt in np.sctypes['int']]
+ [np.array([], dtype= _dt) for _dt in np.sctypes['uint']])
special_types.add(id(EmptyIntegerArray))
# All empty arrays
EmptyArray = EmptyIntegerArray + EmptyFloatArray
special_types.add(id(EmptyArray))
EmptySequence = [[], tuple(), set()]
special_types.add(id(EmptySequence))
# Boolean Sequences
BooleanSequence = (
[ list( (i%2 == 0) for i in range(3))
, tuple( (i%2 == 0) for i in range(3))
, set([True]), set([False]), set([True, False])]
+ [np.array([i%2==0 for i in range(3)], dtype= _dt)
for _dt in [np.bool, np.bool_, bool]])
special_types.add(id(BooleanSequence))
# String sequences
StringSequence = (
[ list( str(i) for i in range(3))
, tuple( str(i) for i in range(3))
, set( str(i) for i in range(3))
, frozenset( str(i) for i in range(3))]
+ [np.array([_dt('a'), _dt('b')], dtype = _dt)
for _dt in [np.unicode, np.unicode_, str, unicode, np.str, np.str_, np.string_]]
+ [np.array([_dt('a'), _dt('b')], dtype = object)
for _dt in [np.unicode, np.unicode_, str, unicode, np.str, np.str_, np.string_]])
special_types.add(id(StringSequence))
AnySequence = (EmptySequence + BooleanSequence + StringSequence
+ IntegerSequence + IntegerSequenceWithNone + IntegerSequenceWithNAN
+ FloatSequence + FloatSequenceWithNone + FloatSequenceWithNAN
+ EmptyArray)
special_types.add(id(AnySequence))
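# special_types holds the id() of each placeholder list above;
# verify_inference treats any argument whose id() is registered here as
# "one of many concrete values to substitute" rather than as a literal list.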
def verify_inference(values, expected_type):
# Go through and build a list of all the possible value enumerations that need to be tested.
def build_lookups(values, L):
for v in values:
if id(v) in special_types:
L.append(range(len(v)))
elif type(v) is list:
build_lookups(v, L)
indices = []
build_lookups(values, indices)
def get_value(values, idx_set):
ret = copy(values)
for i, v in enumerate(values):
if id(v) in special_types:
ret[i] = v[idx_set[-1]]
del idx_set[-1]
elif type(v) is list:
ret[i] = get_value(v, idx_set)
return ret
for idx_set in product(*reversed(indices)):
_v_list = get_value(values, list(idx_set))
for add_none in [True, False]:
v_list = _v_list + [None] if add_none else _v_list
inferred_type, result = _get_inferred_column_type(v_list)
if inferred_type != expected_type:
assert False, ("Expected type %s, got type %s; input value = %s."
% (str(expected_type), str(inferred_type), str(v_list)))
if inferred_type != NoneType:
reconverted_result = _tr_flex_list(result, inferred_type)
assert str(result) == str(reconverted_result), \
(("Values in type translated inconsistently: "
"\nInput value = %s"
"\nOutput value = %s"
"\nReconverted = %s")
% (str(v_list), str(result), reconverted_result))
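# Example of how verify_inference expands its input (hypothetical call):
# verify_inference([IntegerValue, FloatValue], float) substitutes every
# concrete (integer-like, float-like) pair for the two placeholders, retries
# each pair with a trailing None appended, and asserts the inferred column
# type is float every time.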
class FlexibleTypeInference(unittest.TestCase):
def test_int_float(self):
verify_inference([IntegerValue], int)
verify_inference([IntegerValue, IntegerValue], int)
verify_inference([IntegerValue, FloatValue], float)
verify_inference([IntegerValue, nan], float)
verify_inference([], float)
verify_inference([None], float)
verify_inference([IntegerValue, nan], float)
verify_inference([IntegerValue, None, nan], float)
verify_inference([IntegerValue, None, FloatValue], float)
verify_inference([IntegerValue, None, FloatValue, nan], float)
def test_string(self):
verify_inference([StringValue], str)
verify_inference([StringValue, StringValue], str)
verify_inference([StringValue, IntegerValue], NoneType)
verify_inference([StringValue, FloatValue], NoneType)
def test_dict(self):
verify_inference([DictValue], dict)
verify_inference([DictValue, DictValue], dict)
def test_mixed_types(self):
verify_inference([AnySequence, AnyValue], NoneType)
verify_inference([AnySequence, AnyValue, AnySequence], NoneType)
verify_inference([AnySequence, AnyValue, AnyValue], NoneType)
verify_inference([DatetimeValue, StringValue], NoneType)
verify_inference([DatetimeValue, IntegerValue], NoneType)
verify_inference([DatetimeValue, FloatValue], NoneType)
def test_array_list(self):
tests = [
# Individual types
([EmptySequence], list),
([IntegerSequence], array.array),
([IntegerSequenceWithNone], list),
([IntegerSequenceWithNAN], array.array),
([FloatSequence], array.array),
([FloatSequenceWithNAN], array.array),
([FloatSequenceWithNone], list),
([EmptyIntegerArray], array.array),
([EmptyFloatArray], array.array),
([BooleanSequence], array.array),
([StringSequence], list),
# Multiple types
            ([IntegerSequence, FloatSequence], array.array),
            # Multiple types
            ([EmptySequence, EmptyFloatArray], array.array),
            ([EmptySequence, EmptyIntegerArray], array.array),
            ([EmptySequence, IntegerSequence], array.array),
            ([EmptySequence, FloatSequence], array.array),
# Arrays and lists
([StringSequence, EmptyFloatArray], list),
([StringSequence, EmptyIntegerArray], list),
([StringSequence, IntegerSequence], list),
([StringSequence, FloatSequence], list)]
# Add in additional rules for testing
for tv, res in copy(tests):
tests.append( (tv + [EmptySequence], res) )
for tv, res in copy(tests):
tests.append( (tv + [[None]], list) )
for tv, res in copy(tests):
tests.append( (tv + [StringSequence], list) )
# Run the tests
for tv, res in tests:
verify_inference(tv, res)
class FlexibleTypeTest(unittest.TestCase):
    # On lambda return, if the return value is a non-empty list of
    # all numerical values, we try hard to use array.array
def numeric_list_to_array(self, v):
if (type(v) is list) and (len(v) > 0) and all((type(x) is int) or (type(x) is float) for x in v):
return array.array('d', v)
elif (type(v) is list):
return [self.numeric_list_to_array(x) for x in v]
else:
return v
def assert_equal_with_lambda_check(self, translated, correct):
self.assertEqual(translated, correct)
self.assertEqual(from_lambda(translated), self.numeric_list_to_array(correct))
def test_none(self):
self.assert_equal_with_lambda_check(_flexible_type(None), None)
def test_date_time(self):
        d = datetime.datetime(2010, 10, 10, 10, 10, 10)
self.assert_equal_with_lambda_check(_flexible_type(d),d)
def test_int(self):
self.assert_equal_with_lambda_check(_flexible_type(1), 1)
self.assert_equal_with_lambda_check(_flexible_type(1L), 1)
self.assert_equal_with_lambda_check(_flexible_type(True), 1)
self.assert_equal_with_lambda_check(_flexible_type(False), 0)
# numpy types
self.assert_equal_with_lambda_check(_flexible_type(np.int_(1)), 1)
self.assert_equal_with_lambda_check(_flexible_type(np.int64(1)), 1)
self.assert_equal_with_lambda_check(_flexible_type(np.int32(1)), 1)
self.assert_equal_with_lambda_check(_flexible_type(np.int16(1)), 1)
self.assert_equal_with_lambda_check(_flexible_type(np.uint64(1)), 1)
self.assert_equal_with_lambda_check(_flexible_type(np.uint32(1)), 1)
self.assert_equal_with_lambda_check(_flexible_type(np.uint16(1)), 1)
self.assert_equal_with_lambda_check(_flexible_type(np.bool(1)), 1)
self.assert_equal_with_lambda_check(_flexible_type(np.bool(0)), 0)
self.assert_equal_with_lambda_check(_flexible_type(np.bool_(1)), 1)
self.assert_equal_with_lambda_check(_flexible_type(np.bool_(0)), 0)
self.assert_equal_with_lambda_check(_flexible_type(np.bool8(1)), 1)
self.assert_equal_with_lambda_check(_flexible_type(np.bool8(0)), 0)
def test_float(self):
self.assert_equal_with_lambda_check(_flexible_type(0.25), 0.25)
# numpy types
self.assert_equal_with_lambda_check(_flexible_type(np.float(0.25)), 0.25)
self.assert_equal_with_lambda_check(_flexible_type(np.float_(0.25)), 0.25)
self.assert_equal_with_lambda_check(_flexible_type(np.float16(0.25)), 0.25)
self.assert_equal_with_lambda_check(_flexible_type(np.float32(0.25)), 0.25)
self.assert_equal_with_lambda_check(_flexible_type(np.float64(0.25)), 0.25)
def test_string(self):
self.assert_equal_with_lambda_check(_flexible_type("a"), "a")
self.assert_equal_with_lambda_check(_flexible_type(unicode("a")), "a")
# numpy types
self.assert_equal_with_lambda_check(_flexible_type(np.string_("a")), "a")
self.assert_equal_with_lambda_check(_flexible_type(np.unicode_("a")), "a")
def test_array(self):
# float array
expected = array.array('d', [.1, .2, .3])
self.assert_equal_with_lambda_check(_flexible_type(expected), expected)
# int array
expected = array.array('d', [1, 2, 3])
self.assert_equal_with_lambda_check(_flexible_type([1, 2, 3]), expected)
self.assert_equal_with_lambda_check(_flexible_type([1.0, 2.0, 3.0]), expected)
self.assert_equal_with_lambda_check(_flexible_type([1, 2, 3.0]), expected)
# numpy ndarray
expected = np.asarray([1, 2, 3])
self.assertSequenceEqual(_flexible_type(expected), list(expected))
self.assertEquals(from_lambda(expected), array.array('d', expected))
expected = np.asarray([.1, .2, .3])
self.assertSequenceEqual(_flexible_type(expected), list(expected))
self.assertEquals(from_lambda(expected), array.array('d', expected))
def test_dict(self):
d = dt.datetime(2010, 10, 10, 10, 10, 10)
img = image.Image(current_file_dir + "/images/nested/sample_grey.jpg","JPG")
expected = {'int': 0, 'float': 0.1, 'str': 'str',
'list': ['a', 'b', 'c'], 'array': array.array('d', [1, 2, 3]),'datetime':[d],
'image': img ,'none': None}
self.assert_equal_with_lambda_check(_flexible_type(expected), expected)
self.assert_equal_with_lambda_check(_flexible_type({}), {})
expected = [{'a': 1, 'b': 20, 'c': None}, {"b": 4, None: 5}, None, {'a': 0}]
self.assert_equal_with_lambda_check(_flexible_type(expected), expected)
def test_list(self):
d = dt.datetime(2010, 10, 10, 10, 10, 10)
img = image.Image(current_file_dir + "/images/nested/sample_grey.jpg","JPG")
expected = [None, img, 1, 0.1, '1',d,array.array('d', [1, 2, 3]), {'foo': array.array('d', [1, 2,3])}]
self.assert_equal_with_lambda_check(_flexible_type(expected), expected)
self.assert_equal_with_lambda_check(_flexible_type([]), [])
self.assert_equal_with_lambda_check(_flexible_type([[], []]), [[], []])
def test_image(self):
img_gray_jpg = image.Image(current_file_dir + "/images/nested/sample_grey.jpg","JPG")
img_gray_png = image.Image(current_file_dir + "/images/nested/sample_grey.png","PNG")
img_gray_auto_jpg = image.Image(current_file_dir + "/images/nested/sample_grey.jpg")
img_gray_auto_png = image.Image(current_file_dir + "/images/nested/sample_grey.png")
img_color_jpg = image.Image(current_file_dir + "/images/sample.jpg","JPG")
img_color_png = image.Image(current_file_dir + "/images/sample.png","PNG")
img_color_auto_jpg = image.Image(current_file_dir + "/images/sample.jpg")
img_color_auto_png = image.Image(current_file_dir + "/images/sample.png")
self.assert_equal_with_lambda_check(_flexible_type(img_gray_jpg),img_gray_jpg)
self.assert_equal_with_lambda_check(_flexible_type(img_gray_png),img_gray_png)
self.assert_equal_with_lambda_check(_flexible_type(img_gray_auto_jpg),img_gray_auto_jpg)
self.assert_equal_with_lambda_check(_flexible_type(img_gray_auto_png),img_gray_png)
self.assert_equal_with_lambda_check(_flexible_type(img_color_jpg),img_color_jpg)
self.assert_equal_with_lambda_check(_flexible_type(img_color_png),img_color_png)
self.assert_equal_with_lambda_check(_flexible_type(img_color_auto_jpg),img_color_auto_jpg)
self.assert_equal_with_lambda_check(_flexible_type(img_color_auto_png),img_color_auto_png)
def test_tr_flex_list(self):
expected = []
self.assert_equal_with_lambda_check(_tr_flex_list(expected), expected)
# test int list
expected = [1, 2, 3, 4, 5, None]
self.assert_equal_with_lambda_check(_tr_flex_list(expected), expected)
self.assert_equal_with_lambda_check(_tr_flex_list(expected, int), expected)
self.assert_equal_with_lambda_check(_tr_flex_list(expected, int, ignore_cast_failure=True), expected)
# test datetime list
from_zone = GMT(0)
to_zone = GMT(4.5)
d1 = dt.datetime(2010, 10, 10, 10, 10, 10).replace(tzinfo=from_zone)
d2 = d1.astimezone(to_zone)
expected = [d1,d2, None]
self.assert_equal_with_lambda_check(_tr_flex_list(expected), expected)
self.assert_equal_with_lambda_check(_tr_flex_list(expected, dt.datetime), expected)
self.assert_equal_with_lambda_check(_tr_flex_list(expected, dt.datetime, ignore_cast_failure=True), expected)
# test image list
img_gray_auto_png = image.Image(current_file_dir + "/images/nested/sample_grey.png")
img_color_jpg = image.Image(current_file_dir + "/images/sample.jpg","JPG")
expected = [img_gray_auto_png, img_color_jpg, None]
self.assert_equal_with_lambda_check(_tr_flex_list(expected), expected)
self.assert_equal_with_lambda_check(_tr_flex_list(expected, image.Image), expected)
self.assert_equal_with_lambda_check(_tr_flex_list(expected, image.Image, ignore_cast_failure=True), expected)
# test str list
expected = ['a', 'b', 'c', None]
self.assert_equal_with_lambda_check(_tr_flex_list(expected), expected)
self.assert_equal_with_lambda_check(_tr_flex_list(expected, str), expected)
# test array list
expected = [array.array('d', range(5)), array.array('d', range(5)), None]
self.assert_equal_with_lambda_check(_tr_flex_list(expected), expected)
self.assert_equal_with_lambda_check(_tr_flex_list(expected, array.array), expected)
expected = [[float(i) for i in range(5)], range(5), None]
self.assert_equal_with_lambda_check(_tr_flex_list(expected), [array.array('d', range(5)),
array.array('d', range(5)), None])
# test int array
expected = array.array('d', range(5))
self.assert_equal_with_lambda_check(_tr_flex_list(expected), range(5))
expected = [1, 1.0, '1', [1., 1., 1.], ['a', 'b', 'c'], {}, {'a': 1}, None]
self.assert_equal_with_lambda_check(_tr_flex_list(expected, int, ignore_cast_failure=True), [1, 1, None])
self.assert_equal_with_lambda_check(_tr_flex_list(expected, float, ignore_cast_failure=True), [1.0, 1.0, None])
# Anything can be cast to a string
# self.assert_equal_with_lambda_check(_tr_flex_list(expected, str, ignore_cast_failure=True), ['1', '1', None])
self.assert_equal_with_lambda_check(_tr_flex_list(expected, array.array, ignore_cast_failure=True), [array.array('d', [1., 1., 1.]), None])
self.assert_equal_with_lambda_check(_tr_flex_list(expected, list, ignore_cast_failure=True), [[1., 1., 1.], ['a', 'b', 'c'], None])
self.assert_equal_with_lambda_check(_tr_flex_list(expected, dict, ignore_cast_failure=True), [{}, {'a': 1}, None])
def test_infer_list_type(self):
        self.assertEquals(infer_type_of_list(
            [image.Image(current_file_dir + "/images/nested/sample_grey.png"),
             image.Image(current_file_dir + "/images/sample.jpg", "JPG"),
             image.Image(current_file_dir + "/images/sample.png")]),
            image.Image)
        self.assertEquals(infer_type_of_list(
            [dt.datetime(2010, 10, 10, 10, 10, 10),
             dt.datetime(2000, 5, 7, 10, 4, 10),
             dt.datetime(1845, 5, 7, 4, 4, 10)]),
            dt.datetime)
self.assertEquals(infer_type_of_list([0, 1, 2]), int)
self.assertEquals(infer_type_of_list([0, 1, 2.0]), float)
self.assertEquals(infer_type_of_list(['foo', u'bar']), str)
self.assertEquals(infer_type_of_list([array.array('d', [1, 2, 3]), array.array('d', [1, 2, 3])]), array.array)
self.assertEquals(infer_type_of_list([[], [1.0, 2.0, 3.0], array.array('d', [1, 2, 3])]), array.array)
self.assertEquals(infer_type_of_list([[], [1, 2, 3], array.array('d', [1, 2, 3])]), array.array)
self.assertEquals(infer_type_of_list([{'a': 1}, {'b': 2}]), dict)
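    # Editor's note (hedged summary): the assertions above pin down the
    # promotion rules -- an all-int list stays int, any float promotes the
    # list to float, str/unicode mix to str, and sequences of numbers
    # (plain lists or array.array) promote to array.array.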
def test_datetime_lambda(self):
d = dt.datetime.now()
sa = SArray([d])
# Lambda returning self
sa_self = sa.apply(lambda x: x)
for i in range(len(sa_self)):
self.assertEqual(sa[i], sa_self[i])
# Lambda returning year
sa_year = sa.apply(lambda x: x.year)
for i in range(len(sa_year)):
self.assertEqual(sa[i].year, sa_year[i])
# Lambda returning second
sa_sec = sa.apply(lambda x: x.second)
for i in range(len(sa_sec)):
self.assertEqual(sa[i].second, sa_sec[i])
|
|
import unittest
import traceback
import gym
from gym.spaces import Box, Discrete, Tuple, Dict, MultiDiscrete
from gym.envs.registration import EnvSpec
import numpy as np
import sys
import ray
from ray.rllib.agents.registry import get_agent_class
from ray.rllib.models.tf.fcnet_v2 import FullyConnectedNetwork as FCNetV2
from ray.rllib.models.tf.visionnet_v2 import VisionNetwork as VisionNetV2
from ray.rllib.tests.test_multi_agent_env import (MultiCartpole,
MultiMountainCar)
from ray.rllib.utils.error import UnsupportedSpaceException
from ray.tune.registry import register_env
ACTION_SPACES_TO_TEST = {
"discrete": Discrete(5),
"vector": Box(-1.0, 1.0, (5, ), dtype=np.float32),
"vector2": Box(-1.0, 1.0, (
5,
5,
), dtype=np.float32),
"multidiscrete": MultiDiscrete([1, 2, 3, 4]),
"tuple": Tuple(
[Discrete(2),
Discrete(3),
Box(-1.0, 1.0, (5, ), dtype=np.float32)]),
}
OBSERVATION_SPACES_TO_TEST = {
"discrete": Discrete(5),
"vector": Box(-1.0, 1.0, (5, ), dtype=np.float32),
"vector2": Box(-1.0, 1.0, (5, 5), dtype=np.float32),
"image": Box(-1.0, 1.0, (84, 84, 1), dtype=np.float32),
"atari": Box(-1.0, 1.0, (210, 160, 3), dtype=np.float32),
"tuple": Tuple([Discrete(10),
Box(-1.0, 1.0, (5, ), dtype=np.float32)]),
"dict": Dict({
"task": Discrete(10),
"position": Box(-1.0, 1.0, (5, ), dtype=np.float32),
}),
}
def make_stub_env(action_space, obs_space, check_action_bounds):
class StubEnv(gym.Env):
def __init__(self):
self.action_space = action_space
self.observation_space = obs_space
self.spec = EnvSpec("StubEnv-v0")
def reset(self):
sample = self.observation_space.sample()
return sample
def step(self, action):
if check_action_bounds and not self.action_space.contains(action):
raise ValueError("Illegal action for {}: {}".format(
self.action_space, action))
if (isinstance(self.action_space, Tuple)
and len(action) != len(self.action_space.spaces)):
raise ValueError("Illegal action for {}: {}".format(
self.action_space, action))
return self.observation_space.sample(), 1, True, {}
return StubEnv
def check_support(alg, config, stats, check_bounds=False, name=None):
covered_a = set()
covered_o = set()
config["log_level"] = "ERROR"
for a_name, action_space in ACTION_SPACES_TO_TEST.items():
for o_name, obs_space in OBSERVATION_SPACES_TO_TEST.items():
print("=== Testing", alg, action_space, obs_space, "===")
stub_env = make_stub_env(action_space, obs_space, check_bounds)
register_env("stub_env", lambda c: stub_env())
stat = "ok"
a = None
try:
if a_name in covered_a and o_name in covered_o:
stat = "skip" # speed up tests by avoiding full grid
else:
a = get_agent_class(alg)(config=config, env="stub_env")
if alg not in ["DDPG", "ES", "ARS"]:
if o_name in ["atari", "image"]:
assert isinstance(a.get_policy().model,
VisionNetV2)
elif o_name in ["vector", "vector2"]:
assert isinstance(a.get_policy().model, FCNetV2)
a.train()
covered_a.add(a_name)
covered_o.add(o_name)
except UnsupportedSpaceException:
stat = "unsupported"
except Exception as e:
stat = "ERROR"
print(e)
print(traceback.format_exc())
finally:
if a:
try:
a.stop()
except Exception as e:
print("Ignoring error stopping agent", e)
print(stat)
print()
stats[name or alg, a_name, o_name] = stat
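# Editor's note (hedged usage sketch, mirroring how testAll below drives this
# helper): a single algorithm can be checked outside the full grid, assuming
# ray.init() has already been called:
#
#   stats = {}
#   check_support("PG", {"num_workers": 1, "optimizer": {}}, stats)
#   # stats maps (alg, action_space_name, obs_space_name) to
#   # "ok" / "unsupported" / "skip" / "ERROR"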
def check_support_multiagent(alg, config):
register_env("multi_mountaincar", lambda _: MultiMountainCar(2))
register_env("multi_cartpole", lambda _: MultiCartpole(2))
config["log_level"] = "ERROR"
if "DDPG" in alg:
a = get_agent_class(alg)(config=config, env="multi_mountaincar")
else:
a = get_agent_class(alg)(config=config, env="multi_cartpole")
try:
a.train()
finally:
a.stop()
class ModelSupportedSpaces(unittest.TestCase):
def setUp(self):
ray.init(num_cpus=4)
def tearDown(self):
ray.shutdown()
def testAll(self):
stats = {}
check_support("IMPALA", {"num_gpus": 0}, stats)
check_support("APPO", {"num_gpus": 0, "vtrace": False}, stats)
check_support(
"APPO", {
"num_gpus": 0,
"vtrace": True
}, stats, name="APPO-vt")
check_support(
"DDPG", {
"exploration_ou_noise_scale": 100.0,
"timesteps_per_iteration": 1,
"use_state_preprocessor": True,
},
stats,
check_bounds=True)
check_support("DQN", {"timesteps_per_iteration": 1}, stats)
check_support(
"A3C", {
"num_workers": 1,
"optimizer": {
"grads_per_step": 1
}
},
stats,
check_bounds=True)
check_support(
"PPO", {
"num_workers": 1,
"num_sgd_iter": 1,
"train_batch_size": 10,
"sample_batch_size": 10,
"sgd_minibatch_size": 1,
},
stats,
check_bounds=True)
check_support(
"ES", {
"num_workers": 1,
"noise_size": 10000000,
"episodes_per_batch": 1,
"train_batch_size": 1
}, stats)
check_support(
"ARS", {
"num_workers": 1,
"noise_size": 10000000,
"num_rollouts": 1,
"rollouts_used": 1
}, stats)
check_support(
"PG", {
"num_workers": 1,
"optimizer": {}
},
stats,
check_bounds=True)
num_unexpected_errors = 0
for (alg, a_name, o_name), stat in sorted(stats.items()):
if stat not in ["ok", "unsupported", "skip"]:
num_unexpected_errors += 1
print(alg, "action_space", a_name, "obs_space", o_name, "result",
stat)
self.assertEqual(num_unexpected_errors, 0)
def testMultiAgent(self):
check_support_multiagent(
"APEX", {
"num_workers": 2,
"timesteps_per_iteration": 1000,
"num_gpus": 0,
"min_iter_time_s": 1,
"learning_starts": 1000,
"target_network_update_freq": 100,
})
check_support_multiagent(
"APEX_DDPG", {
"num_workers": 2,
"timesteps_per_iteration": 1000,
"num_gpus": 0,
"min_iter_time_s": 1,
"learning_starts": 1000,
"target_network_update_freq": 100,
"use_state_preprocessor": True,
})
check_support_multiagent("IMPALA", {"num_gpus": 0})
check_support_multiagent("DQN", {"timesteps_per_iteration": 1})
check_support_multiagent("A3C", {
"num_workers": 1,
"optimizer": {
"grads_per_step": 1
}
})
check_support_multiagent(
"PPO", {
"num_workers": 1,
"num_sgd_iter": 1,
"train_batch_size": 10,
"sample_batch_size": 10,
"sgd_minibatch_size": 1,
})
check_support_multiagent("PG", {"num_workers": 1, "optimizer": {}})
check_support_multiagent("DDPG", {
"timesteps_per_iteration": 1,
"use_state_preprocessor": True,
})
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == "--smoke":
ACTION_SPACES_TO_TEST = {
"discrete": Discrete(5),
}
OBSERVATION_SPACES_TO_TEST = {
"vector": Box(0.0, 1.0, (5, ), dtype=np.float32),
"atari": Box(0.0, 1.0, (210, 160, 3), dtype=np.float32),
}
unittest.main(verbosity=2)
|
|
# Copyright (C) 2013 Lindley Graham
"""
This module controls the management and creation of ``*.table`` files.
"""
import glob, os, re
from polyadcirc.pyADCIRC.basic import pickleable
if __name__ == "__main__":
pass
def create_table_single_value(class_num, landuse_table, manningsn_value,
folder_name=None):
"""
    Create a ``*.table`` in ``folder_name`` where the landuse classification
    numbered ``class_num`` is assigned a value of ``manningsn_value`` and all
    other landuse classifications are assigned a ``manningsn_value`` of 0.
    :param int class_num: land classification number
    :type landuse_table: :class:`tableInfo`
    :param landuse_table: table to base the single value table off of
    :param float manningsn_value: Manning's *n* value for ``class_num``
:param string folder_name: folder to create the table in
"""
new_values = landuse_table.land_classes.copy()
for k in new_values.iterkeys():
new_values[k] = 0
new_values[class_num] = manningsn_value
new_table = tableInfo(landuse_table.file_name, new_values)
create_table(new_table, folder_name)
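# Editor's note (hedged usage sketch; 'example.table' and the class/value
# numbers are made up for illustration): given a table with classes
# {1: 0.02, 2: 0.05}, the call below writes a table assigning 0 to every
# class except class 2:
#
#   tbl = tableInfo('example.table', {1: 0.02, 2: 0.05})
#   create_table_single_value(2, tbl, 0.05, folder_name='tables')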
def create_table(landuse_table, folder_name=None):
"""
Create ``table_name.table`` in ``folder_name`` where the landuse
classification numbered ``landuse_table.keys()`` is assigned a
``manningsn_value`` of ``landuse_table['key']``
:type landuse_table: :class:`tableInfo`
:param landuse_table: table to create
:param string folder_name: folder to create the table in
"""
print 'Creating landuse_table file '+landuse_table.file_name+'...'
if folder_name is None:
folder_name = ''
with open(os.path.join(folder_name, landuse_table.file_name), 'w') as f:
next_str = ' {0:3} ! '.format(landuse_table.get_num_landclasses())
next_str += 'Total number of Class\n'
f.write(next_str)
for k, v in landuse_table.land_classes.iteritems():
f.write('{0:3} {1!r} :description\n'.format(k, v))
        f.write('The class with default value(=-9999) will be skipped in mapping')
def read_table(table_file_name, folder_name=None):
"""
Read in ``table_file_name`` in ``folder_name``
:param string table_file_name: local file name of table
:param string folder_name: folder to read the table from
:rtype: :class:`tableInfo`
:returns: an object with all of the information in that table
"""
print 'Reading landuse_table file '+table_file_name+'...'
if folder_name is None:
folder_name = os.getcwd()
landuse_classes = {}
with open(os.path.join(folder_name, table_file_name), 'r') as f:
for line in f:
m = re.match(r" +(\d+) +(\d+.\d+) +:(.*)", line)
if m != None:
landuse_classes[int(m.group(1))] = float(m.group(2))
new_table = tableInfo(table_file_name, landuse_classes)
return new_table
def read_tables(folder_name=None):
"""
Read in all ``*.table`` files in folder_name and return a list of tableInfo
objects
:param string folder_name: folder to read the table(s) from
:rtype: list of :class:`tableInfo`
:returns: list of objects with all of the information in that table
"""
if folder_name is None:
folder_name = os.getcwd()
list_of_tables = []
list_of_table_names = glob.glob(os.path.join(folder_name, '*.table'))
for x in list_of_table_names:
x = x[len(folder_name)+1:]
list_of_tables.append(read_table(x, folder_name))
return list_of_tables
def create_gap_list_from_folder(table, folder_name):
"""
Create a list() of :class:`~polyadcirc.pyGriddata.table_management.gapInfo`
objects from the files in folder.
:param string folder_name: folder containing gap formatted files
:rtype: list
:returns: list of :class:`~polyadcirc.pyGriddata.table_management.gapInfo`
objects
"""
gap_files = glob.glob(os.path.join(folder_name, '*.asc'))
return create_gap_list(table, gap_files)
def create_gap_list(table, gap_files):
"""
Create a list() of :class:`~polyadcirc.pyGriddata.table_management.gapInfo`
objects from a list of files.
:param list gap_files: file names of gap formatted files
:rtype: list
:returns: list of :class:`~polyadcirc.pyGriddata.table_management.gapInfo`
objects
"""
gap_list = []
for f in gap_files:
meta_filename = glob.glob(os.path.join(f.rpartition('/')[0], '*.txt'))
with open(meta_filename[0], 'r') as meta_info:
for line in meta_info:
m = re.match(r"UTM map zone", line)
                if m is not None:
UTM_zone = line.split()[-1]
break
gap_list.append(gapInfo(f, table, 1, UTM_zone))
return gap_list
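# Editor's note (hedged summary): create_gap_list pairs each gap file with
# the first ``*.txt`` metadata file found in the same directory, scanning it
# for a "UTM map zone" line to recover the zone number passed to gapInfo.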
class gapInfo(pickleable):
"""
    This object stores information specific to a GAP dataset, methods for
    creating its portion of the ``*.in`` file, and methods for creating the
    table(s) needed for this GAP dataset.
"""
def __init__(self, file_name, table, horizontal_sys=None, UTM_zone=None):
"""
        Initializes a gapInfo object with the information necessary for a
        ``*.asc``/``*.asc.binary`` file with name ``file_name``
"""
self.file_name = file_name #: Name of GAP/NLCD data file, ``*.asc``
self.horizontal_sys = horizontal_sys
""" Horizontal system: (1) GRS80, (2) NAD83/WGS84, (3) WGS72"""
self.UTM_zone = UTM_zone #: UTM Zone number of GAP/NLCD data.
self.table = table #: :class:`tableInfo` object
super(gapInfo, self).__init__()
def __str__(self):
"""
:rtype: string
:returns: text that matches relevant lines of ``*.in`` file
"""
string_rep = ''
string_rep += "{0:80}! Name of GAP/NLCD data file.\n".format(\
self.file_name)
string_rep += "{0:80}!".format(self.table.file_name)
string_rep += " Name of classified value table.\n"
        convert = 'N'
        if glob.glob(self.file_name+'.binary') == []:
            convert = 'Y'
string_rep += "{0:80}!".format(convert)
string_rep += " Convert to ASCII GAP/NLCD data to "
string_rep += "binary(required)? (Y/N)\n"
        if self.horizontal_sys is not None:
string_rep += "{0:80}!".format('Y')
string_rep += " Convert grid to UTM coordinates(grid"
string_rep += " required to be in UTM coordinates)? (Y/N)\n"
string_rep += "{0:80}!".format(str(self.horizontal_sys))
string_rep += " Select horizontal system: GRS80(1),"
string_rep += " NAD83/WGS84 (2), WGS72 (3)\n"
string_rep += "{0:80}! ".format(str(self.UTM_zone))
string_rep += "UTM Zone number of GAP/NLCD data.\n\n"
else:
string_rep += "{0:80}!".format('N')
string_rep += " Convert grid to UTM coordinates(grid"
string_rep += " required to be in UTM coordinates)? (Y/N)\n\n"
return string_rep
def local_str(self, basis_dir, folder_name=None):
"""
:param string basis_dir: the folder containing the ``*.asc`` files and
the directory folder_name
:param string folder_name: name of folder to create ``*.in`` for
:rtype: string
:returns: text that matches relevant lines of ``*.in`` file and uses
basis_dir for ``*.asc`` files
"""
string_rep = ''
string_rep += "{0:80}! Name of GAP/NLCD data file.\n".format(\
self.file_name)
if folder_name:
table_name = os.path.join(folder_name, self.table.file_name)
else:
table_name = self.table.file_name
string_rep += "{0:80}!".format(table_name)
string_rep += " Name of classified value table.\n"
        convert = 'N'
        if glob.glob(os.path.join(basis_dir, self.file_name+'.binary')) == []:
            convert = 'Y'
string_rep += "{0:80}!".format(convert)
string_rep += " Convert to ASCII GAP/NLCD data to "
string_rep += "binary(required)? (Y/N)\n"
        if self.horizontal_sys is not None:
string_rep += "{0:80}!".format('Y')
string_rep += " Convert grid to UTM coordinates(grid"
string_rep += " required to be in UTM coordinates)? (Y/N)\n"
string_rep += "{0:80}!".format(str(self.horizontal_sys))
string_rep += " Select horizontal system: GRS80(1),"
string_rep += " NAD83/WGS84 (2), WGS72 (3)\n"
string_rep += "{0:80}! ".format(str(self.UTM_zone))
string_rep += "UTM Zone number of GAP/NLCD data.\n\n"
else:
string_rep += "{0:80}!".format('N')
string_rep += " Convert grid to UTM coordinates(grid"
string_rep += " required to be in UTM coordinates)? (Y/N)\n\n"
return string_rep
def create_table_single_value(self, class_num, manningsn_value,
folder_name=None):
"""
        Create a ``*.table`` in ``folder_name`` where the landuse
        classification numbered ``class_num`` is assigned a value of
        ``manningsn_value`` and all other landuse classifications are
        assigned a ``manningsn_value`` of 0.
        :param int class_num: land classification number
        :param float manningsn_value: Manning's *n* value for ``class_num``
:param string folder_name: folder to create the table in
"""
create_table_single_value(class_num, self.table, manningsn_value,
folder_name)
def create_table(self, folder_name=None):
"""
        Create ``self.table_name.table`` in ``folder_name`` where the landuse
classification numbered ``landuse_table.keys()`` is assigned a
``manningsn_value`` of ``landuse_table['key']``.
:param string folder_name: folder to create the table in
"""
create_table(self.table, folder_name)
def read_table(self, folder_name=None):
"""
Read in ``self.table.file_name`` in ``folder_name``
:param string folder_name: folder to read the table from
:rtype: :class:`tableInfo`
:returns: an object with all of the information in that table
"""
return read_table(self.table.file_name, folder_name)
class tableInfo(pickleable):
"""
This class stores the relation between Manning's *n* values and land class
numbers.
"""
def __init__(self, file_name, land_classes):
""" Initializes a tableInfo object associated with file_name,
containing all of the information in file_name
"""
self.file_name = file_name
""" Name of classified value table, ``*.table`` """
self.land_classes = land_classes
""" dict of land classification numbers and associated mannings_n
values """
super(tableInfo, self).__init__()
def get_landclasses(self):
"""
:rtype: list
:returns: list of land_classes (integers)
"""
return self.land_classes.keys()
def get_num_landclasses(self):
"""
        :rtype: int
:returns: total number of land_classes """
return len(self.land_classes)
def __str__(self):
"""
        :rtype: string
:returns: file_name and the Python string rep of a dict
"""
string_rep = self.file_name+'\n'
string_rep += str(self.land_classes)
return string_rep
def create_table_single_value(self, class_num, manningsn_value,
folder_name=None):
"""
Create a ``*.table`` in ``folder_name`` where the landuse
        classification numbered ``class_num`` is assigned a value of
        ``manningsn_value`` and all other landuse classifications are
        assigned a ``manningsn_value`` of 0.
        :param int class_num: land classification number
        :param float manningsn_value: Manning's *n* value for ``class_num``
:param string folder_name: folder to create the table in
"""
create_table_single_value(class_num, self, manningsn_value,
folder_name)
def create_table(self, folder_name=None):
"""
        Create ``table_name.table`` in ``folder_name`` where the landuse
classification numbered ``landuse_table.keys()`` is assigned a
``manningsn_value`` of ``landuse_table['key']``.
:param string folder_name: folder to create the table in
"""
create_table(self, folder_name)
def read_table(self, folder_name=None):
"""
        Read in ``self.file_name`` in ``folder_name``.
:param string folder_name: folder to read the table from
:rtype: :class:`tableInfo`
:returns: an object with all of the information in that table
"""
return read_table(self.file_name, folder_name)
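
# Editor's note (hedged round-trip sketch; 'demo.table' is made up): writing
# a table with create_table and reading it back with read_table should
# reproduce the land_classes mapping, since the writer emits the
# " <num> <value> :description" lines that the reader's regex matches:
#
#   tbl = tableInfo('demo.table', {1: 0.02, 2: 0.05})
#   create_table(tbl)
#   assert read_table('demo.table').land_classes == {1: 0.02, 2: 0.05}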
|
|
from __future__ import absolute_import, unicode_literals
from django import forms
from django.contrib.contenttypes.generic import generic_inlineformset_factory
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from .models import (TaggedItem, ValuableTaggedItem, Comparison, Animal,
Vegetable, Mineral, Gecko, Rock, ManualPK,
ForProxyModelModel, ForConcreteModelModel,
ProxyRelatedModel, ConcreteRelatedModel)
class GenericRelationsTests(TestCase):
def test_generic_relations(self):
# Create the world in 7 lines of code...
lion = Animal.objects.create(common_name="Lion", latin_name="Panthera leo")
platypus = Animal.objects.create(
common_name="Platypus", latin_name="Ornithorhynchus anatinus"
)
eggplant = Vegetable.objects.create(name="Eggplant", is_yucky=True)
bacon = Vegetable.objects.create(name="Bacon", is_yucky=False)
quartz = Mineral.objects.create(name="Quartz", hardness=7)
# Objects with declared GenericRelations can be tagged directly -- the
# API mimics the many-to-many API.
bacon.tags.create(tag="fatty")
bacon.tags.create(tag="salty")
lion.tags.create(tag="yellow")
lion.tags.create(tag="hairy")
platypus.tags.create(tag="fatty")
self.assertQuerysetEqual(lion.tags.all(), [
"<TaggedItem: hairy>",
"<TaggedItem: yellow>"
])
self.assertQuerysetEqual(bacon.tags.all(), [
"<TaggedItem: fatty>",
"<TaggedItem: salty>"
])
# You can easily access the content object like a foreign key.
t = TaggedItem.objects.get(tag="salty")
self.assertEqual(t.content_object, bacon)
# Recall that the Mineral class doesn't have an explicit GenericRelation
# defined. That's OK, because you can create TaggedItems explicitly.
tag1 = TaggedItem.objects.create(content_object=quartz, tag="shiny")
tag2 = TaggedItem.objects.create(content_object=quartz, tag="clearish")
# However, excluding GenericRelations means your lookups have to be a
# bit more explicit.
ctype = ContentType.objects.get_for_model(quartz)
q = TaggedItem.objects.filter(
content_type__pk=ctype.id, object_id=quartz.id
)
self.assertQuerysetEqual(q, [
"<TaggedItem: clearish>",
"<TaggedItem: shiny>"
])
# You can set a generic foreign key in the way you'd expect.
tag1.content_object = platypus
tag1.save()
self.assertQuerysetEqual(platypus.tags.all(), [
"<TaggedItem: fatty>",
"<TaggedItem: shiny>"
])
q = TaggedItem.objects.filter(
content_type__pk=ctype.id, object_id=quartz.id
)
self.assertQuerysetEqual(q, ["<TaggedItem: clearish>"])
# Queries across generic relations respect the content types. Even
# though there are two TaggedItems with a tag of "fatty", this query
# only pulls out the one with the content type related to Animals.
self.assertQuerysetEqual(Animal.objects.order_by('common_name'), [
"<Animal: Lion>",
"<Animal: Platypus>"
])
# Create another fatty tagged instance with different PK to ensure
# there is a content type restriction in the generated queries below.
mpk = ManualPK.objects.create(id=lion.pk)
mpk.tags.create(tag="fatty")
self.assertQuerysetEqual(Animal.objects.filter(tags__tag='fatty'), [
"<Animal: Platypus>"
])
self.assertQuerysetEqual(Animal.objects.exclude(tags__tag='fatty'), [
"<Animal: Lion>"
])
mpk.delete()
# If you delete an object with an explicit Generic relation, the related
# objects are deleted when the source object is deleted.
# Original list of tags:
comp_func = lambda obj: (
obj.tag, obj.content_type.model_class(), obj.object_id
)
self.assertQuerysetEqual(TaggedItem.objects.all(), [
('clearish', Mineral, quartz.pk),
('fatty', Animal, platypus.pk),
('fatty', Vegetable, bacon.pk),
('hairy', Animal, lion.pk),
('salty', Vegetable, bacon.pk),
('shiny', Animal, platypus.pk),
('yellow', Animal, lion.pk)
],
comp_func
)
lion.delete()
self.assertQuerysetEqual(TaggedItem.objects.all(), [
('clearish', Mineral, quartz.pk),
('fatty', Animal, platypus.pk),
('fatty', Vegetable, bacon.pk),
('salty', Vegetable, bacon.pk),
('shiny', Animal, platypus.pk)
],
comp_func
)
# If Generic Relation is not explicitly defined, any related objects
# remain after deletion of the source object.
quartz_pk = quartz.pk
quartz.delete()
self.assertQuerysetEqual(TaggedItem.objects.all(), [
('clearish', Mineral, quartz_pk),
('fatty', Animal, platypus.pk),
('fatty', Vegetable, bacon.pk),
('salty', Vegetable, bacon.pk),
('shiny', Animal, platypus.pk)
],
comp_func
)
# If you delete a tag, the objects using the tag are unaffected
# (other than losing a tag)
tag = TaggedItem.objects.order_by("id")[0]
tag.delete()
self.assertQuerysetEqual(bacon.tags.all(), ["<TaggedItem: salty>"])
self.assertQuerysetEqual(TaggedItem.objects.all(), [
('clearish', Mineral, quartz_pk),
('fatty', Animal, platypus.pk),
('salty', Vegetable, bacon.pk),
('shiny', Animal, platypus.pk)
],
comp_func
)
TaggedItem.objects.filter(tag='fatty').delete()
ctype = ContentType.objects.get_for_model(lion)
self.assertQuerysetEqual(Animal.objects.filter(tags__content_type=ctype), [
"<Animal: Platypus>"
])
def test_multiple_gfk(self):
# Simple tests for multiple GenericForeignKeys
# only uses one model, since the above tests should be sufficient.
tiger = Animal.objects.create(common_name="tiger")
cheetah = Animal.objects.create(common_name="cheetah")
bear = Animal.objects.create(common_name="bear")
# Create directly
Comparison.objects.create(
first_obj=cheetah, other_obj=tiger, comparative="faster"
)
Comparison.objects.create(
first_obj=tiger, other_obj=cheetah, comparative="cooler"
)
# Create using GenericRelation
tiger.comparisons.create(other_obj=bear, comparative="cooler")
tiger.comparisons.create(other_obj=cheetah, comparative="stronger")
self.assertQuerysetEqual(cheetah.comparisons.all(), [
"<Comparison: cheetah is faster than tiger>"
])
# Filtering works
self.assertQuerysetEqual(tiger.comparisons.filter(comparative="cooler"), [
"<Comparison: tiger is cooler than cheetah>",
"<Comparison: tiger is cooler than bear>",
], ordered=False)
# Filtering and deleting works
subjective = ["cooler"]
tiger.comparisons.filter(comparative__in=subjective).delete()
self.assertQuerysetEqual(Comparison.objects.all(), [
"<Comparison: cheetah is faster than tiger>",
"<Comparison: tiger is stronger than cheetah>"
], ordered=False)
# If we delete cheetah, Comparisons with cheetah as 'first_obj' will be
# deleted since Animal has an explicit GenericRelation to Comparison
# through first_obj. Comparisons with cheetah as 'other_obj' will not
# be deleted.
cheetah.delete()
self.assertQuerysetEqual(Comparison.objects.all(), [
"<Comparison: tiger is stronger than None>"
])
def test_gfk_subclasses(self):
# GenericForeignKey should work with subclasses (see #8309)
quartz = Mineral.objects.create(name="Quartz", hardness=7)
valuedtag = ValuableTaggedItem.objects.create(
content_object=quartz, tag="shiny", value=10
)
self.assertEqual(valuedtag.content_object, quartz)
def test_generic_inline_formsets(self):
GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1)
formset = GenericFormSet()
self.assertHTMLEqual(''.join(form.as_p() for form in formset.forms), """<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-tag">Tag:</label> <input id="id_generic_relations-taggeditem-content_type-object_id-0-tag" type="text" name="generic_relations-taggeditem-content_type-object_id-0-tag" maxlength="50" /></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">Delete:</label> <input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-0-DELETE" id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE" /><input type="hidden" name="generic_relations-taggeditem-content_type-object_id-0-id" id="id_generic_relations-taggeditem-content_type-object_id-0-id" /></p>""")
formset = GenericFormSet(instance=Animal())
self.assertHTMLEqual(''.join(form.as_p() for form in formset.forms), """<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-tag">Tag:</label> <input id="id_generic_relations-taggeditem-content_type-object_id-0-tag" type="text" name="generic_relations-taggeditem-content_type-object_id-0-tag" maxlength="50" /></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">Delete:</label> <input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-0-DELETE" id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE" /><input type="hidden" name="generic_relations-taggeditem-content_type-object_id-0-id" id="id_generic_relations-taggeditem-content_type-object_id-0-id" /></p>""")
platypus = Animal.objects.create(
common_name="Platypus", latin_name="Ornithorhynchus anatinus"
)
platypus.tags.create(tag="shiny")
GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1)
formset = GenericFormSet(instance=platypus)
tagged_item_id = TaggedItem.objects.get(
tag='shiny', object_id=platypus.id
).id
self.assertHTMLEqual(''.join(form.as_p() for form in formset.forms), """<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-tag">Tag:</label> <input id="id_generic_relations-taggeditem-content_type-object_id-0-tag" type="text" name="generic_relations-taggeditem-content_type-object_id-0-tag" value="shiny" maxlength="50" /></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">Delete:</label> <input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-0-DELETE" id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE" /><input type="hidden" name="generic_relations-taggeditem-content_type-object_id-0-id" value="%s" id="id_generic_relations-taggeditem-content_type-object_id-0-id" /></p><p><label for="id_generic_relations-taggeditem-content_type-object_id-1-tag">Tag:</label> <input id="id_generic_relations-taggeditem-content_type-object_id-1-tag" type="text" name="generic_relations-taggeditem-content_type-object_id-1-tag" maxlength="50" /></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-1-DELETE">Delete:</label> <input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-1-DELETE" id="id_generic_relations-taggeditem-content_type-object_id-1-DELETE" /><input type="hidden" name="generic_relations-taggeditem-content_type-object_id-1-id" id="id_generic_relations-taggeditem-content_type-object_id-1-id" /></p>""" % tagged_item_id)
lion = Animal.objects.create(common_name="Lion", latin_name="Panthera leo")
formset = GenericFormSet(instance=lion, prefix='x')
self.assertHTMLEqual(''.join(form.as_p() for form in formset.forms), """<p><label for="id_x-0-tag">Tag:</label> <input id="id_x-0-tag" type="text" name="x-0-tag" maxlength="50" /></p>
<p><label for="id_x-0-DELETE">Delete:</label> <input type="checkbox" name="x-0-DELETE" id="id_x-0-DELETE" /><input type="hidden" name="x-0-id" id="id_x-0-id" /></p>""")
def test_gfk_manager(self):
# GenericForeignKey should not use the default manager (which may filter objects) #16048
tailless = Gecko.objects.create(has_tail=False)
tag = TaggedItem.objects.create(content_object=tailless, tag="lizard")
self.assertEqual(tag.content_object, tailless)
def test_subclasses_with_gen_rel(self):
"""
Test that concrete model subclasses with generic relations work
correctly (ticket 11263).
"""
granite = Rock.objects.create(name='granite', hardness=5)
TaggedItem.objects.create(content_object=granite, tag="countertop")
self.assertEqual(Rock.objects.filter(tags__tag="countertop").count(), 1)
class CustomWidget(forms.TextInput):
pass
class TaggedItemForm(forms.ModelForm):
class Meta:
model = TaggedItem
fields = '__all__'
widgets = {'tag': CustomWidget}
class GenericInlineFormsetTest(TestCase):
def test_generic_inlineformset_factory(self):
"""
Regression for #14572: Using base forms with widgets
defined in Meta should not raise errors.
"""
Formset = generic_inlineformset_factory(TaggedItem, TaggedItemForm)
form = Formset().forms[0]
self.assertIsInstance(form['tag'].field.widget, CustomWidget)
def test_save_new_for_proxy(self):
Formset = generic_inlineformset_factory(ForProxyModelModel,
fields='__all__', for_concrete_model=False)
instance = ProxyRelatedModel.objects.create()
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-title': 'foo',
}
formset = Formset(data, instance=instance, prefix='form')
self.assertTrue(formset.is_valid())
new_obj, = formset.save()
self.assertEqual(new_obj.obj, instance)
def test_save_new_for_concrete(self):
Formset = generic_inlineformset_factory(ForProxyModelModel,
fields='__all__', for_concrete_model=True)
instance = ProxyRelatedModel.objects.create()
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-title': 'foo',
}
formset = Formset(data, instance=instance, prefix='form')
self.assertTrue(formset.is_valid())
new_obj, = formset.save()
self.assertNotIsInstance(new_obj.obj, ProxyRelatedModel)
class ProxyRelatedModelTest(TestCase):
def test_default_behavior(self):
"""
The default for for_concrete_model should be True
"""
base = ForConcreteModelModel()
base.obj = rel = ProxyRelatedModel.objects.create()
base.save()
base = ForConcreteModelModel.objects.get(pk=base.pk)
rel = ConcreteRelatedModel.objects.get(pk=rel.pk)
self.assertEqual(base.obj, rel)
def test_works_normally(self):
"""
When for_concrete_model is False, we should still be able to get
an instance of the concrete class.
"""
base = ForProxyModelModel()
base.obj = rel = ConcreteRelatedModel.objects.create()
base.save()
base = ForProxyModelModel.objects.get(pk=base.pk)
self.assertEqual(base.obj, rel)
def test_proxy_is_returned(self):
"""
Instances of the proxy should be returned when
for_concrete_model is False.
"""
base = ForProxyModelModel()
base.obj = ProxyRelatedModel.objects.create()
base.save()
base = ForProxyModelModel.objects.get(pk=base.pk)
self.assertIsInstance(base.obj, ProxyRelatedModel)
def test_query(self):
base = ForProxyModelModel()
base.obj = rel = ConcreteRelatedModel.objects.create()
base.save()
self.assertEqual(rel, ConcreteRelatedModel.objects.get(bases__id=base.id))
def test_query_proxy(self):
base = ForProxyModelModel()
base.obj = rel = ProxyRelatedModel.objects.create()
base.save()
self.assertEqual(rel, ProxyRelatedModel.objects.get(bases__id=base.id))
def test_generic_relation(self):
base = ForProxyModelModel()
base.obj = ProxyRelatedModel.objects.create()
base.save()
base = ForProxyModelModel.objects.get(pk=base.pk)
rel = ProxyRelatedModel.objects.get(pk=base.obj.pk)
self.assertEqual(base, rel.bases.get())
def test_generic_relation_set(self):
base = ForProxyModelModel()
base.obj = ConcreteRelatedModel.objects.create()
base.save()
newrel = ConcreteRelatedModel.objects.create()
newrel.bases = [base]
newrel = ConcreteRelatedModel.objects.get(pk=newrel.pk)
self.assertEqual(base, newrel.bases.get())
|
|
# Copyright 2015. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cfgm_common import exceptions as vnc_exc
from neutron.common import constants as n_constants
from vnc_api import vnc_api
import contrail_res_handler as res_handler
import vmi_res_handler as vmi_handler
class VNetworkMixin(object):
def neutron_dict_to_vn(self, vn_obj, network_q):
net_name = network_q.get('name')
if net_name:
vn_obj.display_name = net_name
id_perms = vn_obj.get_id_perms()
if 'admin_state_up' in network_q:
id_perms.enable = network_q['admin_state_up']
vn_obj.set_id_perms(id_perms)
if 'contrail:policys' in network_q:
policy_fq_names = network_q['contrail:policys']
# reset and add with newly specified list
vn_obj.set_network_policy_list([], [])
seq = 0
for p_fq_name in policy_fq_names:
domain_name, project_name, policy_name = p_fq_name
domain_obj = vnc_api.Domain(domain_name)
project_obj = vnc_api.Project(project_name, domain_obj)
policy_obj = vnc_api.NetworkPolicy(policy_name, project_obj)
vn_obj.add_network_policy(
policy_obj,
vnc_api.VirtualNetworkPolicyType(
sequence=vnc_api.SequenceType(seq, 0)))
seq = seq + 1
if 'contrail:route_table' in network_q:
rt_fq_name = network_q['contrail:route_table']
if rt_fq_name:
try:
rt_obj = self._vnc_lib.route_table_read(fq_name=rt_fq_name)
vn_obj.set_route_table(rt_obj)
except vnc_api.NoIdError:
                    # TODO: add a route-table-specific exception
self._raise_contrail_exception(
'NetworkNotFound', net_id=vn_obj.uuid,
resource='network')
return vn_obj
def _get_vn_extra_dict(self, vn_obj):
extra_dict = {}
extra_dict['contrail:fq_name'] = vn_obj.get_fq_name()
extra_dict['contrail:instance_count'] = 0
net_policy_refs = vn_obj.get_network_policy_refs()
if net_policy_refs:
sorted_refs = sorted(
net_policy_refs,
key=lambda t: (t['attr'].sequence.major,
t['attr'].sequence.minor))
extra_dict['contrail:policys'] = [np_ref['to'] for np_ref in
sorted_refs]
rt_refs = vn_obj.get_route_table_refs()
if rt_refs:
extra_dict['contrail:route_table'] = [rt_ref['to'] for rt_ref in
rt_refs]
return extra_dict
def _add_vn_subnet_info(self, vn_obj, net_q_dict, extra_dict=None):
ipam_refs = vn_obj.get_network_ipam_refs()
net_q_dict['subnets'] = []
if not ipam_refs:
return
if extra_dict:
extra_dict['contrail:subnet_ipam'] = []
for ipam_ref in ipam_refs:
subnets = ipam_ref['attr'].get_ipam_subnets()
for subnet in subnets:
sn_id = subnet.subnet_uuid
sn_cidr = '%s/%s' % (subnet.subnet.get_ip_prefix(),
subnet.subnet.get_ip_prefix_len())
net_q_dict['subnets'].append(sn_id)
if not extra_dict:
continue
sn_ipam = {}
sn_ipam['subnet_cidr'] = sn_cidr
sn_ipam['ipam_fq_name'] = ipam_ref['to']
extra_dict['contrail:subnet_ipam'].append(sn_ipam)
def vn_to_neutron_dict(self, vn_obj, contrail_extensions_enabled=False,
fields=None):
net_q_dict = {}
extra_dict = None
id_perms = vn_obj.get_id_perms()
net_q_dict['id'] = vn_obj.uuid
if not vn_obj.display_name:
# for nets created directly via vnc_api
net_q_dict['name'] = vn_obj.get_fq_name()[-1]
else:
net_q_dict['name'] = vn_obj.display_name
net_q_dict['tenant_id'] = self._project_id_vnc_to_neutron(
vn_obj.parent_uuid)
net_q_dict['admin_state_up'] = id_perms.enable
        net_q_dict['shared'] = bool(vn_obj.is_shared)
net_q_dict['status'] = (n_constants.NET_STATUS_ACTIVE
if id_perms.enable
else n_constants.NET_STATUS_DOWN)
        net_q_dict['router:external'] = bool(vn_obj.router_external)
if contrail_extensions_enabled:
extra_dict = self._get_vn_extra_dict(vn_obj)
self._add_vn_subnet_info(vn_obj, net_q_dict, extra_dict)
if contrail_extensions_enabled:
net_q_dict.update(extra_dict)
if fields:
net_q_dict = self._filter_res_dict(net_q_dict, fields)
return net_q_dict
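    # Editor's note (hedged example of the mapping above; values are
    # illustrative): for an enabled, shared, non-external network the
    # returned dict looks roughly like
    #   {'id': <uuid>, 'name': 'net1', 'tenant_id': <project-id>,
    #    'admin_state_up': True, 'shared': True, 'status': 'ACTIVE',
    #    'router:external': False, 'subnets': [<subnet-uuid>, ...]}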
def get_vn_tenant_id(self, vn_obj):
return self._project_id_vnc_to_neutron(vn_obj.parent_uuid)
class VNetworkCreateHandler(res_handler.ResourceCreateHandler, VNetworkMixin):
resource_create_method = 'virtual_network_create'
def create_vn_obj(self, network_q):
if 'tenant_id' not in network_q:
self._raise_contrail_exception(
'BadRequest', resource='network',
msg="'tenant_id' is mandatory")
net_name = network_q.get('name', None)
project_id = self._project_id_neutron_to_vnc(network_q['tenant_id'])
try:
proj_obj = self._project_read(proj_id=project_id)
except vnc_exc.NoIdError:
self._raise_contrail_exception(
'ProjectNotFound', project_id=project_id, resource='network')
id_perms = vnc_api.IdPermsType(enable=True)
vn_obj = vnc_api.VirtualNetwork(net_name, proj_obj,
id_perms=id_perms)
external_attr = network_q.get('router:external')
if external_attr is not None:
vn_obj.router_external = external_attr
else:
vn_obj.router_external = False
is_shared = network_q.get('shared')
if is_shared is not None:
vn_obj.is_shared = is_shared
else:
vn_obj.is_shared = False
return vn_obj
def resource_create(self, context, network_q):
contrail_extensions_enabled = self._kwargs.get(
'contrail_extensions_enabled', False)
vn_obj = self.neutron_dict_to_vn(self.create_vn_obj(network_q),
network_q)
self._resource_create(vn_obj)
if vn_obj.router_external:
fip_pool_obj = vnc_api.FloatingIpPool('floating-ip-pool', vn_obj)
self._vnc_lib.floating_ip_pool_create(fip_pool_obj)
ret_network_q = self.vn_to_neutron_dict(
vn_obj, contrail_extensions_enabled=contrail_extensions_enabled)
return ret_network_q
class VNetworkUpdateHandler(res_handler.ResourceUpdateHandler, VNetworkMixin):
resource_update_method = 'virtual_network_update'
def _update_external_router_attr(self, router_external, vn_obj):
if router_external and not vn_obj.router_external:
fip_pool_obj = vnc_api.FloatingIpPool('floating-ip-pool',
vn_obj)
self._vnc_lib.floating_ip_pool_create(fip_pool_obj)
else:
fip_pools = vn_obj.get_floating_ip_pools()
for fip_pool in fip_pools or []:
try:
self._vnc_lib.floating_ip_pool_delete(id=fip_pool['uuid'])
except vnc_api.RefsExistError:
self._raise_contrail_exception(
'NetworkInUse', net_id=vn_obj.uuid, resource='network')
def _validate_shared_attr(self, is_shared, vn_obj):
if not is_shared and vn_obj.is_shared:
for vmi in vn_obj.get_virtual_machine_interface_back_refs() or []:
vmi_obj = vmi_handler.VMInterfaceHandler(
self._vnc_lib).get_vmi_obj(vmi['uuid'])
if vmi_obj.parent_type == 'project' and (
vmi_obj.parent_uuid != vn_obj.parent_uuid):
self._raise_contrail_exception(
'InvalidSharedSetting',
network=vn_obj.display_name, resource='network')
def _get_vn_obj_from_net_q(self, network_q):
try:
vn_obj = self._resource_get(id=network_q['id'])
except vnc_exc.NoIdError:
            self._raise_contrail_exception(
                'NetworkNotFound',
                net_id=network_q['id'], resource='network')
router_external = network_q.get('router:external')
if router_external is not None:
if router_external != vn_obj.router_external:
self._update_external_router_attr(router_external, vn_obj)
vn_obj.router_external = router_external
is_shared = network_q.get('shared')
if is_shared is not None:
if is_shared != vn_obj.is_shared:
self._validate_shared_attr(is_shared, vn_obj)
vn_obj.is_shared = is_shared
return vn_obj
def resource_update(self, context, net_id, network_q):
contrail_extensions_enabled = self._kwargs.get(
'contrail_extensions_enabled', False)
network_q['id'] = net_id
vn_obj = self.neutron_dict_to_vn(
self._get_vn_obj_from_net_q(network_q), network_q)
self._resource_update(vn_obj)
ret_network_q = self.vn_to_neutron_dict(
vn_obj, contrail_extensions_enabled=contrail_extensions_enabled)
return ret_network_q
class VNetworkGetHandler(res_handler.ResourceGetHandler, VNetworkMixin):
resource_list_method = 'virtual_networks_list'
resource_get_method = 'virtual_network_read'
detail = False
def _network_list_project(self, project_id, count=False, filters=None):
        if project_id:
            try:
                project_uuid = self._project_id_neutron_to_vnc(project_id)
            except Exception:
                print("Error in converting uuid %s" % (project_id))
                project_uuid = None
else:
project_uuid = None
if count:
ret_val = self._resource_list(parent_id=project_uuid,
count=True, filters=filters)
else:
ret_val = self._resource_list(parent_id=project_uuid,
detail=True, filters=filters)
return ret_val
# end _network_list_project
def _network_list_shared_and_ext(self):
ret_list = []
nets = self._network_list_project(
project_id=None, filters={'is_shared': True,
'router_external': True})
for net in nets:
if net.get_router_external() and net.get_is_shared():
ret_list.append(net)
return ret_list
    # end _network_list_shared_and_ext
def _network_list_router_external(self):
ret_list = []
nets = self._network_list_project(
project_id=None, filters={'router_external': True})
for net in nets:
if not net.get_router_external():
continue
ret_list.append(net)
return ret_list
# end _network_list_router_external
def _network_list_shared(self):
ret_list = []
nets = self._network_list_project(
project_id=None, filters={'is_shared': True})
for net in nets:
if not net.get_is_shared():
continue
ret_list.append(net)
return ret_list
# end _network_list_shared
def get_vn_obj(self, id=None, fq_name_str=None):
return self._resource_get(id=id, fq_name_str=fq_name_str)
def get_vn_obj_list(self, **kwargs):
return self._resource_list(**kwargs)
def resource_list(self, context=None, filters=None, fields=None):
contrail_extensions_enabled = self._kwargs.get(
'contrail_extensions_enabled', False)
contrail_exts_enabled = contrail_extensions_enabled
ret_dict = {}
def _collect_without_prune(net_ids):
for net_id in net_ids:
try:
net_obj = self._resource_get(id=net_id)
net_info = self.vn_to_neutron_dict(
net_obj,
contrail_extensions_enabled=contrail_exts_enabled,
fields=fields)
ret_dict[net_id] = net_info
except vnc_exc.NoIdError:
pass
# end _collect_without_prune
# collect phase
all_net_objs = [] # all n/ws in all projects
if context and not context['is_admin']:
if filters and 'id' in filters:
_collect_without_prune(filters['id'])
elif filters and 'name' in filters:
net_objs = self._network_list_project(context['tenant'])
all_net_objs.extend(net_objs)
all_net_objs.extend(self._network_list_shared())
all_net_objs.extend(self._network_list_router_external())
elif (filters and 'shared' in filters and filters['shared'][0] and
'router:external' not in filters):
all_net_objs.extend(self._network_list_shared())
elif (filters and 'router:external' in filters and
'shared' not in filters):
all_net_objs.extend(self._network_list_router_external())
elif (filters and 'router:external' in filters and
'shared' in filters):
all_net_objs.extend(self._network_list_shared_and_ext())
else:
project_uuid = self._project_id_neutron_to_vnc(
context['tenant'])
if not filters:
all_net_objs.extend(self._network_list_router_external())
all_net_objs.extend(self._network_list_shared())
all_net_objs.extend(self._network_list_project(project_uuid))
# admin role from here on
elif filters and 'tenant_id' in filters:
# project-id is present
if 'id' in filters:
# required networks are also specified,
# just read and populate ret_dict
# prune is skipped because all_net_objs is empty
_collect_without_prune(filters['id'])
else:
# read all networks in project, and prune below
proj_ids = self._validate_project_ids(context,
filters['tenant_id'])
for p_id in proj_ids:
all_net_objs.extend(self._network_list_project(p_id))
if 'router:external' in filters:
all_net_objs.extend(self._network_list_router_external())
elif filters and 'id' in filters:
# required networks are specified, just read and populate ret_dict
# prune is skipped because all_net_objs is empty
_collect_without_prune(filters['id'])
elif filters and 'name' in filters:
net_objs = self._network_list_project(None)
all_net_objs.extend(net_objs)
elif filters and 'shared' in filters:
if filters['shared'][0]:
nets = self._network_list_shared()
for net in nets:
net_info = self.vn_to_neutron_dict(
net, contrail_extensions_enabled=contrail_exts_enabled,
fields=fields)
ret_dict[net.uuid] = net_info
elif filters and 'router:external' in filters:
nets = self._network_list_router_external()
if filters['router:external'][0]:
for net in nets:
net_info = self.vn_to_neutron_dict(
net, contrail_extensions_enabled=contrail_exts_enabled,
fields=fields)
ret_dict[net.uuid] = net_info
else:
# read all networks in all projects
all_net_objs.extend(self._resource_list(detail=True))
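        # Editor's note (hedged summary): the branches above form the collect
        # phase -- they either gather candidate VirtualNetwork objects into
        # all_net_objs or, for id-based lookups, fill ret_dict directly so
        # the prune below is a no-op for them. The prune phase then applies
        # the remaining fq_name/name/shared/admin_state_up filters.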
# prune phase
for net_obj in all_net_objs:
if net_obj.uuid in ret_dict:
continue
net_fq_name = unicode(net_obj.get_fq_name())
if not self._filters_is_present(
filters, 'contrail:fq_name', net_fq_name):
continue
if not self._filters_is_present(
filters, 'name',
net_obj.get_display_name() or net_obj.name):
continue
if net_obj.is_shared is None:
is_shared = False
else:
is_shared = net_obj.is_shared
if not self._filters_is_present(
filters, 'shared', is_shared):
continue
if net_obj.get_id_perms() is None:
admin_state_up = False
else:
admin_state_up = net_obj.get_id_perms().enable
if not self._filters_is_present(filters, 'admin_state_up',
admin_state_up):
continue
try:
net_info = self.vn_to_neutron_dict(
net_obj, contrail_extensions_enabled=contrail_exts_enabled,
fields=fields)
except vnc_exc.NoIdError:
continue
ret_dict[net_obj.uuid] = net_info
        return list(ret_dict.values())
def resource_get(self, context, net_uuid, fields=None):
contrail_extensions_enabled = self._kwargs.get(
'contrail_extensions_enabled', False)
try:
vn_obj = self._resource_get(id=net_uuid)
except vnc_exc.NoIdError:
self._raise_contrail_exception(
'NetworkNotFound', net_id=net_uuid, resource='network')
return self.vn_to_neutron_dict(
vn_obj, contrail_extensions_enabled, fields=fields)
def resource_count(self, context, filters):
count = self._resource_count_optimized(filters)
if count is not None:
return count
nets_info = self.resource_list(context=None, filters=filters)
return len(nets_info)
def get_vn_list_project(self, project_id, count=False):
if project_id:
try:
project_uuid = self._project_id_neutron_to_vnc(project_id)
except ValueError:
project_uuid = None
else:
project_uuid = None
if count:
ret_val = self._resource_list(parent_id=project_uuid,
count=True)
else:
ret_val = self._resource_list(parent_id=project_uuid,
detail=True)
return ret_val
def vn_list_shared(self):
ret_list = []
nets = self.get_vn_list_project(project_id=None)
for net in nets:
if not net.get_is_shared():
continue
ret_list.append(net)
return ret_list
class VNetworkDeleteHandler(res_handler.ResourceDeleteHandler):
resource_delete_method = 'virtual_network_delete'
def resource_delete(self, context, net_id):
try:
vn_obj = self._resource_get(id=net_id)
except vnc_api.NoIdError:
return
try:
fip_pools = vn_obj.get_floating_ip_pools()
for fip_pool in fip_pools or []:
self._vnc_lib.floating_ip_pool_delete(id=fip_pool['uuid'])
self._resource_delete(id=net_id)
except vnc_api.RefsExistError:
self._raise_contrail_exception('NetworkInUse', net_id=net_id,
resource='network')
class VNetworkHandler(VNetworkGetHandler,
VNetworkCreateHandler,
VNetworkUpdateHandler,
VNetworkDeleteHandler):
pass
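# Editor's note (hedged summary): VNetworkHandler simply composes the four
# per-verb handlers, so a single instance exposes resource_create,
# resource_update, resource_list/resource_get, and resource_delete for
# Neutron network requests backed by the Contrail VNC API.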
|
|
#!/usr/bin/env python2.6
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2009,2010,2011,2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the update esx_cluster command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestUpdateESXCluster(TestBrokerCommand):
def test_100_updatenoop(self):
self.noouttest(["update_esx_cluster", "--cluster=utecl4",
"--building=ut"])
def test_110_verifynoop(self):
command = "show esx_cluster --cluster utecl4"
out = self.commandtest(command.split(" "))
default_ratio = self.config.get("archetype_esx_cluster",
"vm_to_host_ratio")
default_max = self.config.get("archetype_esx_cluster",
"max_members_default")
self.matchoutput(out, "ESX Cluster: utecl4", command)
self.matchoutput(out, "Metacluster: utmc2", command)
self.matchoutput(out, "Building: ut", command)
self.matchoutput(out, "Max members: %s" % default_max, command)
self.matchoutput(out, "vm_to_host_ratio: %s" % default_ratio, command)
self.matchoutput(out, "Personality: vulcan-1g-desktop-prod Archetype: esx_cluster",
command)
self.matchclean(out, "Comments", command)
def test_200_updateutecl2(self):
command = ["update_esx_cluster", "--cluster=utecl2",
"--max_members=97", "--vm_to_host_ratio=5:1",
"--comments", "ESX Cluster with a new comment",
"--memory_capacity", 16384,
"--down_hosts_threshold=0"]
self.noouttest(command)
def test_210_verifyutecl2(self):
command = "show esx_cluster --cluster utecl2"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "ESX Cluster: utecl2", command)
self.matchoutput(out, "Metacluster: utmc1", command)
self.matchoutput(out, "Building: ut", command)
self.matchoutput(out, "Max members: 97", command)
self.matchoutput(out, "vm_to_host_ratio: 5:1", command)
self.matchoutput(out, "Down Hosts Threshold: 0", command)
self.matchoutput(out, "Capacity limits: memory: 16384 [override]",
command)
self.matchoutput(out, "Personality: vulcan-1g-desktop-prod Archetype: esx_cluster",
command)
self.matchoutput(out, "Comments: ESX Cluster with a new comment",
command)
def test_220_verifysearchoverride(self):
command = ["search_esx_cluster", "--capacity_override"]
out = self.commandtest(command)
self.matchclean(out, "utecl1", command)
self.matchoutput(out, "utecl2", command)
self.matchclean(out, "utecl3", command)
self.matchclean(out, "utecl4", command)
self.matchclean(out, "utecl5", command)
def test_225_verifynooverrideflag(self):
command = ["show_esx_cluster", "--cluster=utecl1"]
out = self.commandtest(command)
self.matchclean(out, "override", command)
def test_230_failupdateutecl2(self):
command = ["update_esx_cluster", "--cluster", "utecl2",
"--memory_capacity", 1024]
out = self.badrequesttest(command)
self.matchoutput(out,
"ESX Cluster utecl2 is over capacity regarding memory",
command)
def test_240_clearoverrideutecl2(self):
command = ["update_esx_cluster", "--cluster", "utecl2",
"--clear_overrides"]
self.noouttest(command)
def test_250_verifyclearoverride(self):
command = ["show_esx_cluster", "--cluster", "utecl2"]
out = self.commandtest(command)
self.matchoutput(out, "Capacity limits: memory: 157236", command)
def test_260_verifyclearsearchoverride(self):
command = ["search_esx_cluster", "--capacity_override"]
out = self.commandtest(command)
self.matchclean(out, "utecl2", command)
def test_300_updateutecl3(self):
# Testing both that an empty cluster can have its personality
# updated and that personality without archetype will assume
# the current archetype.
command = ["update_esx_cluster", "--cluster=utecl3",
"--personality=vulcan-1g-desktop-prod"]
self.noouttest(command)
def test_310_verifyutecl3(self):
command = "show esx_cluster --cluster utecl3"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "ESX Cluster: utecl3", command)
self.matchoutput(out, "Metacluster: utmc1", command)
self.matchoutput(out, "Building: ut", command)
self.matchoutput(out, "Personality: vulcan-1g-desktop-prod Archetype: esx_cluster",
command)
def test_320_updateutecl1(self):
command = ["update_esx_cluster", "--cluster=utecl1", "--rack=ut10"]
self.noouttest(command)
def test_330_updateutecl1switch(self):
# Deprecated.
command = ["update_esx_cluster", "--cluster=utecl1",
"--switch=ut01ga1s04.aqd-unittest.ms.com"]
self.successtest(command)
def test_340_updateutecl1switchfail(self):
# Try something that is not a tor_switch
command = ["update_esx_cluster", "--cluster=utecl1",
"--switch=unittest02.one-nyp.ms.com"]
self.badrequesttest(command)
def test_350_failupdatelocation(self):
command = ["update_esx_cluster", "--cluster=utecl1", "--rack=ut3"]
out = self.badrequesttest(command)
self.matchoutput(out,
"Cannot set ESX Cluster utecl1 location constraint "
"to Rack ut3:",
command)
def test_360_failupdatenoncampus(self):
command = ["update_esx_cluster", "--cluster=utecl1", "--country=us"]
out = self.badrequesttest(command)
self.matchoutput(out, "Country us is not within a campus",
command)
def test_370_updatepersonality(self):
command = ["search_host", "--cluster=utecl1",
"--personality=vulcan-1g-desktop-prod"]
original_hosts = self.commandtest(command).splitlines()
original_hosts.sort()
self.failUnless(original_hosts, "No hosts found using %s" % command)
# Also test that the host plenary will be re-written correctly.
command = ["cat", "--hostname", original_hosts[0]]
out = self.commandtest(command)
self.matchoutput(out,
"""include { "personality/vulcan-1g-desktop-prod/config" };""",
command)
command = ["reconfigure", "--membersof=utecl1",
"--archetype=vmhost",
"--osname=esxi", "--osver=4.1.0-u1"]
out = self.successtest(command)
command = ["search_host", "--cluster=utecl1",
"--osversion=4.1.0-u1"]
updated_hosts = self.commandtest(command).splitlines()
updated_hosts.sort()
self.failUnless(updated_hosts, "No hosts found using %s" % command)
self.failUnlessEqual(original_hosts, updated_hosts,
"Expected only/all updated hosts %s to match the "
"list of original hosts %s" %
(updated_hosts, original_hosts))
command = ["cat", "--hostname", updated_hosts[0]]
out = self.commandtest(command)
self.matchoutput(out,
"""include { "os/esxi/4.1.0-u1/config" };""",
command)
command = ["reconfigure", "--membersof=utecl1",
"--archetype=vmhost", "--osname=esxi",
"--osversion=4.0.0"]
out = self.successtest(command)
def test_380_failupdatearchetype(self):
# If personality is not specified the current personality name
# is assumed for the new archetype.
command = ["reconfigure", "--membersof=utecl1",
"--archetype=windows"]
out = self.badrequesttest(command)
# The command complains both about the broker personality and OS.
self.matchoutput(out,
"No personality vulcan-1g-desktop-prod found for "
"archetype windows.",
command)
self.matchoutput(out,
"Cannot change archetype because operating system "
"vmhost/esxi-4.0.0 needs archetype vmhost.",
command)
def test_390_failupdatemaxmembers(self):
command = ["update_esx_cluster", "--cluster=utecl1", "--max_members=0"]
out = self.badrequesttest(command)
self.matchoutput(out,
"ESX Cluster utecl1 has 3 hosts bound, which exceeds "
"the requested limit 0.",
command)
def test_400_failupdateratio(self):
command = ["update_esx_cluster", "--cluster=utecl1",
"--vm_to_host_ratio=0"]
out = self.badrequesttest(command)
self.matchoutput(out, "violates ratio", command)
def test_400_failupdateillegalratio(self):
command = ["update_esx_cluster", "--cluster=utecl1",
"--vm_to_host_ratio=not-a:number"]
out = self.badrequesttest(command)
self.matchoutput(out, "Expected a ratio like", command)
def test_410_failupdaterealratio(self):
command = ["update_esx_cluster", "--cluster=utecl1",
"--vm_to_host_ratio=2:1000"]
out = self.badrequesttest(command)
self.matchoutput(out, "violates ratio", command)
def test_420_failupdatedht(self):
command = ["update_esx_cluster", "--cluster=utecl1",
"--down_hosts_threshold=4"]
out = self.badrequesttest(command)
self.matchoutput(out, "cannot support VMs", command)
def test_450_verifyutecl1(self):
default_max = self.config.get("archetype_esx_cluster",
"max_members_default")
default_ratio = self.config.get("archetype_esx_cluster",
"vm_to_host_ratio")
command = "show esx_cluster --cluster utecl1"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "ESX Cluster: utecl1", command)
self.matchoutput(out, "Metacluster: utmc1", command)
self.matchoutput(out, "Rack: ut10", command)
self.matchoutput(out, "Max members: %s" % default_max, command)
self.matchoutput(out, "vm_to_host_ratio: %s" % default_ratio, command)
self.matchoutput(out, "Personality: vulcan-1g-desktop-prod Archetype: esx_cluster",
command)
self.matchoutput(out, "Switch: ut01ga1s04.aqd-unittest.ms.com",
command)
self.matchoutput(out, "Capacity limits: memory: 78618", command)
self.matchoutput(out, "Resources used by VMs: memory: 32768", command)
def test_460_searchswitch(self):
command = ["search", "esx", "cluster", "--switch",
"ut01ga1s04.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchoutput(out, "utecl1", command)
self.matchclean(out, "utecl2", command)
def test_500_failmissingcluster(self):
command = ["update_esx_cluster", "--cluster=cluster-does-not-exist",
"--comments=test should fail"]
out = self.notfoundtest(command)
self.matchoutput(out, "Cluster cluster-does-not-exist not found",
command)
def test_600_updatethreshold(self):
cname = "utecl7"
command = ["update_esx_cluster", "--cluster=%s" % cname,
"--down_hosts_threshold=1%",
"--maint_threshold=50%"]
out = self.successtest(command)
## verify show
command = "show esx_cluster --cluster %s" % cname
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Down Hosts Threshold: 0 (1%)", command)
self.matchoutput(out, "Maintenance Threshold: 2 (50%)", command)
## verify cat
command = "cat --cluster=%s --data" % cname
out = self.commandtest(command.split(" "))
self.matchoutput(out, '"system/cluster/down_hosts_threshold" = 0;',
command)
self.matchoutput(out, '"system/cluster/down_maint_threshold" = 2;',
command)
self.matchoutput(out, '"system/cluster/down_hosts_as_percent" = true;',
command)
self.matchoutput(out, '"system/cluster/down_maint_as_percent" = true;',
command)
self.matchoutput(out, '"system/cluster/down_hosts_percent" = 1;',
command)
self.matchoutput(out, '"system/cluster/down_maint_percent" = 50;',
command)
def test_605_compileforthreshold(self):
cname = "utecl7"
command = "compile --cluster=%s" % cname
out = self.successtest(command.split(" "))
# FIXME: Need tests for plenary templates
# FIXME: Include test that machine plenary moved correctly
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestUpdateESXCluster)
unittest.TextTestRunner(verbosity=2).run(suite)
|
|
# -*- coding: utf-8 -*-
from __future__ import division
from werkzeug.security import generate_password_hash, check_password_hash
from sqlalchemy.ext.declarative import declared_attr
from . import db, lm
# from app import app
from flask import current_app
import random
import datetime
from sqlalchemy import BigInteger, Integer, Boolean, Unicode,\
    Float, UnicodeText, Text, String, DateTime, PickleType,\
    SmallInteger, Enum
from sqlalchemy.schema import Table, MetaData, Column, ForeignKey
from sqlalchemy.orm import relationship, backref, class_mapper
from sqlalchemy.types import TypeDecorator
from sqlalchemy import event, text
from sqlalchemy.engine import reflection
from sqlalchemy import create_engine
from sqlalchemy.orm.collections import attribute_mapped_collection
from sqlalchemy import desc
from sqlalchemy.ext.hybrid import hybrid_property, hybrid_method
from sqlalchemy import select, func
from sqlalchemy.schema import UniqueConstraint
from xml.etree import ElementTree
from xml.etree.ElementTree import Element
from xml.etree.ElementTree import SubElement
import xml.etree.cElementTree as ET
def findField(tag, root, msg=None):
    '''Return the text of the child element `tag` under `root`, or None
    (appending a note to `msg`) when the element is missing.
    '''
    try:
        return root.find(tag).text
    except AttributeError:
        if msg is not None:
            msg.append(tag + " not found")
        return None
def make_timestamp():
return datetime.datetime.utcnow()
class Survey(db.Model):
'''A table with Survey
'''
__tablename__ = 'survey'
#: unique id (automatically generated)
id = Column(Integer, primary_key = True)
    #: Title for this Survey
title = Column(String(128), nullable = False)
#: description for this Survey
description = Column(String(1200), default="")
#: created timestamp (automatically set)
created = Column(DateTime, default = make_timestamp)
    #: DateTime when the survey opens
    startDate = Column(DateTime, default = make_timestamp)
    #: DateTime when the survey closes
    endDate = Column(DateTime, default = make_timestamp)
#: max number of respondents, 0 is infinite
maxNumberRespondents = Column(Integer, default = 0)
#: Time in minutes that a user has to answer the survey
duration = Column(Integer, default = 0)
## Relationships
    #: Survey has zero or more consents
consents = relationship('Consent',
cascade="all, delete-orphan",
backref = 'survey', lazy = 'dynamic')
    #: Survey has zero or more child sections
sections = relationship('Section',
foreign_keys="Section.survey_id",
cascade="all, delete-orphan",
backref = 'survey', lazy = 'dynamic',
order_by= 'Section.sequence')
    #: Survey has all of its sections, at every nesting level
sections_all = relationship('Section',
backref = 'root',
lazy = 'dynamic',
foreign_keys="Section.root_id")
    #: Survey has zero or more stateSurveys
    stateSurveys = relationship('StateSurvey', backref = 'survey', lazy = 'dynamic')
    #: Survey belongs to one user (the researcher)
researcher_id = Column(Integer, ForeignKey('user.id'), nullable=False)
def __repr__(self):
return "<Survey(id='%s', title='%s')>" % (
self.id, self.title)
def number_respondents(self):
return StateSurvey.query.filter(
StateSurvey.status.op('&')(StateSurvey.FINISH_OK),
StateSurvey.survey_id==self.id).count()
    def is_duration(self):
        '''Return True if the survey has a maximum duration.
        '''
        return self.duration is not None and \
            self.duration!=0 and self.duration!=""
def to_json(self):
json_survey = {
'title': self.title,
'description': self.description,
'created': self.created,
'startDate': self.startDate,
'endDate': self.endDate,
'maxNumberRespondents': self.maxNumberRespondents,
# 'consents': self.consents.to_json(),
}
return json_survey
def to_xml(self):
'''write file:
tree.write("output.xml",encoding="ISO-8859-1", method="xml")
'''
survey = Element('survey')
title = SubElement(survey,'title')
title.text = self.title
description = SubElement(survey,'description')
description.text = self.description
# startDate = SubElement(survey,'startDate')
# startDate.text = str(self.startDate)
# endDate = SubElement(survey,'endDate')
# endDate.text = str(self.endDate)
maxNumberRespondents = SubElement(survey,'maxNumberRespondents')
maxNumberRespondents.text = str(self.maxNumberRespondents)
duration = SubElement(survey,'duration')
duration.text = str(self.duration)
for consent in self.consents:
survey.append(consent.to_xml())
for section in self.sections:
survey.append(section.to_xml())
tree = ET.ElementTree(survey)
return tree
@staticmethod
def from_xml(file, user):
# root = ET.parse('output.xml')
root = ET.parse(file)
msg = []
title = findField('title',root,msg)
description = findField('description',root,msg)
# startDate = findField('startDate',root,msg)
# endDate = findField('endDate',root,msg)
maxNumberRespondents = findField('maxNumberRespondents',root,msg)
duration = findField('duration',root,msg)
survey = Survey(title = title, description = description,
# startDate = startDate, endDate = endDate,
maxNumberRespondents = maxNumberRespondents,
duration = duration,
researcher = user)
db.session.add(survey)
for consent in root.findall('consent'):
Consent.from_xml(consent,survey)
for section in root.findall('section'):
Section.from_xml(section,survey,msg)
        try:
            db.session.commit()
            msg.append("Your survey has been saved.")
        except Exception:
            msg.append("bad XML file")
            db.session.rollback()
            raise
        return msg, survey
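# A minimal round-trip sketch (illustrative only; assumes an application
# context and an existing `user`): to_xml() and from_xml() are inverses up
# to the commented-out date fields.
def _example_survey_xml_roundtrip(survey, user):
    tree = survey.to_xml()
    tree.write("survey.xml", encoding="ISO-8859-1", method="xml")
    msg, copy = Survey.from_xml("survey.xml", user)
    return msg, copy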
class Consent(db.Model):
'''A table with Consents to a Survey
'''
__tablename__ = 'consent'
#: unique id (automatically generated)
id = Column(Integer, primary_key = True)
    #: Text for this consent
text = Column(String, nullable = False)
## Relationships
survey_id = Column(Integer, ForeignKey('survey.id'))
def to_json(self):
json_survey = {
'text': self.text,
}
return json_survey
def to_xml(self):
consent = Element('consent')
consent.text = self.text
return consent
@staticmethod
def from_xml(cons,survey):
consent = Consent(text = cons.text,
survey = survey)
db.session.add(consent)
class Section(db.Model):
'''A table with sections of a Survey
'''
__tablename__ = 'section'
#: unique id (automatically generated)
id = Column(Integer, primary_key = True)
    #: Title for this section
title = Column(String(128), nullable = False)
#: description for this section
description = Column(String, default="")
    #: sequence of the section.
    # If two or more sections of the same survey share the same sequence,
    # the order in which they are presented is chosen at random
sequence = Column(Integer, default = 1)
#:Percentage of Respondents who pass through this section
percent = Column(Float, default = 1)
#: created timestamp (automatically set)
#created = Column(DateTime, default = make_timestamp)
## Relationships
    #: Section has zero or more questions
questions = relationship('Question',
# cascade deletions
cascade="all, delete-orphan",
backref = 'section', lazy = 'dynamic',
order_by = 'Question.position')
    #: section belongs to zero or one survey
survey_id = Column(Integer, ForeignKey('survey.id'))
    #: section belongs to at most one parent section
parent_id = Column(Integer, ForeignKey(id))
# survey root
root_id = Column(Integer, ForeignKey('survey.id'))
children = relationship('Section',
# cascade deletions
cascade="all, delete-orphan",
backref=backref('parent', remote_side=id),
lazy = 'dynamic', uselist = True,
order_by= 'Section.sequence')
def __init__(self, **kwargs):
super(Section, self).__init__(**kwargs)
section = self
while section.parent is not None:
section = section.parent
self.root = section.survey
def __repr__(self):
return "<Section(id='%s', title='%s')>" % (
self.id, self.title)
def duplicate(self):
'''duplicate a section'''
def _duplicate(s_parent,section):
section_cp = Section(title= section.title, description=section.description,\
sequence=section.sequence, percent=section.percent,\
parent= s_parent)
db.session.add(section_cp)
for question in section.questions:
question.duplicate(section_cp)
for s in section.children:
_duplicate(section_cp,s)
section_cp = Section(title= self.title, description=self.description,\
sequence=self.sequence, percent=self.percent,\
parent= self.parent, survey=self.survey)
db.session.add(section_cp)
for question in self.questions:
question.duplicate(section_cp)
# _duplicate_question(section_cp,question)
for s in self.children:
_duplicate(section_cp,s)
db.session.commit()
def to_xml(self):
section = Element('section')
title = SubElement(section,'title')
title.text = self.title
description = SubElement(section,'description')
description.text = self.description
sequence = SubElement(section,'sequence')
sequence.text = str(self.sequence)
percent = SubElement(section,'percent')
percent.text = str(self.percent)
for question in Question.query.filter\
(Question.section_id==self.id, Question.parent==None):
section.append(question.to_xml())
for children in self.children:
section.append(children.to_xml())
return section
@staticmethod
def from_xml(root,survey,msg):
def from_xml_subSection(root,parent,msg):
title = findField('title',root,msg)
description = findField('description',root,msg)
sequence = findField('sequence',root,msg)
percent = findField('percent',root,msg)
section = Section(title = title,
description = description,
sequence = sequence,
percent = percent,
parent = parent
)
db.session.add(section)
position=1
for q in root.findall('question'):
position = Question.from_xml(q, section,position, msg)
for s in root.findall('section'):
from_xml_subSection(s,section,msg)
title = findField('title',root,msg)
description = findField('description',root,msg)
sequence = findField('sequence',root,msg)
percent = findField('percent',root,msg)
section = Section(title = title,
description = description,
sequence = sequence,
percent = percent,
survey = survey
)
db.session.add(section)
position = 1
for q in root.findall('question'):
position = Question.from_xml(q, section, position, msg)
for s in root.findall('section'):
from_xml_subSection(s,section,msg)
@staticmethod
def sequenceSections(sections):
        '''Generate the order in which sections are traversed.
        `sections` must already be ordered by sequence; only section ids
        are returned.
        '''
iMin = 0
lAux = []
l2Aux= []
if sections.count()==0:
return []
        for index, section in enumerate(sections):
            if sections[iMin].sequence != section.sequence:
                # build a randomly ordered sublist of the elements that
                # share the same sequence number
                lAux.extend(random.sample(sections[iMin:index], index - iMin))
                iMin = index
        # handle the final run of elements
        lAux.extend(random.sample(sections[iMin:sections.count()],
                                  sections.count() - iMin))
        # Check the percentage of respondents who pass through each section.
        # The percents of all sections at the same level and branch should
        # sum to 1; a percent of 1 means the section is always shown.
        ran = random.random()
        percent = 0
        insert = False
        kept = []
        for section in lAux:
            if section.percent != 1:
                percent = section.percent + percent
            # a percent of 1 keeps the section unconditionally
            if section.percent == 1:
                kept.append(section)
            # keep the first section whose cumulative percent exceeds the
            # random draw; the remaining alternatives are dropped
            elif (percent > ran) and (not insert):
                insert = True
                kept.append(section)
        lAux = kept
        # the "parents" are now in random order; recurse into the children
        for section in lAux:
            # skip sections that are empty (no description and no
            # questions); otherwise record the section id
            if not ((section.description is None or len(section.description) == 0)
                    and section.questions.count() == 0):
                l2Aux.append(section.id)
            l2Aux.extend(Section.sequenceSections(Section.query.filter(
                Section.parent_id == section.id).order_by(Section.sequence)))
        return l2Aux
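# A minimal usage sketch (illustrative only; assumes an application context
# with Section rows), mirroring how StateSurvey.__init__ builds its sequence:
def _example_section_order(survey):
    sections = Section.query.filter(
        Section.survey == survey).order_by(Section.sequence)
    # returns a flat, randomized list of section ids
    return Section.sequenceSections(sections)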
def are_elements_equal(x, y):
return x == y
class Question(db.Model):
'''A table with Questions
'''
__tablename__ = 'question'
#: unique id (automatically generated)
id = Column(Integer, primary_key = True)
#: Text for this question
text = Column(String, nullable = False)
#: position
position = Column(Integer, default =1)
#: If the question is obligatory or not
required = Column(Boolean, default = True)
#: possible choices
choices = Column(PickleType(comparator=are_elements_equal))
#: container to save whatever you want
container = Column(PickleType(comparator=are_elements_equal))
#:expected answer
expectedAnswer = Column(String(20), default="")
    #: number of attempts allowed for a question with an expected answer;
    # zero means unlimited attempts to get the right answer
maxNumberAttempt = Column(Integer, default = 0)
# # type of decision
decision = Column(Enum('none','part_two', 'decision_one_v1', 'decision_one_v2',
'decision_two','decision_three','decision_four','decision_five',
'decision_six'),default='none')
# decision with real money
is_real_money = Column(Boolean, default=False)
    #: Type of question; discriminates between subclasses
type = Column('type', String(50))
__mapper_args__ = {'polymorphic_on': type,
'order_by':position}
## Relationships
    #: question belongs to one survey
# survey_id = Column(Integer, ForeignKey('survey.id'))
    #: Question belongs to one section
    section_id = Column(Integer, ForeignKey('section.id'),nullable=False)
    #: Question has zero or more answers
answers = relationship('Answer', backref = 'question', lazy = 'dynamic')
    #: question belongs to at most one parent question
parent_id = Column(Integer, ForeignKey(id))
    # a subquestion depends on the answer to its parent question
subquestions = relationship('Question',
# cascade deletions
cascade="all, delete-orphan",
backref=backref('parent', remote_side=id),
lazy = 'dynamic', uselist = True)
condition_id = Column(Integer, ForeignKey('condition.id'))
condition = relationship("Condition",
cascade="all, delete-orphan",
backref=backref("question", uselist=False),
single_parent=True)
def __repr__(self):
return "<question(id='%s')>" % (
self.id)
@hybrid_property
def survey(self):
return self.section.root
def last_position(self):
question = Question.query.\
filter(Question.section==self.section).\
order_by(desc(Question.position)).first()
if question is None:
self.position=1
else:
self.position= question.position+1
@hybrid_property
def isSubquestion(self):
return self.parent is not None
    def isExpectedAnswer(self):
        '''Return True if there is an expected answer.
        '''
        return len(self.expectedAnswer)>0
def duplicate(self,section):
        '''Duplicate this question into `section`; does not check whether
        it is a subquestion.
        '''
if isinstance(self, QuestionYN):
question=QuestionYN()
if isinstance (self, QuestionText):
question = QuestionText(isNumber=self.isNumber,
isNumberFloat=self.isNumberFloat,
regularExpression=self.regularExpression,
errorMessage=self.errorMessage)
if isinstance (self,QuestionChoice):
if self.is_range:
question = QuestionChoice(range_min = self.range_min,
range_max = self.range_max, range_step = self.range_step)
else:
question = QuestionChoice(choices= self.choices[:])
question.render = self.render
if isinstance (self, QuestionLikertScale):
question = QuestionLikertScale(minLikert=self.minLikert,
maxLikert=self.maxLikert, labelMin=self.labelMinLikert,
labelMax=self.labelMaxLikert)
if self.container is not None:
question.container = self.container[:]
question.decision=self.decision
question.is_real_money= self.is_real_money
question.text = self.text
question.required = self.required
question.expectedAnswer = self.expectedAnswer
question.maxNumberAttempt = self.maxNumberAttempt
question.section = section
question.position = self.position
db.session.add(question)
def to_xml(self):
question = Element('question')
type = SubElement(question,'type')
type.text = self.type
text1 = SubElement(question,'text')
text1.text = self.text
required = SubElement(question,'required')
required.text = str(self.required)
money = SubElement(question,'money')
money.text = str(self.is_real_money)
decision = SubElement(question,'decision')
decision.text = self.decision
        if self.choices is not None:
for choice in self.choices:
c = SubElement(question,'choice')
c.text = choice
        if self.container is not None:
for element in self.container:
c = SubElement(question,'container')
c.text = element
expectedAnswer = SubElement(question,'expectedAnswer')
expectedAnswer.text = self.expectedAnswer
maxNumberAttempt = SubElement(question,'maxNumberAttempt')
maxNumberAttempt.text = str(self.maxNumberAttempt)
if isinstance (self, QuestionText):
isNumber = SubElement(question,'isNumber')
isNumber.text = str(self.isNumber)
isNumberFloat = SubElement(question,'isNumberFloat')
isNumberFloat.text = str(self.isNumberFloat)
regularExpression = SubElement(question,'regularExpression')
regularExpression.text = self.regularExpression
errorMessage = SubElement(question,'errorMessage')
errorMessage.text = self.errorMessage
if isinstance(self, QuestionChoice):
if self.range_min is not None:
range_min = SubElement(question,'range_min')
range_min.text = str(self.range_min)
if self.range_max is not None:
range_max = SubElement(question,'range_max')
range_max.text = str(self.range_max)
if self.range_step is not None:
range_step = SubElement(question,'range_step')
range_step.text = str(self.range_step)
render = SubElement(question,'render')
render.text = str(self.render)
if isinstance (self, QuestionLikertScale):
minLikert = SubElement(question,'minLikert')
minLikert.text = str(self.minLikert)
maxLikert = SubElement(question,'maxLikert')
maxLikert.text = str(self.maxLikert)
labelMin = SubElement(question,'labelMin')
labelMin.text = self.labelMin
labelMax = SubElement(question,'labelMax')
labelMax.text = self.labelMax
if self.condition is not None:
question.append(self.condition.to_xml())
for subquestion in self.subquestions:
question.append(subquestion.to_xml())
return question
@staticmethod
def from_xml(root,section,position,msg):
def fill_question(root,section,position,msg):
texto = findField('text',root,msg)
required = (findField('required',root,msg) =="True")
money = (findField('money',root,msg) =="True")
decision = findField('decision',root,msg)
#CHOICES = findField('sequence',root,msg)
l=[]
for choice in root.findall('choice'):
l.append(choice.text)
l1=[]
for choice in root.findall('container'):
l1.append(choice.text)
expectedAnswer = findField('expectedAnswer',root,msg)
maxNumberAttempt = findField('maxNumberAttempt',root,msg)
type = findField('type',root,msg)
if type == 'yn':
question = QuestionYN()
elif type == 'text':
isNumber = (findField('isNumber',root,msg)=="True")
isNumberFloat = (findField('isNumberFloat',root,msg)=="True")
regularExpression = findField('regularExpression',root,msg)
errorMessage = findField('errorMessage',root,msg)
question = QuestionText(isNumber=isNumber,
isNumberFloat=isNumberFloat,
regularExpression=regularExpression,
errorMessage=errorMessage)
elif type == 'choice':
range_min = findField('range_min',root,msg)
range_max = findField('range_max',root,msg)
range_step = findField('range_step',root,msg)
render = (findField('render',root,msg))
question = QuestionChoice(range_min=range_min,
range_max= range_max,
range_step = range_step,
render=render)
elif type == 'likertScale':
minLikert = findField('minLikert',root,msg)
maxLikert = findField('maxLikert',root,msg)
labelMin = findField('labelMin',root,msg)
labelMax = findField('labelMax',root,msg)
question = QuestionLikertScale(minLikert=minLikert,
maxLikert=maxLikert,
labelMin=labelMin,
labelMax=labelMax)
            else:
                # unknown question type: fail loudly instead of returning a
                # value the caller cannot unpack
                raise ValueError("unknown question type: %s" % type)
question.text = texto
question.is_real_money = money
question.decision = decision
question.required = required
question.expectedAnswer = expectedAnswer
question.maxNumberAttempt = maxNumberAttempt
question.choices = l
question.container = l1
question.section = section
question.position=position
return question, position+1
def from_xml_subquestion(root,section,parent,position,msg):
question,position = fill_question(root,section,position,msg)
question.parent=parent
db.session.add(question)
for c in root.findall('condition'):
Condition.from_xml(c, question, msg)
for q in root.findall('question'):
position = from_xml_subquestion(q, section, question,position, msg)
return position
question, position = fill_question(root,section,position,msg)
db.session.add(question)
for q in root.findall('question'):
position = from_xml_subquestion(q, section, question,position, msg)
return position
class QuestionYN(Question):
'''Question of type yes or no
'''
__mapper_args__ = {'polymorphic_identity': 'yn'}
class QuestionText(Question):
'''Question of type text
'''
__mapper_args__ = {'polymorphic_identity': 'text'}
isNumber = Column(Boolean, default=False)
isNumberFloat = Column(Boolean, default=False)
regularExpression = Column (String(256), default="")
    # error text shown when the answer does not match the regular expression
errorMessage = Column (String(256), default="")
class QuestionChoice(Question):
'''Question of type choice
'''
__mapper_args__ = {'polymorphic_identity': 'choice'}
range_min = Column(Integer, default="")
range_max = Column(Integer,default="")
range_step = Column(Float, default = 1)
render = Column(Enum('vertical','horizontal','select'), default="vertical")
#: possible choices
@hybrid_property
def is_range(self):
return self.range_min !="" and self.range_max !=""
def number(self):
if self.is_range:
return self.range_max + 1 - self.range_min
return len(self.choices)
class QuestionLikertScale(Question):
'''Question of type likert Scale
'''
__mapper_args__ = {'polymorphic_identity': 'likertScale'}
minLikert = Column(Integer)
maxLikert = Column(Integer)
labelMin = Column(String(128), default="")
labelMax = Column(String(128), default="")
class Condition(db.Model):
    '''Condition that makes a question depend on the answer
    to another question.
    '''
__tablename__ = 'condition'
#: unique id (automatically generated)
id = Column(Integer, primary_key = True)
# type of operation
operation = Column(Enum('none','<', '==', '>'),
default='none')
    # value to compare against in the operation
value = Column(String(64))
def to_xml(self):
condition = Element('condition')
operation = SubElement(condition,'operation')
operation.text = str(self.operation)
value = SubElement(condition,'value')
value.text = str(self.value)
return condition
@staticmethod
def from_xml(root,question,msg):
operation = findField('operation',root,msg)
value = findField('value',root,msg)
condition = Condition(operation=operation,
value=value,
question=question)
db.session.add(condition)
ROLE_USER = 0
ROLE_RESEARCHER = 1
ROLE_ADMIN = 2
class User(db.Model):
'''A table with user
'''
__tablename__ = 'user'
#: unique id (automatically generated)
id = Column(Integer, primary_key = True)
#: created timestamp (automatically set)
created = Column(DateTime, default = make_timestamp)
#: email address ...
email = Column(Unicode(length=254), unique=True, nullable=False)
#: user name
nickname = Column(String(64))
#: password ...
password_hash = db.Column(db.String(128))
#: user confirmed ...
confirmed = db.Column(db.Boolean, default=False)
#: role of user
role = Column(SmallInteger, default = ROLE_USER)
## Relationships
    #: User has zero or more answers
    answers = relationship('Answer', backref = 'user', lazy = 'dynamic')
    #: User has zero or more stateSurveys
    stateSurveys = relationship('StateSurvey', backref = 'user', lazy = 'dynamic')
    #: A researcher has zero or more Surveys
Surveys = relationship('Survey', backref = 'researcher', lazy = 'dynamic')
def __init__(self, **kwargs):
super(User, self).__init__(**kwargs)
if len(User.query.all())==0:
self.role=ROLE_RESEARCHER
def __repr__(self):
return "<user(id='%s')>" % (
self.id)
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def is_authenticated(self):
'''Returns True if the user is authenticated, i.e. they have provided
valid credentials. (Only authenticated users will fulfill the criteria
of login_required.)
'''
return True
def is_active(self):
'''Returns True if this is an active user - in addition to being
authenticated, they also have activated their account, not been
suspended, or any condition your application has for rejecting an
account. Inactive accounts may not log in (without being forced of
course).
'''
return True
def is_anonymous(self):
'''Returns True if this is an anonymous user. (Actual users should
return False instead.)
'''
return False
def get_id(self):
'''Returns a unicode that uniquely identifies this user, and can be
used to load the user from the user_loader callback. Note that this
must be a unicode
'''
return unicode(self.id)
def is_researcher(self):
return self.role == ROLE_RESEARCHER
@lm.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
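# Illustrative note: Flask-Login invokes this loader on every request,
# passing the unicode id produced by User.get_id(), to rebuild the current
# user object.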
class Answer(db.Model):
'''A table with answers
'''
__tablename__ = 'answer'
#: unique id (automatically generated)
id = Column(Integer, primary_key = True)
#: created timestamp (automatically set)
created = Column(DateTime, default = make_timestamp)
#: answer Numeric
answerNumeric = Column(Integer)
#: answer Text
answerText = Column(String)
#: answer Boolean
answerYN = Column(Boolean)
    #: number of attempts made on a question with an expected answer
    numberAttempt = Column(Integer, default = 0)
    #: time from the start of the section until the question was answered, in milliseconds
    globalTime = Column(Integer, default = 0)
    #: time since the previous question was answered, in milliseconds
    differentialTime = Column(Integer, default = 0)
## Relationships
    # answer belongs to a user
    user_id = Column(Integer, ForeignKey('user.id'), nullable=False)
    # answer belongs to a question
question_id = Column(Integer, ForeignKey('question.id'), nullable=False)
__table_args__ = (UniqueConstraint('user_id', 'question_id'),)
def __repr__(self):
return "<answer(id='%s', user='%s', question='%s')>\n" % (
self.id, self.user_id, self.question_id)
@hybrid_property
def section(self):
return self.question.section_id
@section.expression
def section(cls):
return Question.section_id
# @hybrid_property
# def survey(self):
# return self.question.section.survey_id
    def answerAttempt(self):
        '''Return True if the answer is correct; otherwise increment the
        attempt counter by one and return False.
        '''
if self.question.isExpectedAnswer():
if isinstance(self.question, QuestionText):
if self.answerText.lower() != self.question.expectedAnswer.lower():
self.numberAttempt = self.numberAttempt + 1
db.session.add(self)
db.session.commit()
return False
else:
return True
if isinstance(self.question,QuestionYN):
if self.answerYN != (self.question.expectedAnswer.lower()=='yes'):
self.numberAttempt = self.numberAttempt + 1
db.session.add(self)
db.session.commit()
return False
else:
return True
            if isinstance(self.question,QuestionChoice):
                # wrong only if neither the text nor the numeric answer
                # matches the expected answer
                if self.answerText.lower() != self.question.expectedAnswer.lower() and \
                        str(self.answerNumeric) != self.question.expectedAnswer:
self.numberAttempt = self.numberAttempt + 1
db.session.add(self)
db.session.commit()
return False
else:
return True
else:
return True
    def isMoreAttempt(self):
        '''Return True if more attempts remain.
        '''
if self.numberAttempt>=self.question.maxNumberAttempt and self.question.maxNumberAttempt!=0:
return False
else:
return True
class StateSurvey(db.Model):
'''A table that saves the state of a survey
'''
NONE = 0x00
# finish
FINISH = 0x01
# finish ok
FINISH_OK = 0x02
#: finished out of time
TIMED_OUT = 0x04
#: finished out of date
END_DATE_OUT = 0x08
#:part two section with money
PART2_MONEY = 0X10
#:part two section without money
PART2_NO_MONEY = 0X20
#:part three section with money
PART3_MONEY = 0X40
#:part three section without money
PART3_NO_MONEY = 0X80
#:"match" game impatience, part2
GAME_IMPATIENCE = 0X100
#:"match" game lottery v1, part3, decision 1 v1
GAME_LOTTERY_V1 = 0X200
#:"match" game lottery v2, part3, decision 1 v2
GAME_LOTTERY_V2 = 0X400
#:"match" game rent1, part3, decision 2
GAME_RENT1 = 0X800
#:"match" game rent2, part3, decision 3
GAME_RENT2 = 0X1000
#:"match" game ultimatum, part3, decision 4&5
GAME_ULTIMATUM = 0X2000
#:"match" game dictador, part3, decision 6
GAME_DICTADOR = 0X4000
#: do matching
MATCHING = 0X8000
#: write stat
STATS = 0X10000
NO_ERROR = 0
    # maximum number of respondents exceeded
ERROR_EXCEEDED = 1
#: time exceeded
ERROR_TIMED_OUT = 2
#out of date
ERROR_END_DATE_OUT = 3
# survey not found
ERROR_NO_SURVEY = 4
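    # The status column is a bit field: flags are OR-ed together and tested
    # with "&" (illustrative example only):
    #   status = StateSurvey.FINISH | StateSurvey.TIMED_OUT
    #   bool(status & StateSurvey.FINISH)     # True
    #   bool(status & StateSurvey.FINISH_OK)  # False
    # The ERROR_* values above are plain return codes, not bit flags.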
__tablename__ = 'stateSurvey'
#: unique id (automatically generated)
id = Column(Integer, primary_key = True)
#: created timestamp (automatically set)
created = Column(DateTime, default = make_timestamp)
    #: set when the user accepts the consents
    start_date = Column(DateTime, default = make_timestamp)
    #: time when the survey finished
    endDate = Column(DateTime)
    #: IP of the user; IPv6 needs 8 blocks of 4 hex digits plus separators (8*5-1 = 39 chars)
ip = Column(String(40))
#: Consent accept or not
consented = Column(Boolean, default=False)
#: finished or not
status = Column(Integer, default = NONE)
    #: sequence in which sections are traversed (a list of section ids to go through)
    sequence = Column(PickleType)
    #: dict mapping section id to time spent, in ms (maybe better as a separate table)
    sectionTime = Column(PickleType, default = {})
    #: index of the last section completed
index = Column(Integer, default =0)
## Relationships
    # stateSurvey belongs to a user
    user_id = Column(Integer, ForeignKey('user.id'), nullable=False)
    # stateSurvey belongs to a survey
survey_id = Column(Integer, ForeignKey('survey.id'), nullable=False)
__table_args__ = (UniqueConstraint('user_id', 'survey_id'),)
def __repr__(self):
return "<StateSurvey(id='%s', survey='%s', user='%s', status='%s')>" % (
self.id, self.survey_id, self.user_id, self.status)
def __init__(self, **kwargs):
super(StateSurvey, self).__init__(**kwargs)
        sections = Section.query.filter(Section.survey == self.survey).order_by(Section.sequence)
        seq = Section.sequenceSections(sections)
        # for load tests, use a fixed sequence
        if current_app.config.get('JMETER', False):
            seq = current_app.config.get('SEQUENCE', [])
        self.sequence = seq
        self.sectionTime = {}
    def get_status(self):
        '''Return a string describing the status.
        '''
        string = ""
        # NONE is 0x00, so it cannot be tested with "&"; "not finished"
        # means the FINISH bit is not set
        if not (self.status & StateSurvey.FINISH):
            string = string + "not finished, "
        if self.status & StateSurvey.FINISH_OK:
            string = string + "finished ok, "
        if self.status & StateSurvey.TIMED_OUT:
            string = string + "timed out, "
        if self.status & StateSurvey.END_DATE_OUT:
            string = string + "end date out"
        return string
    def _delete_answers(self):
        '''Delete all answers of the user in this survey.
        (This could be done with a single recursive query.)
        '''
for s in self.sequence:
section = Section.query.get(s)
answers = Answer.query.filter(\
Answer.question_id==Question.id,\
Question.section_id==section.id,\
Answer.user_id == self.user_id)
for ans in answers:
db.session.delete(ans)
db.session.commit()
    def check_survey_duration_and_date(self):
        # return NO_ERROR if the survey is within its duration and date range
now = datetime.datetime.utcnow()
start = self.start_date
elapsedTime = now - start
if self.survey.is_duration():
if elapsedTime.total_seconds()>self.survey.duration*60 and \
not (self.status & StateSurvey.FINISH):
                # time has run out; mark the survey as timed out
self.status = StateSurvey.TIMED_OUT | StateSurvey.FINISH
db.session.add(self)
db.session.commit()
return StateSurvey.ERROR_TIMED_OUT
if now > self.survey.endDate or now < self.survey.startDate:
            # answered outside the survey's date range
self.status = StateSurvey.END_DATE_OUT | StateSurvey.FINISH
db.session.add(self)
db.session.commit()
return StateSurvey.ERROR_END_DATE_OUT
return StateSurvey.NO_ERROR
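    # Illustrative example only: callers compare the returned code against
    # the ERROR_* constants, e.g.
    #   err = state.check_survey_duration_and_date()
    #   if err == StateSurvey.ERROR_TIMED_OUT:
    #       pass  # answers are kept, but the survey is marked FINISH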
    def accept_consent(self):
        '''Record the consent and start the survey timer.
        '''
self.consented=True
self.start_date= datetime.datetime.utcnow()
db.session.add(self)
db.session.commit()
def percentSurvey(self):
'''returns the percentage done of the survey
'''
return round(100*self.index/len(self.sequence))
def nextSection(self):
        '''Return the next Section to do, or None if there is none.
        '''
if self.index>=len(self.sequence) or self.status & StateSurvey.FINISH:
return None
section = Section.query.get(self.sequence[self.index])
return section
def is_finished(self):
return self.status & StateSurvey.FINISH >0
def finishedSection(self,time):
        '''Record that a section is finished and advance the index.
        '''
        # note: in-place mutation of a PickleType value is not detected by
        # SQLAlchemy (e.g. self.sectionTime[key] = t alone is not saved),
        # so copy, update and reassign instead
d1 = self.sectionTime.copy()
d1[self.sequence[self.index]] = time
self.sectionTime = d1.copy()
self.index=self.index+1
if self.index>=len(self.sequence):
self.status = self.status | StateSurvey.FINISH | StateSurvey.FINISH_OK
self.endDate = datetime.datetime.utcnow()
db.session.add(self)
db.session.commit()
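    # Illustrative example only, showing why the copy/reassign above is
    # needed with PickleType columns:
    #   ss.sectionTime[key] = t   # in-place change: not detected, not saved
    #   d = ss.sectionTime.copy()
    #   d[key] = t
    #   ss.sectionTime = d        # reassignment is detected and saved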
@staticmethod
def getStateSurvey(id_survey, user, ip = ""):
stateSurvey = StateSurvey.query.filter(StateSurvey.survey_id == id_survey,\
StateSurvey.user_id == user.id).first()
if stateSurvey is None:
survey = Survey.query.get(id_survey)
if survey is None:
return None, StateSurvey.ERROR_NO_SURVEY
if survey.maxNumberRespondents > 0 and survey.maxNumberRespondents<=StateSurvey.query.filter(
StateSurvey.status.op('&')(StateSurvey.FINISH_OK),
StateSurvey.survey_id==survey.id).count():
return None, StateSurvey.ERROR_EXCEEDED
            stateSurvey = StateSurvey(survey = survey,
                                      user = user, ip = ip)
db.session.add(stateSurvey)
db.session.commit()
return stateSurvey, stateSurvey.check_survey_duration_and_date()
class GameImpatience(db.Model):
'''store the result of part_two
'''
__tablename__ = 'gameImpatience'
#: unique id (automatically generated)
id = Column(Integer, primary_key = True)
#: survey
survey_id = Column(Integer,ForeignKey('survey.id'),nullable=False)
#: user
user_id = Column(Integer, ForeignKey('user.id'),nullable=False)
#:answer of User
answer_id = Column(Integer, ForeignKey('answer.id'),nullable=False)
    #: whether real money was at stake
    is_real_money = Column(Boolean, default=False)
    #: whether a prize was won
    prize = Column(Boolean, default=False)
## Relationships
user = relationship("User")
answer = relationship("Answer")
survey = relationship("Survey")
__table_args__ = (UniqueConstraint('user_id', 'survey_id'),)
def __init__(self, **kwargs):
super(GameImpatience, self).__init__(**kwargs)
        if self.is_real_money:
            if random.randint(1,10)==1:
                # the user wins a prize (1 in 10 chance)
                self.prize=True
@hybrid_property
def answer_text(self):
        return self.answer.answerText
class Game(db.Model):
'''store the result of part three game1,2,3
'''
__tablename__='game'
#: unique id (automatically generated)
id = Column(Integer, primary_key = True)
#: survey
survey_id = Column(Integer,ForeignKey('survey.id'), nullable=False)
#: userA
userA_id = Column(Integer, ForeignKey('user.id'))
#: userB
userB_id = Column(Integer, ForeignKey('user.id'))
#:answer of UserA
answerA_id = Column(Integer, ForeignKey('answer.id'))
#:answer of UserB
answerB_id = Column(Integer, ForeignKey('answer.id'))
    #: whether real money was at stake
    is_real_money = Column(Boolean, default=False)
    #: whether user A won a prize
    prizeA = Column(Boolean, default=False)
    #: whether user B won a prize
    prizeB = Column(Boolean, default=False)
    #: True when this is not userA's first match (re-matched for lack of users)
    repeatA = Column(Boolean, default=False)
    #: True when this is not userB's first match (re-matched for lack of users)
repeatB = Column(Boolean, default=False)
    #: money earned by userA
    moneyA = Column(Float)
    #: money earned by userB
moneyB = Column(Float)
    #: Type of game; discriminates between subclasses
type = Column('type', String(50))
__mapper_args__ = {'polymorphic_on': type}
## Relationships
userA = relationship("User", foreign_keys = "Game.userA_id")
userB = relationship("User", foreign_keys = "Game.userB_id")
answerA = relationship("Answer", foreign_keys = "Game.answerA_id")
answerB = relationship("Answer", foreign_keys = "Game.answerB_id")
survey = relationship("Survey")
@hybrid_property
def cashInitA(self):
return float(self.answerA.answerText)
@hybrid_property
def cashInitB(self):
return float(self.answerB.answerText)
@hybrid_property
def statusA(self):
ss = StateSurvey.query.filter(StateSurvey.user_id==self.userA_id,\
StateSurvey.survey_id==self.survey_id).first()
return ss.status
@hybrid_property
def statusB(self):
ss = StateSurvey.query.filter(StateSurvey.user_id==self.userB_id,\
StateSurvey.survey_id==self.survey_id).first()
return ss.status
class GameLottery1(Game):
'''Lottery game version1 (part 3, decision 1)
'''
__mapper_args__ = {'polymorphic_identity': 'gameLottery1'}
#:user win (A or B)
win_id = Column(Integer,ForeignKey('user.id'))
## Relationships
win = relationship("User", foreign_keys = "GameLottery1.win_id")
def __init__(self, **kwargs):
        '''Probability := userA_money / (userA_money + userB_money)
        '''
super(GameLottery1, self).__init__(**kwargs)
AWARD = 10
INIT_MONEY = 10
percentA = self.percent_playerA
percentB = self.percent_playerB
        if percentA == 0 and percentB == 0:
            # nobody played the lottery
            self.moneyA = INIT_MONEY
            self.moneyB = INIT_MONEY
        elif percentA > random.random():
            # player A wins
            self.win = self.userA
self.moneyA = AWARD + (INIT_MONEY - self.cashInitA)
self.moneyB = (INIT_MONEY - self.cashInitB)
else:
self.win = self.userB
self.moneyB = AWARD + (INIT_MONEY - self.cashInitB)
self.moneyA = (INIT_MONEY - self.cashInitA)
@hybrid_property
def percent_playerA(self):
try:
return self.cashInitA/(self.cashInitA+self.cashInitB)
except ZeroDivisionError:
return 0
@hybrid_property
def percent_playerB(self):
try:
return self.cashInitB/(self.cashInitA+self.cashInitB)
except ZeroDivisionError:
return 0
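# Worked example (illustrative only): with cashInitA = 6 and cashInitB = 4,
# percent_playerA = 6 / (6 + 4) = 0.6, so player A wins the AWARD with
# probability 0.6, and each player is refunded INIT_MONEY minus the cash
# staked.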
class GameLottery2(Game):
'''Lottery game version2 (part 3, decision 1)
'''
__mapper_args__ = {'polymorphic_identity': 'gameLottery2'}
def __init__(self, **kwargs):
        '''Share of the prize := userA_money / (userA_money + userB_money)
        '''
super(GameLottery2, self).__init__(**kwargs)
AWARD = 10
INIT_MONEY = 10
self.moneyA= AWARD*self.percent_playerA + (INIT_MONEY - self.cashInitA)
self.moneyB= AWARD*self.percent_playerB + (INIT_MONEY - self.cashInitB)
@hybrid_property
def percent_playerA(self):
try:
return self.cashInitA/(self.cashInitA+self.cashInitB)
except ZeroDivisionError:
return 0
@hybrid_property
def percent_playerB(self):
try:
return self.cashInitB/(self.cashInitA+self.cashInitB)
except ZeroDivisionError:
return 0
class GameRent1(Game):
'''Rent1 game(part 3, decision 2)
'''
__mapper_args__ = {'polymorphic_identity': 'gameRent1'}
def __init__(self, **kwargs):
super(GameRent1, self).__init__(**kwargs)
INIT_MONEY = 10
self.moneyA = self.fund + INIT_MONEY - self.cashInitA
self.moneyB = self.fund + INIT_MONEY - self.cashInitB
@hybrid_property
def fund(self):
CONSTANT_FUND = 0.8
return (self.cashInitA+self.cashInitB)*CONSTANT_FUND
class GameRent2(Game):
'''Rent 2 game(part 3, decision 3)
'''
__mapper_args__ = {'polymorphic_identity': 'gameRent2'}
def __init__(self, **kwargs):
super(GameRent2, self).__init__(**kwargs)
INIT_MONEY = 10
self.moneyA = self.fund + INIT_MONEY - self.cashInitA
self.moneyB = self.fund + INIT_MONEY - self.cashInitB
@hybrid_property
def fund(self):
CONSTANT_FUND = 1.2
return (self.cashInitA+self.cashInitB)*CONSTANT_FUND
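# Worked example (illustrative only): with cashInitA = 4, cashInitB = 6 and
# INIT_MONEY = 10, GameRent2's fund is (4 + 6) * 1.2 = 12, giving
# moneyA = 12 + 10 - 4 = 18 and moneyB = 12 + 10 - 6 = 16; GameRent1 is the
# same computation with CONSTANT_FUND = 0.8.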
class GameUltimatum(Game):
'''Ultimatum game(part 3, decision 4&5)
'''
__mapper_args__ = {'polymorphic_identity': 'gameUltimatum'}
section = Column(Integer,ForeignKey('section.id'))
    #: whether the offer was accepted
accepted = Column(Boolean)
def __init__(self, **kwargs):
def get_interval(section_id):
            '''Return a dict mapping each interval's money value to its question_id.
            '''
dic ={}
for q in Section.query.get(section_id).questions:
if q.decision=="decision_five":
dic[int(q.container[0])]=q.id
return dic
super(GameUltimatum, self).__init__(**kwargs)
MONEY = 20
# interval = QuestionDecisionFive.getIntverval(section_id)
dic = get_interval(self.section)
        if self.cashInitA in dic:
answer = Answer.query.filter(
Answer.question_id == dic[self.cashInitA],
Answer.user_id == self.userB.id).first()
self.answerB =answer
            if answer.answerNumeric==0:
                # offer accepted
self.moneyA = MONEY - self.cashInitA
self.moneyB = self.cashInitA
self.accepted = True
else:
self.moneyA = 0
self.moneyB = 0
self.accepted = False
else:
raise "survey with bad gameUltimatum, there are not all keys\n"
class GameDictador(Game):
    '''Dictator game (part 3, decision 6)
'''
__mapper_args__ = {'polymorphic_identity': 'gameDictador'}
def __init__(self, **kwargs):
super(GameDictador, self).__init__(**kwargs)
MONEY = 20
self.moneyA = MONEY - self.cashInitA
self.moneyB = self.cashInitA
class Raffle(db.Model):
    '''store the result of the raffle (for users who always played with fake money)
'''
__tablename__ = 'raffle'
#: unique id (automatically generated)
id = Column(Integer, primary_key = True)
#: survey
survey_id = Column(Integer,ForeignKey('survey.id'),nullable=False)
#: user
user_id = Column(Integer, ForeignKey('user.id'),nullable=False)
    #: prize amount won
prize = Column(Integer, default = 0)
## Relationships
user = relationship("User")
survey = relationship("Survey")
__table_args__ = (UniqueConstraint('user_id', 'survey_id'),)
def __init__(self, **kwargs):
super(Raffle, self).__init__(**kwargs)
if random.randint(1,10)==1:
i = random.randint(1,4)
if i==1:
self.prize=5
elif i==2:
self.prize=10
elif i==3:
self.prize=20
elif i==4:
self.prize=40
            else:
                raise ValueError("invalid option")
else:
self.prize=0
|
|
# Copyright (C) 2015 Tom Barron <tpb@dyncloud.net>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Backup NFS driver.
"""
import bz2
import filecmp
import hashlib
import os
import shutil
import tempfile
import zlib
import mock
from os_brick.remotefs import remotefs as remotefs_brick
from oslo_config import cfg
import six
from cinder.backup.drivers import nfs
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder import test
from cinder.tests.unit import fake_constants as fake
from cinder import utils
CONF = cfg.CONF
FAKE_BACKUP_MOUNT_POINT_BASE = '/fake/mount-point-base'
FAKE_HOST = 'fake_host'
FAKE_EXPORT_PATH = 'fake/export/path'
FAKE_BACKUP_SHARE = '%s:/%s' % (FAKE_HOST, FAKE_EXPORT_PATH)
FAKE_BACKUP_PATH = os.path.join(FAKE_BACKUP_MOUNT_POINT_BASE,
FAKE_EXPORT_PATH)
FAKE_BACKUP_ID = fake.BACKUP_ID
FAKE_BACKUP_ID_PART1 = fake.BACKUP_ID[:2]
FAKE_BACKUP_ID_PART2 = fake.BACKUP_ID[2:4]
FAKE_BACKUP_ID_REST = fake.BACKUP_ID[4:]
UPDATED_CONTAINER_NAME = os.path.join(FAKE_BACKUP_ID_PART1,
FAKE_BACKUP_ID_PART2,
FAKE_BACKUP_ID)
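# The default container shards the backup id into a two-level directory:
# a backup id of 'abcdef' would map to 'ab/cd/abcdef' (illustrative value;
# the test uses fake.BACKUP_ID).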
class BackupNFSShareTestCase(test.TestCase):
def setUp(self):
super(BackupNFSShareTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.mock_object(nfs, 'LOG')
def test_check_configuration_no_backup_share(self):
self.override_config('backup_share', None)
self.mock_object(nfs.NFSBackupDriver, '_init_backup_repo_path',
mock.Mock(return_value=FAKE_BACKUP_PATH))
with mock.patch.object(nfs.NFSBackupDriver, '_check_configuration'):
driver = nfs.NFSBackupDriver(self.ctxt)
self.assertRaises(exception.ConfigNotFound,
driver._check_configuration)
def test_init_backup_repo_path(self):
self.override_config('backup_share', FAKE_BACKUP_SHARE)
self.override_config('backup_mount_point_base',
FAKE_BACKUP_MOUNT_POINT_BASE)
mock_remotefsclient = mock.Mock()
mock_remotefsclient.get_mount_point = mock.Mock(
return_value=FAKE_BACKUP_PATH)
self.mock_object(nfs.NFSBackupDriver, '_check_configuration')
self.mock_object(remotefs_brick, 'RemoteFsClient',
mock.Mock(return_value=mock_remotefsclient))
self.mock_object(utils, 'get_root_helper')
with mock.patch.object(nfs.NFSBackupDriver, '_init_backup_repo_path'):
driver = nfs.NFSBackupDriver(self.ctxt)
path = driver._init_backup_repo_path()
self.assertEqual(FAKE_BACKUP_PATH, path)
        utils.get_root_helper.assert_called_once_with()
mock_remotefsclient.mount.assert_called_once_with(FAKE_BACKUP_SHARE)
mock_remotefsclient.get_mount_point.assert_called_once_with(
FAKE_BACKUP_SHARE)
def fake_md5(arg):
class result(object):
def hexdigest(self):
return 'fake-md5-sum'
ret = result()
return ret
class BackupNFSSwiftBasedTestCase(test.TestCase):
"""Test Cases for based on Swift tempest backup tests."""
_DEFAULT_VOLUME_ID = fake.VOLUME_ID
def _create_volume_db_entry(self, volume_id=_DEFAULT_VOLUME_ID):
vol = {'id': volume_id,
'size': 1,
'status': 'available'}
return db.volume_create(self.ctxt, vol)['id']
def _create_backup_db_entry(self,
volume_id=_DEFAULT_VOLUME_ID,
container='test-container',
backup_id=fake.BACKUP_ID,
parent_id=None):
try:
db.volume_get(self.ctxt, volume_id)
except exception.NotFound:
self._create_volume_db_entry(volume_id=volume_id)
backup = {'id': backup_id,
'size': 1,
'container': container,
'volume_id': volume_id,
'parent_id': parent_id,
'user_id': fake.USER_ID,
'project_id': fake.PROJECT_ID,
}
return db.backup_create(self.ctxt, backup)['id']
def setUp(self):
super(BackupNFSSwiftBasedTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.stubs.Set(hashlib, 'md5', fake_md5)
self.volume_file = tempfile.NamedTemporaryFile()
self.temp_dir = tempfile.mkdtemp()
self.addCleanup(self.volume_file.close)
self.override_config('backup_share', FAKE_BACKUP_SHARE)
self.override_config('backup_mount_point_base',
'/tmp')
self.override_config('backup_file_size', 52428800)
mock_remotefsclient = mock.Mock()
mock_remotefsclient.get_mount_point = mock.Mock(
return_value=self.temp_dir)
self.mock_object(remotefs_brick, 'RemoteFsClient',
mock.Mock(return_value=mock_remotefsclient))
# Remove tempdir.
self.addCleanup(shutil.rmtree, self.temp_dir)
for _i in range(0, 32):
self.volume_file.write(os.urandom(1024))
def test_backup_uncompressed(self):
volume_id = fake.VOLUME_ID
self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='none')
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
def test_backup_bz2(self):
volume_id = fake.VOLUME_ID
self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='bz2')
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
def test_backup_zlib(self):
volume_id = fake.VOLUME_ID
self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='zlib')
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
def test_backup_default_container(self):
volume_id = fake.VOLUME_ID
self._create_backup_db_entry(volume_id=volume_id,
container=None,
backup_id=FAKE_BACKUP_ID)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, FAKE_BACKUP_ID)
service.backup(backup, self.volume_file)
backup = objects.Backup.get_by_id(self.ctxt, FAKE_BACKUP_ID)
self.assertEqual(backup['container'], UPDATED_CONTAINER_NAME)
@mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.'
'_send_progress_end')
@mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.'
'_send_progress_notification')
def test_backup_default_container_notify(self, _send_progress,
_send_progress_end):
volume_id = fake.VOLUME_ID
self._create_backup_db_entry(volume_id=volume_id,
container=None)
# If the backup_object_number_per_notification is set to 1,
# the _send_progress method will be called for sure.
CONF.set_override("backup_object_number_per_notification", 1)
CONF.set_override("backup_enable_progress_timer", False)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
self.assertTrue(_send_progress.called)
self.assertTrue(_send_progress_end.called)
# If the backup_object_number_per_notification is increased to
# another value, the _send_progress method will not be called.
_send_progress.reset_mock()
_send_progress_end.reset_mock()
CONF.set_override("backup_object_number_per_notification", 10)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
self.assertFalse(_send_progress.called)
self.assertTrue(_send_progress_end.called)
# If the timer is enabled, the _send_progress will be called,
# since the timer can trigger the progress notification.
_send_progress.reset_mock()
_send_progress_end.reset_mock()
CONF.set_override("backup_object_number_per_notification", 10)
CONF.set_override("backup_enable_progress_timer", True)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
self.assertTrue(_send_progress.called)
self.assertTrue(_send_progress_end.called)
def test_backup_custom_container(self):
volume_id = fake.VOLUME_ID
container_name = 'fake99'
self._create_backup_db_entry(volume_id=volume_id,
container=container_name)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
self.assertEqual(backup['container'], container_name)
def test_backup_shafile(self):
volume_id = fake.VOLUME_ID
def _fake_generate_object_name_prefix(self, backup):
az = 'az_fake'
backup_name = '%s_backup_%s' % (az, backup['id'])
volume = 'volume_%s' % (backup['volume_id'])
prefix = volume + '_' + backup_name
return prefix
        # Stub out the object name prefix generator.
self.stubs.Set(nfs.NFSBackupDriver,
'_generate_object_name_prefix',
_fake_generate_object_name_prefix)
container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
'', 1)
self._create_backup_db_entry(volume_id=volume_id,
container=container_name)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
self.assertEqual(backup['container'], container_name)
# Verify sha contents
content1 = service._read_sha256file(backup)
self.assertEqual(32 * 1024 / content1['chunk_size'],
len(content1['sha256s']))
def test_backup_cmp_shafiles(self):
volume_id = fake.VOLUME_ID
def _fake_generate_object_name_prefix(self, backup):
az = 'az_fake'
backup_name = '%s_backup_%s' % (az, backup['id'])
volume = 'volume_%s' % (backup['volume_id'])
prefix = volume + '_' + backup_name
return prefix
        # Stub out the object name prefix generator.
self.stubs.Set(nfs.NFSBackupDriver,
'_generate_object_name_prefix',
_fake_generate_object_name_prefix)
container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
'', 1)
self._create_backup_db_entry(volume_id=volume_id,
container=container_name,
backup_id=fake.BACKUP_ID)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
self.assertEqual(backup['container'], container_name)
# Create incremental backup with no change to contents
self._create_backup_db_entry(volume_id=volume_id,
container=container_name,
backup_id=fake.BACKUP2_ID,
parent_id=fake.BACKUP_ID)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID)
service.backup(deltabackup, self.volume_file)
deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID)
self.assertEqual(deltabackup['container'], container_name)
# Compare shas from both files
content1 = service._read_sha256file(backup)
content2 = service._read_sha256file(deltabackup)
self.assertEqual(len(content1['sha256s']), len(content2['sha256s']))
self.assertEqual(set(content1['sha256s']), set(content2['sha256s']))
def test_backup_delta_two_objects_change(self):
volume_id = fake.VOLUME_ID
def _fake_generate_object_name_prefix(self, backup):
az = 'az_fake'
backup_name = '%s_backup_%s' % (az, backup['id'])
volume = 'volume_%s' % (backup['volume_id'])
prefix = volume + '_' + backup_name
return prefix
        # Stub out the object name prefix generator.
self.stubs.Set(nfs.NFSBackupDriver,
'_generate_object_name_prefix',
_fake_generate_object_name_prefix)
self.flags(backup_file_size=(8 * 1024))
self.flags(backup_sha_block_size_bytes=1024)
container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
'', 1)
self._create_backup_db_entry(volume_id=volume_id,
container=container_name,
backup_id=fake.BACKUP_ID)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
self.assertEqual(backup['container'], container_name)
        # Create incremental backup after changing two 1KB blocks
self.volume_file.seek(16 * 1024)
self.volume_file.write(os.urandom(1024))
self.volume_file.seek(20 * 1024)
self.volume_file.write(os.urandom(1024))
self._create_backup_db_entry(volume_id=volume_id,
container=container_name,
backup_id=fake.BACKUP2_ID,
parent_id=fake.BACKUP_ID)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID)
service.backup(deltabackup, self.volume_file)
deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID)
self.assertEqual(deltabackup['container'], container_name)
content1 = service._read_sha256file(backup)
content2 = service._read_sha256file(deltabackup)
        # Verify that two shas changed: with a 1024-byte sha block size,
        # the writes at offsets 16KB and 20KB map to indexes 16 and 20
self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16])
self.assertNotEqual(content1['sha256s'][20], content2['sha256s'][20])
def test_backup_delta_two_blocks_in_object_change(self):
volume_id = fake.VOLUME_ID
def _fake_generate_object_name_prefix(self, backup):
az = 'az_fake'
backup_name = '%s_backup_%s' % (az, backup['id'])
volume = 'volume_%s' % (backup['volume_id'])
prefix = volume + '_' + backup_name
return prefix
        # Stub out the object name prefix generator.
self.stubs.Set(nfs.NFSBackupDriver,
'_generate_object_name_prefix',
_fake_generate_object_name_prefix)
self.flags(backup_file_size=(8 * 1024))
self.flags(backup_sha_block_size_bytes=1024)
container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
'', 1)
self._create_backup_db_entry(volume_id=volume_id,
container=container_name,
backup_id=fake.BACKUP_ID)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
self.assertEqual(backup['container'], container_name)
        # Create incremental backup after changing two 1KB blocks
self.volume_file.seek(16 * 1024)
self.volume_file.write(os.urandom(1024))
self.volume_file.seek(20 * 1024)
self.volume_file.write(os.urandom(1024))
self._create_backup_db_entry(volume_id=volume_id,
container=container_name,
backup_id=fake.BACKUP2_ID,
parent_id=fake.BACKUP_ID)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID)
service.backup(deltabackup, self.volume_file)
deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID)
self.assertEqual(deltabackup['container'], container_name)
# Verify that two shas are changed at index 16 and 20
content1 = service._read_sha256file(backup)
content2 = service._read_sha256file(deltabackup)
self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16])
self.assertNotEqual(content1['sha256s'][20], content2['sha256s'][20])
def test_backup_backup_metadata_fail(self):
"""Test of when an exception occurs in backup().
In backup(), after an exception occurs in
self._backup_metadata(), we want to check the process of an
exception handler.
"""
volume_id = fake.VOLUME_ID
self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='none')
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
def fake_backup_metadata(self, backup, object_meta):
raise exception.BackupDriverException(message=_('fake'))
# Raise a pseudo exception.BackupDriverException.
self.stubs.Set(nfs.NFSBackupDriver, '_backup_metadata',
fake_backup_metadata)
        # The exception should be propagated directly.
self.assertRaises(exception.BackupDriverException,
service.backup,
backup, self.volume_file)
def test_backup_backup_metadata_fail2(self):
"""Test of when an exception occurs in an exception handler.
In backup(), after an exception occurs in
self._backup_metadata(), we want to check the process when the
second exception occurs in self.delete().
"""
volume_id = fake.VOLUME_ID
self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='none')
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
def fake_backup_metadata(self, backup, object_meta):
raise exception.BackupDriverException(message=_('fake'))
# Raise a pseudo exception.BackupDriverException.
self.stubs.Set(nfs.NFSBackupDriver, '_backup_metadata',
fake_backup_metadata)
def fake_delete(self, backup):
raise exception.BackupOperationError()
# Raise a pseudo exception.BackupOperationError.
self.stubs.Set(nfs.NFSBackupDriver, 'delete', fake_delete)
        # The second exception should be the one propagated.
self.assertRaises(exception.BackupOperationError,
service.backup,
backup, self.volume_file)
def test_restore_uncompressed(self):
volume_id = fake.VOLUME_ID
self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='none')
self.flags(backup_sha_block_size_bytes=32)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
with tempfile.NamedTemporaryFile() as restored_file:
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.restore(backup, volume_id, restored_file)
self.assertTrue(filecmp.cmp(self.volume_file.name,
restored_file.name))
def test_restore_bz2(self):
volume_id = fake.VOLUME_ID
self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='bz2')
self.flags(backup_file_size=(1024 * 3))
self.flags(backup_sha_block_size_bytes=1024)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
with tempfile.NamedTemporaryFile() as restored_file:
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.restore(backup, volume_id, restored_file)
self.assertTrue(filecmp.cmp(self.volume_file.name,
restored_file.name))
def test_restore_zlib(self):
volume_id = fake.VOLUME_ID
self._create_backup_db_entry(volume_id=volume_id)
self.flags(backup_compression_algorithm='zlib')
self.flags(backup_file_size=(1024 * 3))
        self.flags(backup_sha_block_size_bytes=1024)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
with tempfile.NamedTemporaryFile() as restored_file:
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.restore(backup, volume_id, restored_file)
self.assertTrue(filecmp.cmp(self.volume_file.name,
restored_file.name))
def test_restore_delta(self):
volume_id = fake.VOLUME_ID
def _fake_generate_object_name_prefix(self, backup):
az = 'az_fake'
backup_name = '%s_backup_%s' % (az, backup['id'])
volume = 'volume_%s' % (backup['volume_id'])
prefix = volume + '_' + backup_name
return prefix
        # Stub out _generate_object_name_prefix for a deterministic layout.
self.stubs.Set(nfs.NFSBackupDriver,
'_generate_object_name_prefix',
_fake_generate_object_name_prefix)
        self.flags(backup_file_size=(1024 * 8))
self.flags(backup_sha_block_size_bytes=1024)
container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
'', 1)
self._create_backup_db_entry(volume_id=volume_id,
container=container_name,
backup_id=fake.BACKUP_ID)
service = nfs.NFSBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.backup(backup, self.volume_file)
        # Change two 1KB blocks of the contents, then create an incremental backup
self.volume_file.seek(16 * 1024)
self.volume_file.write(os.urandom(1024))
self.volume_file.seek(20 * 1024)
self.volume_file.write(os.urandom(1024))
self._create_backup_db_entry(volume_id=volume_id,
container=container_name,
backup_id=fake.BACKUP2_ID,
parent_id=fake.BACKUP_ID)
self.volume_file.seek(0)
deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID)
service.backup(deltabackup, self.volume_file, True)
deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID)
with tempfile.NamedTemporaryFile() as restored_file:
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID)
service.restore(backup, volume_id,
restored_file)
self.assertTrue(filecmp.cmp(self.volume_file.name,
restored_file.name))
def test_delete(self):
volume_id = fake.VOLUME_ID
self._create_backup_db_entry(volume_id=volume_id)
service = nfs.NFSBackupDriver(self.ctxt)
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
service.delete(backup)
def test_get_compressor(self):
service = nfs.NFSBackupDriver(self.ctxt)
compressor = service._get_compressor('None')
self.assertIsNone(compressor)
compressor = service._get_compressor('zlib')
self.assertEqual(compressor, zlib)
compressor = service._get_compressor('bz2')
self.assertEqual(compressor, bz2)
self.assertRaises(ValueError, service._get_compressor, 'fake')
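    def _compressor_lookup_sketch(self, algorithm):
        # Illustrative helper, not the NFSBackupDriver implementation: a
        # minimal name -> module lookup matching the behavior asserted in
        # test_get_compressor above. The mapping below is an assumption
        # based on those assertions.
        compressors = {'none': None, 'zlib': zlib, 'bz2': bz2}
        try:
            return compressors[algorithm.lower()]
        except KeyError:
            raise ValueError('unknown compression algorithm: %s' % algorithm)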
def create_buffer(self, size):
# Set up buffer of zeroed bytes
fake_data = bytearray(size)
if six.PY2:
            # On Python 2, zlib.compress() accepts buffer, but not bytearray
fake_data = buffer(fake_data)
return fake_data
def test_prepare_output_data_effective_compression(self):
service = nfs.NFSBackupDriver(self.ctxt)
fake_data = self.create_buffer(128)
result = service._prepare_output_data(fake_data)
self.assertEqual('zlib', result[0])
        self.assertGreater(len(fake_data), len(result[1]))
    def test_prepare_output_data_no_compression(self):
self.flags(backup_compression_algorithm='none')
service = nfs.NFSBackupDriver(self.ctxt)
fake_data = self.create_buffer(128)
result = service._prepare_output_data(fake_data)
self.assertEqual('none', result[0])
self.assertEqual(fake_data, result[1])
def test_prepare_output_data_ineffective_compression(self):
service = nfs.NFSBackupDriver(self.ctxt)
fake_data = self.create_buffer(128)
# Pre-compress so that compression in the driver will be ineffective.
already_compressed_data = service.compressor.compress(fake_data)
result = service._prepare_output_data(already_compressed_data)
self.assertEqual('none', result[0])
self.assertEqual(already_compressed_data, result[1])
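    @staticmethod
    def _prepare_output_data_sketch(compressor, data):
        # Illustrative sketch, not the driver's code: compress a chunk and
        # fall back to storing it uncompressed when compression does not
        # actually shrink it, mirroring the behavior asserted by the three
        # _prepare_output_data tests above. `compressor` is assumed to be
        # None or a zlib-like module.
        if compressor is None:
            return 'none', data
        compressed = compressor.compress(data)
        if len(compressed) < len(data):
            return 'zlib', compressed
        return 'none', data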
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from re import match
from json import dumps
from ..decorators import requires_auth
from .comment import IssueComment, issue_comment_params
from .event import IssueEvent
from .label import Label
from .milestone import Milestone
from ..models import GitHubCore
from ..users import User
from uritemplate import URITemplate
class Issue(GitHubCore):
"""The :class:`Issue <Issue>` object. It structures and handles the data
returned via the `Issues <http://developer.github.com/v3/issues>`_ section
of the GitHub API.
Two issue instances can be checked like so::
i1 == i2
i1 != i2
And is equivalent to::
i1.id == i2.id
i1.id != i2.id
"""
def _update_attributes(self, issue):
self._api = issue.get('url', '')
#: :class:`User <github3.users.User>` representing the user the issue
#: was assigned to.
self.assignee = issue.get('assignee')
if self.assignee:
self.assignee = User(issue.get('assignee'), self)
#: Body (description) of the issue.
self.body = issue.get('body', '')
#: HTML formatted body of the issue.
self.body_html = issue.get('body_html', '')
#: Plain text formatted body of the issue.
self.body_text = issue.get('body_text', '')
# If an issue is still open, this field will be None
#: datetime object representing when the issue was closed.
self.closed_at = self._strptime(issue.get('closed_at'))
#: Number of comments on this issue.
self.comments_count = issue.get('comments')
#: Comments url (not a template)
self.comments_url = issue.get('comments_url')
#: datetime object representing when the issue was created.
self.created_at = self._strptime(issue.get('created_at'))
#: Events url (not a template)
self.events_url = issue.get('events_url')
#: URL to view the issue at GitHub.
self.html_url = issue.get('html_url')
#: Unique ID for the issue.
self.id = issue.get('id')
#: Returns the list of :class:`Label <github3.issues.label.Label>`\ s
#: on this issue.
        self.original_labels = [
            Label(l, self) for l in issue.get('labels') or []
        ]
labels_url = issue.get('labels_url')
#: Labels URL Template. Expand with ``name``
self.labels_urlt = URITemplate(labels_url) if labels_url else None
#: :class:`Milestone <github3.issues.milestone.Milestone>` this
#: issue was assigned to.
self.milestone = None
if issue.get('milestone'):
self.milestone = Milestone(issue.get('milestone'), self)
#: Issue number (e.g. #15)
self.number = issue.get('number')
#: Dictionary URLs for the pull request (if they exist)
self.pull_request_urls = issue.get('pull_request', {})
        m = match(r'https?://[\w\d\-\.\:]+/(\S+)/(\S+)/(?:issues|pull)/\d+',
                  self.html_url)
#: Returns ('owner', 'repository') this issue was filed on.
self.repository = m.groups()
#: State of the issue, e.g., open, closed
self.state = issue.get('state')
#: Title of the issue.
self.title = issue.get('title')
#: datetime object representing the last time the issue was updated.
self.updated_at = self._strptime(issue.get('updated_at'))
#: :class:`User <github3.users.User>` who opened the issue.
self.user = User(issue.get('user'), self)
closed_by = issue.get('closed_by')
#: :class:`User <github3.users.User>` who closed the issue.
self.closed_by = User(closed_by, self) if closed_by else None
def _repr(self):
return '<Issue [{r[0]}/{r[1]} #{n}]>'.format(r=self.repository,
n=self.number)
@requires_auth
def add_labels(self, *args):
"""Add labels to this issue.
:param str args: (required), names of the labels you wish to add
:returns: list of :class:`Label`\ s
"""
url = self._build_url('labels', base_url=self._api)
json = self._json(self._post(url, data=args), 200)
return [Label(l, self) for l in json] if json else []
@requires_auth
def assign(self, username):
"""Assigns user ``username`` to this issue. This is a short cut for
``issue.edit``.
:param str username: username of the person to assign this issue to
:returns: bool
"""
if not username:
return False
number = self.milestone.number if self.milestone else None
labels = [str(l) for l in self.original_labels]
return self.edit(self.title, self.body, username, self.state, number,
labels)
@requires_auth
def close(self):
"""Close this issue.
:returns: bool
"""
assignee = self.assignee.login if self.assignee else ''
number = self.milestone.number if self.milestone else None
labels = [str(l) for l in self.original_labels]
return self.edit(self.title, self.body, assignee, 'closed',
number, labels)
def comment(self, id_num):
"""Get a single comment by its id.
The catch here is that id is NOT a simple number to obtain. If
you were to look at the comments on issue #15 in
sigmavirus24/Todo.txt-python, the first comment's id is 4150787.
:param int id_num: (required), comment id, see example above
:returns: :class:`IssueComment <github3.issues.comment.IssueComment>`
"""
json = None
if int(id_num) > 0: # Might as well check that it's positive
owner, repo = self.repository
url = self._build_url('repos', owner, repo, 'issues', 'comments',
str(id_num))
json = self._json(self._get(url), 200)
return self._instance_or_null(IssueComment, json)
def comments(self, number=-1, sort='', direction='', since=None):
"""Iterate over the comments on this issue.
:param int number: (optional), number of comments to iterate over
Default: -1 returns all comments
        :param str sort: accepted values: ('created', 'updated')
api-default: created
:param str direction: accepted values: ('asc', 'desc')
Ignored without the sort parameter
:param since: (optional), Only issues after this date will
be returned. This can be a `datetime` or an ISO8601 formatted
date string, e.g., 2012-05-20T23:10:27Z
:type since: datetime or string
:returns: iterator of
:class:`IssueComment <github3.issues.comment.IssueComment>`\ s
"""
url = self._build_url('comments', base_url=self._api)
params = issue_comment_params(sort, direction, since)
return self._iter(int(number), url, IssueComment, params)
@requires_auth
def create_comment(self, body):
"""Create a comment on this issue.
:param str body: (required), comment body
:returns: :class:`IssueComment <github3.issues.comment.IssueComment>`
"""
json = None
if body:
url = self._build_url('comments', base_url=self._api)
json = self._json(self._post(url, data={'body': body}),
201)
return self._instance_or_null(IssueComment, json)
@requires_auth
def edit(self, title=None, body=None, assignee=None, state=None,
milestone=None, labels=None):
"""Edit this issue.
:param str title: Title of the issue
:param str body: markdown formatted body (description) of the issue
:param str assignee: login name of user the issue should be assigned
to
:param str state: accepted values: ('open', 'closed')
:param int milestone: the NUMBER (not title) of the milestone to
assign this to [1]_, or 0 to remove the milestone
:param list labels: list of labels to apply this to
:returns: bool
.. [1] Milestone numbering starts at 1, i.e. the first milestone you
create is 1, the second is 2, etc.
"""
json = None
data = {'title': title, 'body': body, 'assignee': assignee,
'state': state, 'milestone': milestone, 'labels': labels}
self._remove_none(data)
if data:
if 'milestone' in data and data['milestone'] == 0:
data['milestone'] = None
json = self._json(self._patch(self._api, data=dumps(data)), 200)
if json:
self._update_attributes(json)
return True
return False
def events(self, number=-1):
"""Iterate over events associated with this issue only.
:param int number: (optional), number of events to return. Default: -1
returns all events available.
:returns: generator of
:class:`IssueEvent <github3.issues.event.IssueEvent>`\ s
"""
url = self._build_url('events', base_url=self._api)
return self._iter(int(number), url, IssueEvent)
def is_closed(self):
"""Checks if the issue is closed.
:returns: bool
"""
if self.closed_at or (self.state == 'closed'):
return True
return False
def labels(self, number=-1, etag=None):
"""Iterate over the labels associated with this issue.
:param int number: (optional), number of labels to return. Default: -1
returns all labels applied to this issue.
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of :class:`Label <github3.issues.label.Label>`\ s
"""
url = self._build_url('labels', base_url=self._api)
return self._iter(int(number), url, Label, etag=etag)
def pull_request(self):
"""Retrieve the pull request associated with this issue.
:returns: :class:`~github3.pulls.PullRequest`
"""
from .. import pulls
json = None
pull_request_url = self.pull_request_urls.get('url')
if pull_request_url:
json = self._json(self._get(pull_request_url), 200)
return self._instance_or_null(pulls.PullRequest, json)
@requires_auth
def remove_label(self, name):
"""Removes label ``name`` from this issue.
:param str name: (required), name of the label to remove
:returns: bool
"""
url = self._build_url('labels', name, base_url=self._api)
# Docs say it should be a list of strings returned, practice says it
        # is just a 204/404 response. I'm tentatively changing this until I
# hear back from Support.
return self._boolean(self._delete(url), 204, 404)
@requires_auth
def remove_all_labels(self):
"""Remove all labels from this issue.
:returns: an empty list if successful
"""
# Can either send DELETE or [] to remove all labels
return self.replace_labels([])
@requires_auth
def replace_labels(self, labels):
"""Replace all labels on this issue with ``labels``.
:param list labels: label names
        :returns: list of :class:`Label <github3.issues.label.Label>`\ s
"""
url = self._build_url('labels', base_url=self._api)
json = self._json(self._put(url, data=dumps(labels)), 200)
return [Label(l, self) for l in json] if json else []
@requires_auth
def reopen(self):
"""Re-open a closed issue.
:returns: bool
"""
assignee = self.assignee.login if self.assignee else ''
number = self.milestone.number if self.milestone else None
labels = [str(l) for l in self.original_labels]
return self.edit(self.title, self.body, assignee, 'open',
number, labels)
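def _edit_usage_sketch(issue):
    """Illustrative only: exercise the Issue.edit semantics documented
    above on an already-retrieved, authenticated ``issue``. The label
    name below is a placeholder.
    """
    # milestone=0 detaches the milestone (see Issue.edit); only non-None
    # fields are sent, so everything else is left untouched.
    issue.edit(milestone=0)
    return issue.edit(labels=['bug'])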
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
import os
from proton import *
from . import common
from .common import Skipped, pump
def _testpath(file):
""" Set the full path to the certificate,keyfile, etc. for the test.
"""
if os.name=="nt":
if file.find("private-key")!=-1:
# The private key is not in a separate store
return None
# Substitute pkcs#12 equivalent for the CA/key store
if file.endswith(".pem"):
file = file[:-4] + ".p12"
return os.path.join(os.path.dirname(__file__),
"ssl_db/%s" % file)
class SslTest(common.Test):
def __init__(self, *args):
common.Test.__init__(self, *args)
self._testpath = _testpath
def setUp(self):
if not common.isSSLPresent():
raise Skipped("No SSL libraries found.")
self.server_domain = SSLDomain(SSLDomain.MODE_SERVER)
self.client_domain = SSLDomain(SSLDomain.MODE_CLIENT)
def tearDown(self):
self.server_domain = None
self.client_domain = None
class SslTestConnection(object):
""" Represents a single SSL connection.
"""
def __init__(self, domain=None, mode=Transport.CLIENT,
session_details=None, conn_hostname=None,
ssl_peername=None):
if not common.isSSLPresent():
raise Skipped("No SSL libraries found.")
self.ssl = None
self.domain = domain
self.transport = Transport(mode)
self.connection = Connection()
if conn_hostname:
self.connection.hostname = conn_hostname
if domain:
self.ssl = SSL( self.transport, self.domain, session_details )
if ssl_peername:
self.ssl.peer_hostname = ssl_peername
# bind last, after all configuration complete:
self.transport.bind(self.connection)
def _pump(self, ssl_client, ssl_server, buffer_size=1024):
pump(ssl_client.transport, ssl_server.transport, buffer_size)
def _do_handshake(self, client, server):
""" Attempt to connect client to server. Will throw a TransportException if the SSL
handshake fails.
"""
client.connection.open()
server.connection.open()
self._pump(client, server)
if client.transport.closed:
return
assert client.ssl.protocol_name() is not None
client.connection.close()
server.connection.close()
self._pump(client, server)
    def test_defaults(self):
        """ By default, both the server and the client support anonymous
        ciphers - they should connect without need for a certificate.
        """
        if os.name=="nt":
            raise Skipped("Windows SChannel lacks anonymous cipher support.")
server = SslTest.SslTestConnection( self.server_domain, mode=Transport.SERVER )
client = SslTest.SslTestConnection( self.client_domain )
# check that no SSL connection exists
assert not server.ssl.cipher_name()
assert not client.ssl.protocol_name()
#client.transport.trace(Transport.TRACE_DRV)
#server.transport.trace(Transport.TRACE_DRV)
client.connection.open()
server.connection.open()
self._pump( client, server )
# now SSL should be active
assert server.ssl.cipher_name() is not None
assert client.ssl.protocol_name() is not None
client.connection.close()
server.connection.close()
self._pump( client, server )
def test_ssl_with_small_buffer(self):
self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
self._testpath("server-private-key.pem"),
"server-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER )
server = SslTest.SslTestConnection( self.server_domain, mode=Transport.SERVER )
client = SslTest.SslTestConnection( self.client_domain )
client.connection.open()
server.connection.open()
small_buffer_size = 1
self._pump( client, server, small_buffer_size )
assert client.ssl.protocol_name() is not None
client.connection.close()
server.connection.close()
self._pump( client, server )
def test_server_certificate(self):
""" Test that anonymous clients can still connect to a server that has
a certificate configured.
"""
self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
self._testpath("server-private-key.pem"),
"server-password")
server = SslTest.SslTestConnection( self.server_domain, mode=Transport.SERVER )
client = SslTest.SslTestConnection( self.client_domain )
client.connection.open()
server.connection.open()
self._pump( client, server )
assert client.ssl.protocol_name() is not None
client.connection.close()
server.connection.close()
self._pump( client, server )
def test_server_authentication(self):
""" Simple SSL connection with authentication of the server
"""
self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
self._testpath("server-private-key.pem"),
"server-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER )
server = SslTest.SslTestConnection( self.server_domain, mode=Transport.SERVER )
client = SslTest.SslTestConnection( self.client_domain )
client.connection.open()
server.connection.open()
self._pump( client, server )
assert client.ssl.protocol_name() is not None
client.connection.close()
server.connection.close()
self._pump( client, server )
def test_certificate_fingerprint_and_subfields(self):
if os.name=="nt":
raise Skipped("Windows support for certificate fingerprint and subfield not implemented yet")
self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
self._testpath("server-private-key.pem"),
"server-password")
self.server_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.server_domain.set_peer_authentication( SSLDomain.VERIFY_PEER,
self._testpath("ca-certificate.pem") )
server = SslTest.SslTestConnection( self.server_domain, mode=Transport.SERVER )
# give the client a certificate, but let's not require server authentication
self.client_domain.set_credentials(self._testpath("client-certificate1.pem"),
self._testpath("client-private-key1.pem"),
"client-password")
self.client_domain.set_peer_authentication( SSLDomain.ANONYMOUS_PEER )
client = SslTest.SslTestConnection( self.client_domain )
client.connection.open()
server.connection.open()
self._pump( client, server )
# Test the subject subfields
self.assertEqual("Client", server.ssl.get_cert_organization())
self.assertEqual("Dev", server.ssl.get_cert_organization_unit())
self.assertEqual("ST", server.ssl.get_cert_state_or_province())
self.assertEqual("US", server.ssl.get_cert_country())
self.assertEqual("City", server.ssl.get_cert_locality_or_city())
self.assertEqual("O=Server,CN=A1.Good.Server.domain.com", client.ssl.get_cert_subject())
self.assertEqual("O=Client,CN=127.0.0.1,C=US,ST=ST,L=City,OU=Dev", server.ssl.get_cert_subject())
self.assertEqual("f78f03ec31317c213dcf607c095242adbf067824", server.ssl.get_cert_fingerprint_sha1())
self.assertEqual("3836fd0d7bbc155158997ff336de29545cc1ce4137f8419062ceb8b50fd7a6f9", server.ssl.get_cert_fingerprint_sha256())
self.assertEqual("a8390634eb10c7a12ba3ce0837001bc6ae78c7690984f4788cf4430acdb496d5d9e02c8ec39219f5c4dcd908c34861d09481c2faf53b4ccc95dac60e623165c4",
server.ssl.get_cert_fingerprint_sha512())
self.assertEqual("32b7bc119f61c71d368caaf9a6bf58b2", server.ssl.get_cert_fingerprint_md5())
# Test the various fingerprint algorithms
self.assertEqual("0aab5922c8657a7fb78402b79379506d3d7806ce", client.ssl.get_cert_fingerprint_sha1())
self.assertEqual("de5e0c4097f841815a769ce1a30dbe912b83711438a5aaf50001da23cee5a8a8", client.ssl.get_cert_fingerprint_sha256())
self.assertEqual("d0aceeb68ab9de57c9e1c21a43a4511c54ec94011e770a523a6352b1374f59c8b58adc93d5cad6f25aa125b5934309a61a25e74a5d5e0cb40b07c7468615944c",
client.ssl.get_cert_fingerprint_sha512())
self.assertEqual("ae0ebcebc1f970fb696ef9f56e3235da", client.ssl.get_cert_fingerprint_md5())
self.assertEqual(None, client.ssl.get_cert_fingerprint(21, SSL.SHA1)) # Should be at least 41
self.assertEqual(None, client.ssl.get_cert_fingerprint(50, SSL.SHA256)) # Should be at least 65
self.assertEqual(None, client.ssl.get_cert_fingerprint(128, SSL.SHA512)) # Should be at least 129
self.assertEqual(None, client.ssl.get_cert_fingerprint(10, SSL.MD5)) # Should be at least 33
self.assertEqual(None, client.ssl._get_cert_subject_unknown_subfield())
self.assertNotEqual(None, client.ssl.get_cert_fingerprint(50, SSL.SHA1)) # Should be at least 41
self.assertNotEqual(None, client.ssl.get_cert_fingerprint(70, SSL.SHA256)) # Should be at least 65
self.assertNotEqual(None, client.ssl.get_cert_fingerprint(130, SSL.SHA512)) # Should be at least 129
self.assertNotEqual(None, client.ssl.get_cert_fingerprint(35, SSL.MD5)) # Should be at least 33
self.assertEqual(None, client.ssl._get_cert_fingerprint_unknown_hash_alg())
def test_client_authentication(self):
""" Force the client to authenticate.
"""
# note: when requesting client auth, the server _must_ send its
# certificate, so make sure we configure one!
self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
self._testpath("server-private-key.pem"),
"server-password")
self.server_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.server_domain.set_peer_authentication( SSLDomain.VERIFY_PEER,
self._testpath("ca-certificate.pem") )
server = SslTest.SslTestConnection( self.server_domain, mode=Transport.SERVER )
# give the client a certificate, but let's not require server authentication
self.client_domain.set_credentials(self._testpath("client-certificate.pem"),
self._testpath("client-private-key.pem"),
"client-password")
self.client_domain.set_peer_authentication( SSLDomain.ANONYMOUS_PEER )
client = SslTest.SslTestConnection( self.client_domain )
client.connection.open()
server.connection.open()
self._pump( client, server )
assert client.ssl.protocol_name() is not None
client.connection.close()
server.connection.close()
self._pump( client, server )
def test_client_authentication_fail_bad_cert(self):
""" Ensure that the server can detect a bad client certificate.
"""
# note: when requesting client auth, the server _must_ send its
# certificate, so make sure we configure one!
self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
self._testpath("server-private-key.pem"),
"server-password")
self.server_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.server_domain.set_peer_authentication( SSLDomain.VERIFY_PEER,
self._testpath("ca-certificate.pem") )
server = SslTest.SslTestConnection( self.server_domain, mode=Transport.SERVER )
self.client_domain.set_credentials(self._testpath("bad-server-certificate.pem"),
self._testpath("bad-server-private-key.pem"),
"server-password")
self.client_domain.set_peer_authentication( SSLDomain.ANONYMOUS_PEER )
client = SslTest.SslTestConnection( self.client_domain )
client.connection.open()
server.connection.open()
self._pump( client, server )
assert client.transport.closed
assert server.transport.closed
assert client.connection.state & Endpoint.REMOTE_UNINIT
assert server.connection.state & Endpoint.REMOTE_UNINIT
def test_client_authentication_fail_no_cert(self):
""" Ensure that the server will fail a client that does not provide a
certificate.
"""
# note: when requesting client auth, the server _must_ send its
# certificate, so make sure we configure one!
self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
self._testpath("server-private-key.pem"),
"server-password")
self.server_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.server_domain.set_peer_authentication( SSLDomain.VERIFY_PEER,
self._testpath("ca-certificate.pem") )
server = SslTest.SslTestConnection( self.server_domain, mode=Transport.SERVER )
self.client_domain.set_peer_authentication( SSLDomain.ANONYMOUS_PEER )
client = SslTest.SslTestConnection( self.client_domain )
client.connection.open()
server.connection.open()
self._pump( client, server )
assert client.transport.closed
assert server.transport.closed
assert client.connection.state & Endpoint.REMOTE_UNINIT
assert server.connection.state & Endpoint.REMOTE_UNINIT
def test_client_server_authentication(self):
""" Require both client and server to mutually identify themselves.
"""
self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
self._testpath("server-private-key.pem"),
"server-password")
self.server_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.server_domain.set_peer_authentication( SSLDomain.VERIFY_PEER,
self._testpath("ca-certificate.pem") )
self.client_domain.set_credentials(self._testpath("client-certificate.pem"),
self._testpath("client-private-key.pem"),
"client-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER )
server = SslTest.SslTestConnection( self.server_domain, mode=Transport.SERVER )
client = SslTest.SslTestConnection( self.client_domain )
client.connection.open()
server.connection.open()
self._pump( client, server )
assert client.ssl.protocol_name() is not None
client.connection.close()
server.connection.close()
self._pump( client, server )
def test_server_only_authentication(self):
""" Client verifies server, but server does not verify client.
"""
self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
self._testpath("server-private-key.pem"),
"server-password")
self.server_domain.set_peer_authentication( SSLDomain.ANONYMOUS_PEER )
self.client_domain.set_credentials(self._testpath("client-certificate.pem"),
self._testpath("client-private-key.pem"),
"client-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER )
server = SslTest.SslTestConnection( self.server_domain, mode=Transport.SERVER )
client = SslTest.SslTestConnection( self.client_domain )
client.connection.open()
server.connection.open()
self._pump( client, server )
assert client.ssl.protocol_name() is not None
client.connection.close()
server.connection.close()
self._pump( client, server )
def test_bad_server_certificate(self):
""" A server with a self-signed certificate that is not trusted by the
client. The client should reject the server.
"""
self.server_domain.set_credentials(self._testpath("bad-server-certificate.pem"),
self._testpath("bad-server-private-key.pem"),
"server-password")
self.server_domain.set_peer_authentication( SSLDomain.ANONYMOUS_PEER )
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER )
server = SslTest.SslTestConnection( self.server_domain, mode=Transport.SERVER )
client = SslTest.SslTestConnection( self.client_domain )
client.connection.open()
server.connection.open()
self._pump( client, server )
assert client.transport.closed
assert server.transport.closed
assert client.connection.state & Endpoint.REMOTE_UNINIT
assert server.connection.state & Endpoint.REMOTE_UNINIT
del server
del client
# now re-try with a client that does not require peer verification
self.client_domain.set_peer_authentication( SSLDomain.ANONYMOUS_PEER )
client = SslTest.SslTestConnection( self.client_domain )
server = SslTest.SslTestConnection( self.server_domain, mode=Transport.SERVER )
client.connection.open()
server.connection.open()
self._pump( client, server )
assert client.ssl.protocol_name() is not None
client.connection.close()
server.connection.close()
self._pump( client, server )
def test_allow_unsecured_client_which_connects_unsecured(self):
""" Server allows an unsecured client to connect if configured.
"""
self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
self._testpath("server-private-key.pem"),
"server-password")
self.server_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.server_domain.set_peer_authentication( SSLDomain.VERIFY_PEER,
self._testpath("ca-certificate.pem") )
# allow unsecured clients on this connection
self.server_domain.allow_unsecured_client()
server = SslTest.SslTestConnection( self.server_domain, mode=Transport.SERVER )
# non-ssl connection
client = SslTest.SslTestConnection()
client.connection.open()
server.connection.open()
self._pump( client, server )
assert server.ssl.protocol_name() is None
client.connection.close()
server.connection.close()
self._pump( client, server )
def test_allow_unsecured_client_which_connects_secured(self):
""" As per test_allow_unsecured_client_which_connects_unsecured
but client actually uses SSL
"""
self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
self._testpath("server-private-key.pem"),
"server-password")
self.server_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.server_domain.set_peer_authentication( SSLDomain.VERIFY_PEER,
self._testpath("ca-certificate.pem") )
self.client_domain.set_credentials(self._testpath("client-certificate.pem"),
self._testpath("client-private-key.pem"),
"client-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER )
# allow unsecured clients on this connection
#self.server_domain.allow_unsecured_client()
# client uses ssl. Server should detect this.
client = SslTest.SslTestConnection( self.client_domain )
server = SslTest.SslTestConnection( self.server_domain, mode=Transport.SERVER )
client.connection.open()
server.connection.open()
self._pump( client, server )
assert server.ssl.protocol_name() is not None
client.connection.close()
server.connection.close()
self._pump( client, server )
def test_disallow_unsecured_client(self):
""" Non-SSL Client is disallowed from connecting to server.
"""
self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
self._testpath("server-private-key.pem"),
"server-password")
self.server_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.server_domain.set_peer_authentication( SSLDomain.ANONYMOUS_PEER )
server = SslTest.SslTestConnection( self.server_domain, mode=Transport.SERVER )
# non-ssl connection
client = SslTest.SslTestConnection()
client.connection.open()
server.connection.open()
self._pump( client, server )
assert client.transport.closed
assert server.transport.closed
assert client.connection.state & Endpoint.REMOTE_UNINIT
assert server.connection.state & Endpoint.REMOTE_UNINIT
def test_session_resume(self):
""" Test resume of client session.
"""
if os.name=="nt":
raise Skipped("Windows SChannel session resume not yet implemented.")
self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
self._testpath("server-private-key.pem"),
"server-password")
self.server_domain.set_peer_authentication( SSLDomain.ANONYMOUS_PEER )
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER )
# details will be used in initial and subsequent connections to allow session to be resumed
initial_session_details = SSLSessionDetails("my-session-id")
server = SslTest.SslTestConnection( self.server_domain, mode=Transport.SERVER )
client = SslTest.SslTestConnection( self.client_domain, session_details=initial_session_details )
# bring up the connection and store its state
client.connection.open()
server.connection.open()
self._pump( client, server )
assert client.ssl.protocol_name() is not None
# cleanly shutdown the connection
client.connection.close()
server.connection.close()
self._pump( client, server )
# destroy the existing clients
del client
del server
# now create a new set of connections, use last session id
server = SslTest.SslTestConnection( self.server_domain, mode=Transport.SERVER )
# provide the details of the last session, allowing it to be resumed
client = SslTest.SslTestConnection( self.client_domain, session_details=initial_session_details )
#client.transport.trace(Transport.TRACE_DRV)
#server.transport.trace(Transport.TRACE_DRV)
client.connection.open()
server.connection.open()
self._pump( client, server )
assert server.ssl.protocol_name() is not None
if(API_LANGUAGE=="C"):
assert client.ssl.resume_status() == SSL.RESUME_REUSED
else:
# Java gives no way to check whether a previous session has been resumed
pass
client.connection.close()
server.connection.close()
self._pump( client, server )
# now try to resume using an unknown session-id, expect resume to fail
# and a new session is negotiated
del client
del server
server = SslTest.SslTestConnection( self.server_domain, mode=Transport.SERVER )
client = SslTest.SslTestConnection( self.client_domain, session_details=SSLSessionDetails("some-other-session-id") )
client.connection.open()
server.connection.open()
self._pump( client, server )
assert server.ssl.protocol_name() is not None
if(API_LANGUAGE=="C"):
assert client.ssl.resume_status() == SSL.RESUME_NEW
client.connection.close()
server.connection.close()
self._pump( client, server )
def test_multiple_sessions(self):
""" Test multiple simultaneous active SSL sessions with bi-directional
certificate verification, shared across two domains.
"""
self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
self._testpath("server-private-key.pem"),
"server-password")
self.server_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.server_domain.set_peer_authentication( SSLDomain.VERIFY_PEER,
self._testpath("ca-certificate.pem") )
self.client_domain.set_credentials(self._testpath("client-certificate.pem"),
self._testpath("client-private-key.pem"),
"client-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER )
max_count = 100
sessions = [(SslTest.SslTestConnection( self.server_domain, mode=Transport.SERVER ),
SslTest.SslTestConnection( self.client_domain )) for x in
range(max_count)]
for s in sessions:
s[0].connection.open()
self._pump( s[0], s[1] )
for s in sessions:
s[1].connection.open()
self._pump( s[1], s[0] )
assert s[0].ssl.cipher_name() is not None
assert s[1].ssl.cipher_name() == s[0].ssl.cipher_name()
for s in sessions:
s[1].connection.close()
self._pump( s[0], s[1] )
for s in sessions:
s[0].connection.close()
self._pump( s[1], s[0] )
def test_server_hostname_authentication(self):
""" Test authentication of the names held in the server's certificate
against various configured hostnames.
"""
if os.name=="nt":
raise Skipped("PROTON-1057: disable temporarily on Windows.")
# Check the CommonName matches (case insensitive).
# Assumes certificate contains "CN=A1.Good.Server.domain.com"
self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
self._testpath("server-private-key.pem"),
"server-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER_NAME )
server = SslTest.SslTestConnection( self.server_domain, mode=Transport.SERVER )
client = SslTest.SslTestConnection( self.client_domain )
client.ssl.peer_hostname = "a1.good.server.domain.com"
assert client.ssl.peer_hostname == "a1.good.server.domain.com"
self._do_handshake( client, server )
del server
del client
self.tearDown()
# Should fail on CN name mismatch:
self.setUp()
self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
self._testpath("server-private-key.pem"),
"server-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER_NAME )
server = SslTest.SslTestConnection( self.server_domain, mode=Transport.SERVER )
client = SslTest.SslTestConnection( self.client_domain )
client.ssl.peer_hostname = "A1.Good.Server.domain.comX"
self._do_handshake( client, server )
assert client.transport.closed
assert server.transport.closed
assert client.connection.state & Endpoint.REMOTE_UNINIT
assert server.connection.state & Endpoint.REMOTE_UNINIT
del server
del client
self.tearDown()
# Wildcarded Certificate
# Assumes:
# 1) certificate contains Server Alternate Names:
# "alternate.name.one.com" and "another.name.com"
# 2) certificate has wildcarded CommonName "*.prefix*.domain.com"
#
# Pass: match an alternate
self.setUp()
self.server_domain.set_credentials(self._testpath("server-wc-certificate.pem"),
self._testpath("server-wc-private-key.pem"),
"server-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER_NAME )
server = SslTest.SslTestConnection( self.server_domain, mode=Transport.SERVER )
client = SslTest.SslTestConnection( self.client_domain )
client.ssl.peer_hostname = "alternate.Name.one.com"
self._do_handshake( client, server )
del client
del server
self.tearDown()
# Pass: match an alternate
self.setUp()
self.server_domain.set_credentials(self._testpath("server-wc-certificate.pem"),
self._testpath("server-wc-private-key.pem"),
"server-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER_NAME )
server = SslTest.SslTestConnection( self.server_domain, mode=Transport.SERVER )
client = SslTest.SslTestConnection( self.client_domain )
client.ssl.peer_hostname = "ANOTHER.NAME.COM"
self._do_handshake(client, server)
del client
del server
self.tearDown()
# Pass: match the pattern
self.setUp()
self.server_domain.set_credentials(self._testpath("server-wc-certificate.pem"),
self._testpath("server-wc-private-key.pem"),
"server-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER_NAME )
server = SslTest.SslTestConnection( self.server_domain, mode=Transport.SERVER )
client = SslTest.SslTestConnection( self.client_domain )
client.ssl.peer_hostname = "SOME.PREfix.domain.COM"
self._do_handshake( client, server )
del client
del server
self.tearDown()
# Pass: match the pattern
self.setUp()
self.server_domain.set_credentials(self._testpath("server-wc-certificate.pem"),
self._testpath("server-wc-private-key.pem"),
"server-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER_NAME )
server = SslTest.SslTestConnection( self.server_domain, mode=Transport.SERVER )
client = SslTest.SslTestConnection( self.client_domain )
client.ssl.peer_hostname = "FOO.PREfixZZZ.domain.com"
self._do_handshake( client, server )
del client
del server
self.tearDown()
# Fail: must match prefix on wildcard
self.setUp()
self.server_domain.set_credentials(self._testpath("server-wc-certificate.pem"),
self._testpath("server-wc-private-key.pem"),
"server-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER_NAME )
server = SslTest.SslTestConnection( self.server_domain, mode=Transport.SERVER )
client = SslTest.SslTestConnection( self.client_domain )
client.ssl.peer_hostname = "FOO.PREfi.domain.com"
self._do_handshake( client, server )
assert client.transport.closed
assert server.transport.closed
assert client.connection.state & Endpoint.REMOTE_UNINIT
assert server.connection.state & Endpoint.REMOTE_UNINIT
del server
del client
self.tearDown()
# Fail: leading wildcards are not optional
self.setUp()
self.server_domain.set_credentials(self._testpath("server-wc-certificate.pem"),
self._testpath("server-wc-private-key.pem"),
"server-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER_NAME )
server = SslTest.SslTestConnection( self.server_domain, mode=Transport.SERVER )
client = SslTest.SslTestConnection( self.client_domain )
client.ssl.peer_hostname = "PREfix.domain.COM"
self._do_handshake( client, server )
assert client.transport.closed
assert server.transport.closed
assert client.connection.state & Endpoint.REMOTE_UNINIT
assert server.connection.state & Endpoint.REMOTE_UNINIT
self.tearDown()
# Pass: ensure that the user can give an alternate name that overrides
# the connection's configured hostname
self.setUp()
self.server_domain.set_credentials(self._testpath("server-wc-certificate.pem"),
self._testpath("server-wc-private-key.pem"),
"server-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER_NAME )
server = SslTest.SslTestConnection(self.server_domain, mode=Transport.SERVER)
client = SslTest.SslTestConnection(self.client_domain,
conn_hostname="This.Name.Does.not.Match",
ssl_peername="alternate.name.one.com")
self._do_handshake(client, server)
del client
del server
self.tearDown()
# Pass: ensure that the hostname supplied by the connection is used if
# none has been specified for the SSL instance
self.setUp()
self.server_domain.set_credentials(self._testpath("server-certificate.pem"),
self._testpath("server-private-key.pem"),
"server-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER_NAME )
server = SslTest.SslTestConnection(self.server_domain, mode=Transport.SERVER)
client = SslTest.SslTestConnection(self.client_domain,
conn_hostname="a1.good.server.domain.com")
self._do_handshake(client, server)
del client
del server
self.tearDown()
def test_server_hostname_authentication_2(self):
"""Initially separated from test_server_hostname_authentication
above to force Windows checking and sidestep PROTON-1057 exclusion.
"""
# Fail for a null peer name.
self.server_domain.set_credentials(self._testpath("server-wc-certificate.pem"),
self._testpath("server-wc-private-key.pem"),
"server-password")
self.client_domain.set_trusted_ca_db(self._testpath("ca-certificate.pem"))
self.client_domain.set_peer_authentication( SSLDomain.VERIFY_PEER_NAME )
server = SslTest.SslTestConnection( self.server_domain, mode=Transport.SERVER )
client = SslTest.SslTestConnection( self.client_domain )
# Next line results in an eventual pn_ssl_set_peer_hostname(client.ssl._ssl, None)
client.ssl.peer_hostname = None
self._do_handshake( client, server )
assert client.transport.closed
assert server.transport.closed
assert client.connection.state & Endpoint.REMOTE_UNINIT
assert server.connection.state & Endpoint.REMOTE_UNINIT
self.tearDown()
def test_defaults_messenger_app(self):
""" Test an SSL connection using the Messenger apps (no certificates)
"""
if os.name=="nt":
raise Skipped("Windows SChannel lacks anonymous cipher support.")
port = common.free_tcp_ports()[0]
receiver = common.MessengerReceiverC()
receiver.subscriptions = ["amqps://~0.0.0.0:%s" % port]
receiver.receive_count = 1
receiver.timeout = self.timeout
receiver.start()
sender = common.MessengerSenderC()
sender.targets = ["amqps://0.0.0.0:%s/X" % port]
sender.send_count = 1
sender.timeout = self.timeout
sender.start()
sender.wait()
assert sender.status() == 0, "Command '%s' failed" % str(sender.cmdline())
receiver.wait()
assert receiver.status() == 0, "Command '%s' failed" % str(receiver.cmdline())
def test_server_authentication_messenger_app(self):
""" Test an SSL authentication using the Messenger apps.
"""
port = common.free_tcp_ports()[0]
receiver = common.MessengerReceiverC()
receiver.subscriptions = ["amqps://~0.0.0.0:%s" % port]
receiver.receive_count = 1
receiver.timeout = self.timeout
# Note hack - by default we use the client-certificate for the
# _server_ because the client-certificate's common name field
# is "127.0.0.1", which will match the target address used by
# the sender.
receiver.certificate = self._testpath("client-certificate.pem")
receiver.privatekey = self._testpath("client-private-key.pem")
receiver.password = "client-password"
receiver.start()
sender = common.MessengerSenderC()
sender.targets = ["amqps://127.0.0.1:%s/X" % port]
sender.send_count = 1
sender.timeout = self.timeout
sender.ca_db = self._testpath("ca-certificate.pem")
sender.start()
sender.wait()
assert sender.status() == 0, "Command '%s' failed" % str(sender.cmdline())
receiver.wait()
assert receiver.status() == 0, "Command '%s' failed" % str(receiver.cmdline())
def test_singleton(self):
"""Verify that only a single instance of SSL can exist per Transport"""
transport = Transport()
ssl1 = SSL(transport, self.client_domain)
ssl2 = transport.ssl(self.client_domain)
ssl3 = transport.ssl(self.client_domain)
assert ssl1 is ssl2
assert ssl1 is ssl3
transport = Transport()
ssl1 = transport.ssl(self.client_domain)
ssl2 = SSL(transport, self.client_domain)
assert ssl1 is ssl2
# catch attempt to re-configure existing SSL
try:
ssl3 = SSL(transport, self.server_domain)
assert False, "Expected error did not occur!"
except SSLException:
pass
|
|
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
version = '3.5.1.GA'
sdk_path = r'/home/thor/.titanium/mobilesdk/linux/3.5.1.GA'
import os, sys, time, datetime, string, math, zipfile, codecs, re, shutil, subprocess, base64
from datetime import date
from xml.dom.minidom import parseString
sys.path.append(os.path.join(sdk_path, "common"))
import simplejson
try:
import markdown2 as markdown
except ImportError:
import markdown
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
ignoreFiles = ['.DS_Store','.cvsignore','.gitignore']
ignoreDirs = ['.svn','_svn','.git','CVS','CVSROOT']
required_manifest_keys = ['name','version','moduleid','description','copyright','license','platform','minsdk']
manifest_defaults = {
'description':'My module',
'author': 'Your Name',
'license' : 'Specify your license',
'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def getText(nodelist):
rc = ''
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc = rc + node.data
rc = rc.strip()
if rc.lower() in ['true', 'yes', '1']:
rc = 'true'
	elif rc.lower() in ['false', 'no', '0']:
rc = 'false'
return rc
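def _getText_example():
	# Illustrative only (hypothetical XML): getText flattens a node's text
	# children and canonicalizes boolean-ish values, so "Yes" becomes
	# 'true' while other text passes through stripped.
	node = parseString('<precache>Yes</precache>').documentElement
	return getText(node.childNodes)  # -> 'true'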
class Compiler(object):
def __init__(self, deploytype):
start_time = time.time()
if not os.path.exists(sdk_path):
print '[ERROR] Unable to find SDK path "%s"' % sdk_path
sys.exit(1)
print '[INFO] Titanium Mobile Web Module Compiler v%s' % version
self.deploytype = deploytype
self.module_path = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
self.src_path = os.path.join(self.module_path, 'src')
self.build_path = os.path.join(self.module_path, 'build')
self.load_manifest()
self.check_license()
self.load_timodule_xml()
self.check_main()
self.modules_map = {}
self.require_cache = {}
self.parse_module(self.main, None)
self.modules_to_cache = []
for module in self.require_cache:
if module != self.main and os.path.exists(os.path.join(self.build_path, module + '.js')):
self.modules_to_cache.append(module)
if 'precache' in self.timodule and 'requires' in self.timodule['precache'] and len(self.timodule['precache']['requires']):
for req in self.timodule['precache']['requires']:
self.modules_to_cache.append('commonjs:' + req)
self.precache_images = []
if 'precache' in self.timodule and 'images' in self.timodule['precache'] and len(self.timodule['precache']['images']):
for img in self.timodule['precache']['images']:
self.precache_images.append(img)
if os.path.exists(self.build_path):
shutil.rmtree(self.build_path, True)
try:
os.makedirs(self.build_path)
		except OSError:
			# The directory may already exist.
			pass
self.copy(self.src_path, self.build_path)
self.build_js()
self.minify_js()
self.package()
total_time = round(time.time() - start_time)
total_minutes = math.floor(total_time / 60)
total_seconds = total_time % 60
if total_minutes > 0:
print '[INFO] Finished in %s minutes %s seconds' % (int(total_minutes), int(total_seconds))
else:
print '[INFO] Finished in %s seconds' % int(total_time)
def load_manifest(self):
self.manifest = {}
manifest_file = os.path.join(self.module_path, 'manifest')
if not os.path.exists(manifest_file):
print '[ERROR] Unable to find manifest file'
sys.exit(1)
for line in open(manifest_file).readlines():
line = line.strip()
if line[0:1] == '#': continue
if line.find(':') < 0: continue
			key, value = line.split(':', 1)
self.manifest[key.strip()] = value.strip()
for key in required_manifest_keys:
			if key not in self.manifest:
				print '[ERROR] Missing required manifest key "%s"' % key
				sys.exit(1)
			if key in manifest_defaults:
defvalue = manifest_defaults[key]
curvalue = self.manifest[key]
if curvalue == defvalue:
print '[WARN] Please update the manifest key: "%s" to a non-default value' % key
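	def _manifest_line_example(self):
		# Illustrative only (hypothetical line): each manifest line is a
		# simple "key: value" pair; '#' comments and lines without ':' are
		# skipped by load_manifest above.
		key, value = 'version: 1.0'.split(':', 1)
		return {key.strip(): value.strip()}  # {'version': '1.0'}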
def check_license(self):
license_file = os.path.join(cwd,'LICENSE')
if not os.path.exists(license_file):
license_file = os.path.join(cwd,'..','LICENSE')
if os.path.exists(license_file):
c = open(license_file).read()
if c.find(module_license_default) != -1:
print '[WARN] Please update the LICENSE file with your license text before distributing'
def load_timodule_xml(self):
global_settings = {}
mobileweb_settings = {}
timodule_file = os.path.join(self.module_path, 'timodule.xml')
if not os.path.exists(timodule_file):
print '[ERROR] Unable to find timodule.xml file'
sys.exit(1)
dom = parseString(codecs.open(timodule_file,'r','utf-8','replace').read().encode('utf-8'))
root = dom.documentElement
for node in root.childNodes:
if node.nodeType == 1 and node.nodeName not in ['android', 'iphone']:
if node.nodeName == 'mobileweb':
for subnode in node.childNodes:
if subnode.nodeType == 1:
							mobileweb_settings[subnode.nodeName] = self.get_xml_children(subnode.childNodes)
else:
					global_settings[node.nodeName] = self.get_xml_children(node.childNodes)
self.timodule = dict(global_settings.items() + mobileweb_settings.items())
def check_main(self):
self.main = self.timodule['main'] if 'main' in self.timodule else self.manifest['moduleid']
if not os.path.exists(os.path.join(self.src_path, self.main + '.js')):
print '[ERROR] Unable to find main module "%s"' % self.main
sys.exit(1)
	def get_xml_children(self, nodes):
		# Recursively convert an XML subtree into nested dicts; leaf text
		# is normalized through getText().
		elements = [n for n in nodes if n.nodeType == 1]
		if elements:
			dest = {}
			for child in elements:
				dest[child.nodeName] = self.get_xml_children(child.childNodes)
			return dest
		return getText(nodes)
    def compact_path(self, path):
        result = []
        lastSegment = None
        path = path.replace('\\', '/').split('/')
        while len(path):
            segment = path[0]
            path = path[1:]
            if segment == '..' and len(result) and lastSegment != '..':
                result.pop()
                lastSegment = result[-1] if len(result) else None
            elif segment != '.':
                lastSegment = segment
                result.append(segment)
        return '/'.join(result)
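    # Worked examples (sketch) of how compact_path collapses relative segments:
    #   compact_path('Ti/_/UI/../text')  ->  'Ti/_/text'
    #   compact_path('./a/b/./c')        ->  'a/b/c'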
def resolve(self, it, ref):
parts = it.split('!')
it = parts[-1]
if it.startswith('url:'):
it = it[4:]
if it.startswith('/'):
it = '.' + it
parts = it.split('/')
return [self.build_path, it]
if it.find(':') != -1:
return []
if it.startswith('/') or (len(parts) == 1 and it.endswith('.js')):
return [self.build_path, it]
if it.startswith('.') and ref is not None:
it = self.compact_path(ref + it)
parts = it.split('/')
return [self.build_path, it]
def parse_module(self, module, ref):
if module in self.require_cache or module == 'require':
return
parts = module.split('!')
if len(parts) == 1:
if module.startswith('.') and ref is not None:
module = self.compact_path(ref + module)
self.require_cache[module] = 1
dep = self.resolve(module, ref)
if not len(dep):
return
if len(parts) > 1:
self.require_cache['url:' + parts[1]] = 1
filename = dep[1]
if not filename.endswith('.js'):
filename += '.js'
source = os.path.join(dep[0], filename)
if not os.path.exists(source):
return
source = codecs.open(source, 'r', 'utf-8').read()
pattern = re.compile('define\(\s*([\'\"][^\'\"]*[\'\"]\s*)?,?\s*(\[[^\]]+\])\s*?,?\s*(function|\{)')
results = pattern.search(source)
if results is None:
self.modules_map[module] = []
else:
groups = results.groups()
if groups is not None and len(groups):
if groups[1] is None:
self.modules_map[module] = []
else:
deps = self.parse_deps(groups[1])
for i in range(0, len(deps)):
dep = deps[i]
parts = dep.split('!')
ref = module.split('/')
ref.pop()
ref = '/'.join(ref) + '/'
if dep.startswith('.'):
deps[i] = self.compact_path(ref + dep)
if len(parts) == 1:
if dep.startswith('./'):
parts = module.split('/')
parts.pop()
parts.append(dep)
self.parse_module(self.compact_path('/'.join(parts)), ref)
else:
self.parse_module(dep, ref)
else:
self.modules_map[dep] = parts[0]
self.parse_module(parts[0], module)
if parts[0] == 'Ti/_/text':
if dep.startswith('./'):
parts = module.split('/')
parts.pop()
parts.append(dep)
self.parse_module(self.compact_path('/'.join(parts)), ref)
else:
self.parse_module(dep, ref)
self.modules_map[module] = deps
def parse_deps(self, deps):
found = []
if len(deps) > 2:
deps = deps[1:-1]
deps = deps.split(',')
for dep in deps:
dep = dep.strip().split(' ')[0].strip()
if dep.startswith('\'') or dep.startswith('"'):
found.append(simplejson.loads(dep))
return found
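    # Example (sketch): given the dependency-array text captured from a
    # define() call, parse_deps extracts the double-quoted module ids:
    #   parse_deps('["Ti/_/declare", "Ti/_/lang"]')  ->  ['Ti/_/declare', 'Ti/_/lang']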
def copy(self, src_path, dest_path):
print '[INFO] Copying %s...' % src_path
for root, dirs, files in os.walk(src_path):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name)
for file in files:
if file in ignoreFiles or file.startswith('._'):
continue
source = os.path.join(root, file)
dest = os.path.expanduser(source.replace(src_path, dest_path, 1))
dest_dir = os.path.expanduser(os.path.split(dest)[0])
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
shutil.copy(source, dest)
def build_js(self):
main_file = os.path.join(self.build_path, self.main + '.js')
tmp = main_file + '.tmp'
js = codecs.open(tmp, 'w', encoding='utf-8')
if len(self.modules_to_cache) > 0 or len(self.precache_images) > 0:
js.write('require.cache({\n')
first = True
for x in self.modules_to_cache:
if x == self.main:
continue
is_cjs = False
if x.startswith('commonjs:'):
is_cjs = True
x = x[9:]
dep = self.resolve(x, None)
if not len(dep):
continue
if not first:
js.write(',\n')
first = False
filename = dep[1]
if not filename.endswith('.js'):
filename += '.js'
file_path = os.path.join(dep[0], filename)
if x.startswith('url:'):
source = file_path + '.uncompressed.js'
if self.minify:
os.rename(file_path, source)
print '[INFO] Minifying include %s' % file_path
p = subprocess.Popen('java -Xms256m -Xmx256m -jar "%s" --compilation_level SIMPLE_OPTIMIZATIONS --js "%s" --js_output_file "%s"' % (os.path.join(sdk_path, 'mobileweb', 'closureCompiler', 'compiler.jar'), source, file_path), shell=True, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
print '[ERROR] Failed to minify "%s"' % file_path
for line in stderr.split('\n'):
if len(line):
print '[ERROR] %s' % line
print '[WARN] Leaving %s un-minified' % file_path
                            if os.path.exists(file_path):
                                os.remove(file_path)
shutil.copy(source, file_path)
js.write('"%s":"%s"' % (x, codecs.open(file_path, 'r', 'utf-8').read().strip().replace('\\', '\\\\').replace('\n', '\\n\\\n').replace('\"', '\\\"')))
elif is_cjs:
js.write('"%s":function(){\n/* %s */\ndefine(function(require, exports, module){\n%s\n});\n}' % (x, file_path.replace(self.build_path, ''), codecs.open(file_path, 'r', 'utf-8').read()))
else:
js.write('"%s":function(){\n/* %s */\n\n%s\n}' % (x, file_path.replace(self.build_path, ''), codecs.open(file_path, 'r', 'utf-8').read()))
image_mime_types = {
'.png': 'image/png',
'.gif': 'image/gif',
            '.jpg': 'image/jpeg',
            '.jpeg': 'image/jpeg'
}
for x in self.precache_images:
x = x.replace('\\', '/')
y = x
if y.startswith(os.sep):
y = '.' + y
img = os.path.join(self.module_path, os.sep.join(y.split('/')))
if os.path.exists(img):
fname, ext = os.path.splitext(img.lower())
if ext in image_mime_types:
if not first:
js.write(',\n')
first = False
js.write('"url:%s":"data:%s;base64,%s"' % (x, image_mime_types[ext], base64.b64encode(open(img,'rb').read())))
js.write('});\n')
js.write(codecs.open(main_file, 'r', 'utf-8').read())
js.close()
os.remove(main_file)
os.rename(tmp, main_file)
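    # The prologue written above looks roughly like this (sketch):
    #   require.cache({
    #       "some/module": function(){ /* /some/module.js */ ... },
    #       "commonjs/module": function(){ define(function(require, exports, module){ ... }); },
    #       "url:images/logo.png": "data:image/png;base64,...."
    #   });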
def minify_js(self):
subprocess.call('java -Xms256m -Xmx256m -cp "%s%s%s" -Djava.awt.headless=true minify "%s"' % (
os.path.join(sdk_path, 'mobileweb', 'minify'),
os.pathsep,
os.path.join(sdk_path, 'mobileweb', 'closureCompiler', 'compiler.jar'),
self.build_path
), shell=True)
def generate_doc(self):
docdir = os.path.join(self.module_path, 'documentation')
if not os.path.exists(docdir):
docdir = os.path.join(self.module_path, '..', 'documentation')
if not os.path.exists(docdir):
                print '[WARN] Couldn\'t find documentation directory at: %s' % docdir
return None
documentation = []
for file in os.listdir(docdir):
if file in ignoreFiles or file.startswith('._') or os.path.isdir(os.path.join(docdir, file)):
continue
md = open(os.path.join(docdir, file)).read()
html = markdown.markdown(md)
            documentation.append({file: html})
return documentation
def zip_dir(self, zf, dir, basepath):
for root, dirs, files in os.walk(dir):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name)
for file in files:
if file in ignoreFiles or file.startswith('._') or file.endswith('.uncompressed.js'):
continue
e = os.path.splitext(file)
if len(e) == 2 and e[1] == '.pyc':
continue
from_ = os.path.join(root, file)
to_ = from_.replace(dir, basepath, 1)
zf.write(from_, to_)
def package(self):
name = self.manifest['name'].lower()
moduleid = self.manifest['moduleid'].lower()
version = self.manifest['version']
install_path = 'modules/mobileweb/%s/%s' % (moduleid, version)
zip_file = os.path.join(self.module_path, '%s-mobileweb-%s.zip' % (moduleid,version))
if os.path.exists(zip_file):
os.remove(zip_file)
zf = zipfile.ZipFile(zip_file, 'w', zipfile.ZIP_DEFLATED)
zf.write(os.path.join(self.module_path, 'manifest'), '%s/manifest' % install_path)
license_file = os.path.join(self.module_path, 'LICENSE')
if not os.path.exists(license_file):
license_file = os.path.join(self.module_path, '..', 'LICENSE')
if os.path.exists(license_file):
zf.write(license_file,'%s/LICENSE' % install_path)
zf.writestr('%s/package.json' % install_path, simplejson.dumps({
'name': self.manifest['name'],
'description': self.manifest['description'],
'version': self.manifest['version'],
'directories': {
'lib': './src'
},
'main': self.main
}, indent=4, sort_keys=True))
self.zip_dir(zf, 'build', '%s/src' % install_path)
self.zip_dir(zf, 'example', '%s/example' % install_path)
docs = self.generate_doc()
        if docs is not None:
for doc in docs:
for file, html in doc.iteritems():
filename = string.replace(file, '.md', '.html')
zf.writestr('%s/documentation/%s' % (install_path, filename), html)
zf.close()
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1].lower() in ['help', '--help', '-h']:
print 'Usage: %s [<deploytype>]' % os.path.basename(sys.argv[0])
sys.exit(1)
Compiler('production' if len(sys.argv) <= 1 else sys.argv[1].lower())
sys.exit(0)
|
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
import json
from django.shortcuts import render, redirect
from django.contrib.auth import (REDIRECT_FIELD_NAME, get_user_model,
load_backend)
from django.contrib.auth import logout as auth_logout
from django.contrib.auth.models import Group, Permission
from django.http import HttpResponseRedirect, HttpResponse
from django.conf import settings
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.decorators import list_route
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework import mixins, viewsets
from . import backends
from . import filters
from . import serializers
from pdc.apps.common.viewsets import StrictQueryParamMixin, ChangeSetUpdateModelMixin
from pdc.apps.utils.utils import group_obj_export
def remoteuserlogin(request):
# if REDIRECT_FIELD_NAME is present in request.GET and has blank
# value, that can cause redirect loop while redirecting to it
redirect_to = request.GET.get(REDIRECT_FIELD_NAME, '/').strip() or '/'
if request.user.is_authenticated() and request.user.is_active:
return HttpResponseRedirect(redirect_to)
try:
request.session.flush()
except Exception:
pass
if request.user.is_anonymous():
reason = "Failed to authenticate. Make sure your browser is correctly configured."
elif not request.user.is_active:
reason = "Account is not active."
context = {'reason': reason}
return render(request, 'auth_error.html', context)
def logout(request):
# if REDIRECT_FIELD_NAME is present in request.GET and has blank
# value, that can cause redirect loop while redirecting to it
redirect_to = request.GET.get(REDIRECT_FIELD_NAME, '/').strip() or '/'
if not request.user.is_authenticated():
return HttpResponseRedirect(redirect_to)
backend = None
if 'auth_backend' in request.session:
backend = load_backend(request.session['auth_backend'])
auth_logout(request)
if backend:
redirect_to = getattr(backend, 'logout_url', '') + redirect_to
return HttpResponseRedirect(redirect_to)
def user_profile(request):
context = {'has_ldap': hasattr(settings, "LDAP_URI")}
return render(request, 'user_profile.html', context)
def refresh_ldap_groups(request):
user = request.user
backends.update_user_from_ldap(user)
if request.is_ajax():
return HttpResponse(json.dumps({
'username': user.username,
'fullname': user.full_name,
'e-mail': user.email,
'is_superuser': user.is_superuser,
'is_staff': user.is_staff,
'groups': [g.name for g in user.groups.all()],
'permissions': list(user.get_all_permissions()),
}), content_type="application/json")
return redirect("user/profile")
class TokenViewSet(StrictQueryParamMixin, viewsets.ViewSet):
"""
## REST API Auth ##
We use `TokenAuthentication` for API Authentication.
    Unauthenticated users will not be able to access the APIs.
**WARNING:** Please do not share your token with anyone else.
**NOTE:** It's highly recommended to make sure your API is only available over `https` when using `TokenAuthentication`.
We use DjangoModelPermissions for RESTful API permissions.
Authorization will only be granted if the user is authenticated
and has the relevant model permissions assigned.
* __POST__ requests require the user to have the `add` permission on the model.
* __PUT__ and __PATCH__ requests require the user to have the `change` permission on the model.
* __DELETE__ requests require the user to have the `delete` permission on the model.
### Using Token
* obtain token
curl --negotiate -u : -H "Accept: application/json" $URL:token-obtain$
you will get a `Response` like:
{"token": "00bf04e8187f6e6d54f510515e8bde88e5bb7904"}
    * then you should add an HTTP header with this token, in this format, to every request that needs authentication:
Authorization: Token 00bf04e8187f6e6d54f510515e8bde88e5bb790
for curl, it should be:
curl -H 'Authorization: Token 00bf04e8187f6e6d54f510515e8bde88e5bb790' %(HOST_NAME)s/%(API_PATH)s/
    * in case you want to refresh your token, you can do it with:
curl --negotiate -u : -H "Accept: application/json" $URL:token-refresh$
you will get a `Response` with refreshed token:
{"token": "00bf04e8187f6e6d54f510515e8bde88e5bb7904"}
"""
permission_classes = [IsAuthenticated]
# Dummy list view for showing ViewSet docstring.
def list(self, request):
return Response()
@list_route(methods=['get', 'post'])
def obtain(self, request):
"""
### Obtain Token
__URL__: $LINK:token-obtain$
__EXAMPLE__:
Run:
curl --negotiate -u : -H "Accept: application/json" $URL:token-obtain$
you will get a `Response` like:
{"token": "00bf04e8187f6e6d54f510515e8bde88e5bb7904"}
"""
if request.user.is_authenticated():
if request.user.is_active:
token, created = Token.objects.get_or_create(user=request.user)
return Response({'token': token.key})
else:
reason = {"Obtain Token Error": "You're not an active user."}
return Response(reason, status=status.HTTP_401_UNAUTHORIZED)
else:
reason = {"Obtain Token Error": "Failed to authenticate."}
return Response(reason, status=status.HTTP_401_UNAUTHORIZED)
@list_route(methods=['get', 'put'])
def refresh(self, request):
"""
### Refresh Token
__URL__: $LINK:token-refresh$
__EXAMPLE__:
Run:
curl --negotiate -u : -H "Accept: application/json" $URL:token-refresh$
# or
curl --negotiate -u : -X PUT -H "Accept: application/json" $URL:token-refresh$
you will get a `Response` with refreshed token:
{"token": "00bf04e8187f6e6d54f510515e8bde88e5bb7904"}
"""
if request.user.is_authenticated():
if request.user.is_active:
try:
token = Token.objects.get(user=request.user)
token.delete()
except Token.DoesNotExist:
reason = {"Refresh Token Error": "You have not got a token yet, please try obtain first."}
return Response(reason, status=status.HTTP_400_BAD_REQUEST)
token = Token.objects.create(user=request.user)
return Response({'token': token.key})
else:
reason = {"Refresh Token Error": "You're not an active user."}
                return Response(reason, status=status.HTTP_401_UNAUTHORIZED)
else:
reason = {"Refresh Token Error": "Authenticate Failed."}
return Response(reason, status=401)
class PermissionViewSet(StrictQueryParamMixin,
mixins.ListModelMixin,
viewsets.GenericViewSet):
"""
##Overview##
This page shows the usage of the **Permission API**, please see the
following for more details.
"""
def list(self, request, *args, **kwargs):
"""
### LIST
__Method__:
`GET`
__Query Params__:
%(FILTERS)s
__URL__: $LINK:permission-list$
__Response__: a paged list of following objects
%(SERIALIZER)s
__Example__:
curl -H "Accept: application/json" -X GET $URL:permission-list$
# output
{
"count": 150,
"next": "$URL:permission-list$?page=2",
"previous": null,
"results": [
{
"codename": "add_logentry",
"app_label": "admin",
"model": "logentry"
},
...
]
}
With query params:
curl -H "Accept: application/json" -G $URL:permission-list$ --data-urlencode "codename=add_logentry"
# output
{
"count": 1,
"next": null,
"previous": null,
"results": [
{
"codename": "add_logentry",
"app_label": "admin",
"model": "logentry"
}
]
}
"""
return super(PermissionViewSet, self).list(request, *args, **kwargs)
queryset = Permission.objects.all().order_by("id")
serializer_class = serializers.PermissionSerializer
filter_class = filters.PermissionFilter
class GroupViewSet(ChangeSetUpdateModelMixin,
StrictQueryParamMixin,
mixins.ListModelMixin,
mixins.RetrieveModelMixin,
viewsets.GenericViewSet):
"""
##Overview##
This page shows the usage of the **Group API**, please see the
following for more details.
"""
def list(self, request, *args, **kwargs):
"""
### LIST
__Method__:
`GET`
__URL__: $LINK:group-list$
__Query Params__:
%(FILTERS)s
__Response__: a paged list of following objects
%(SERIALIZER)s
__Example__:
{
"count": 2,
"next": null,
"previous": null,
"results": [
{
"url": "$URL:group-detail:1$",
"name": "group_add_group",
"permissions": [
{
"codename": "add_group",
"app_label": "auth",
"model": "group"
}
]
},
{
"url": "$URL:group-detail:2$",
"name": "group_change_change",
"permissions": [
{
"codename": "change_change",
"app_label": "changeset",
"model": "change"
}
]
}
]
}
"""
return super(GroupViewSet, self).list(request, *args, **kwargs)
def retrieve(self, request, *args, **kwargs):
"""
### RETRIEVE
__Method__:
`GET`
__URL__: $LINK:group-detail:instance_pk$
__Response__:
%(SERIALIZER)s
__Example__:
# curl command
curl -H "Content-Type: application/json" $URL:group-detail:1$
# output
{
"url": "$URL:group-detail:1$,
"name": "group_add_group",
"permissions": [
{
"codename": "add_group",
"app_label": "auth",
"model": "group"
}
]
}
"""
return super(GroupViewSet, self).retrieve(request, *args, **kwargs)
def update(self, request, *args, **kwargs):
"""
### UPDATE
__Method__:
`PUT`: to update name and permissions
`PATCH`: to update name or permissions
__URL__: $LINK:group-detail:instance_pk$
__Response__:
%(SERIALIZER)s
__Example__:
PUT:
# cat put_data.json
{
"name": "new_group",
"permissions": [
{
"codename": "change_change",
"app_label": "changeset",
"model": "change"
},
{
"codename": "add_group",
"app_label": "auth",
"model": "group"
}
]
}
# curl command
curl -H "Content-Type: application/json" \\
-X PUT \\
--data @put_data.json \\
$URL:group-detail:1$
# output
{
"url": "$URL:group-detail:1$",
"name": "new_group",
"permissions": [
{
"codename": "change_change",
"app_label": "changeset",
"model": "change"
},
{
"codename": "add_group",
"app_label": "auth",
"model": "group"
}
]
}
PATCH:
# cat patch_data.json
{
"permissions": [
{
"codename": "change_change",
"app_label": "changeset",
"model": "change"
},
{
"codename": "add_group",
"app_label": "auth",
"model": "group"
}
]
}
# curl command
curl -H "Content-Type: application/json" \\
-X PATCH \\
--data @patch_data.json \\
$URL:group-detail:1$
# output
{
"url": "$URL:group-detail:1$",
"name": "group_add_group",
"permissions": [
{
"codename": "change_change",
"app_label": "changeset",
"model": "change"
},
{
"codename": "add_group",
"app_label": "auth",
"model": "group"
}
]
}
"""
return super(GroupViewSet, self).update(request, *args, **kwargs)
queryset = Group.objects.all().order_by('id')
serializer_class = serializers.GroupSerializer
filter_class = filters.GroupFilter
Group.export = group_obj_export
class CurrentUserViewSet(mixins.ListModelMixin,
viewsets.GenericViewSet):
"""
This end-point provides programmatic access to information about current
user.
"""
queryset = get_user_model().objects.none()
def list(self, request):
"""
Get information about current user.
__Method__: `GET`
__URL__: $LINK:currentuser-list$
__Response__:
{
"username": string,
"fullname": string,
"e-mail": string,
"is_superuser": bool,
"is_staff": bool,
"groups": [string],
"permissions": [string]
}
"""
user = request.user
if not user.is_authenticated():
return Response(status=status.HTTP_401_UNAUTHORIZED,
data={'detail': 'Access denied to unauthorized users.'})
return Response(data={
'username': user.username,
'fullname': user.full_name,
'e-mail': user.email,
'is_superuser': user.is_superuser,
'is_staff': user.is_staff,
'groups': [g.name for g in user.groups.all()],
'permissions': sorted(list(user.get_all_permissions())),
})
|
|
# 6.00.2x Problem Set 5
# Graph optimization
# Finding shortest paths through MIT buildings
#
import string
# This imports everything from `graph.py` as if it was defined in this file!
from graph import *
#
# Problem 2: Building up the Campus Map
#
# Before you write any code, write a couple of sentences here
# describing how you will model this problem as a graph.
# This is a helpful exercise to help you organize your
# thoughts before you tackle a big design problem!
#
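# A possible model (sketch): each building is a Node keyed by its number, and
# each map entry becomes a directed WeightedEdge carrying two weights, the
# total distance and the distance spent outdoors. Shortest-path search then
# minimizes total distance subject to the outdoor-distance constraint.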
def load_map(mapFilename):
"""
Parses the map file and constructs a directed graph
Parameters:
mapFilename : name of the map file
Assumes:
Each entry in the map file consists of the following four positive
integers, separated by a blank space:
From To TotalDistance DistanceOutdoors
e.g.
32 76 54 23
This entry would become an edge from 32 to 76.
Returns:
a directed graph representing the map
"""
# TODO
print "Loading map from file..."
gMap = WeightedDigraph()
    with open(mapFilename, 'r', 0) as ipFile:
for line in ipFile:
data = line.split()
srcNode = Node(str(data[0]))
destNode = Node(str(data[1]))
if not gMap.hasNode(srcNode):
gMap.addNode(srcNode)
if not gMap.hasNode(destNode):
gMap.addNode(destNode)
aEdge = WeightedEdge(srcNode, destNode, int(data[2]), int(data[3]))
gMap.addEdge(aEdge)
return gMap
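# Example usage (sketch; assumes a map file in the documented format):
#   g = load_map("mit_map.txt")
#   print g.hasNode(Node('32'))   # True once building 32 appears in the file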
# Problem 3: Finding the Shortest Path using Brute Force Search
#
# State the optimization problem as a function to minimize
# and what the constraints are
#
def bruteForceSearch(digraph, start, end, maxTotalDist, maxDistOutdoors):
"""
    Finds the shortest path from start to end using a brute-force approach.
    The total distance travelled on the path must not exceed maxTotalDist, and
    the distance spent outdoors on this path must not exceed maxDistOutdoors.
Parameters:
digraph: instance of class Digraph or its subclass
start, end: start & end building numbers (strings)
maxTotalDist : maximum total distance on a path (integer)
maxDistOutdoors: maximum distance spent outdoors on a path (integer)
Assumes:
start and end are numbers for existing buildings in graph
Returns:
The shortest-path from start to end, represented by
a list of building numbers (in strings), [n_1, n_2, ..., n_k],
where there exists an edge from n_i to n_(i+1) in digraph,
for all 1 <= i < k.
If there exists no path that satisfies maxTotalDist and
maxDistOutdoors constraints, then raises a ValueError.
"""
##### Internal Functions Start
def getPaths(graph, start, end, path=[]):
path = path + [start]
if start == end:
return [path]
paths = []
for node in graph.childrenOf(start):
if node not in path:
newpaths = getPaths(graph, node, end, path)
if newpaths != None:
for newpath in newpaths:
paths.append(newpath)
return paths
def calcPathLength(digraph,aPath):
totDist = 0.0
outDist = 0.0
for idx in xrange(len(aPath)-1):
nextNode = aPath[idx+1]
for link in digraph.edges[aPath[idx]]:
if link[0] == nextNode:
totDist += link[1][0]
outDist += link[1][1]
return totDist, outDist
def calcPathsDetails(digraph, pathsList):
pathsDetail = []
for path in pathsList:
totDist, outDist = calcPathLength(digraph, path)
pathsDetail.append([path, totDist, outDist])
return pathsDetail[:]
def calcShortestPathWithCriteria(pathsDetailed, \
maxTotalDist, maxDistOutdoors):
shortestPath = []
shortestPathVal = float(maxTotalDist)
for path in pathsDetailed:
if path[1] <= maxTotalDist and path[2] <= maxDistOutdoors:
if path[1] <= shortestPathVal:
shortestPathVal = path[1]
shortestPath = path[0]
if len(shortestPath) == 0:
return list(), None
        else:
sPath = []
for node in shortestPath:
sPath.append(node.getName())
return sPath[:], shortestPathVal
##### Internal Functions End
    # Step0 : load map | loaded map is available
    # Step1 : Calculate all available paths
pathsAvailable = getPaths(digraph, Node(start), Node(end))
# Step2 : Calculate path distances for available paths
pathsAvailable = calcPathsDetails(digraph, pathsAvailable)
# Step3 : Calculate Shortest path meeting criteria for total distance and
# outdoor distance
sPath, sPathVal = calcShortestPathWithCriteria(pathsAvailable,
maxTotalDist,
maxDistOutdoors)
if len(sPath) == 0:
        raise ValueError("No path available meeting criteria")
else:
return sPath
#
# Problem 4: Finding the Shortest Path using Optimized Search Method
#
def directedDFS(digraph, start, end, maxTotalDist, maxDistOutdoors):
"""
    Finds the shortest path from start to end using a directed depth-first
    search approach. The total distance travelled on the path must not
    exceed maxTotalDist, and the distance spent outdoors on this path must
    not exceed maxDistOutdoors.
Parameters:
digraph: instance of class Digraph or its subclass
start, end: start & end building numbers (strings)
maxTotalDist : maximum total distance on a path (integer)
maxDistOutdoors: maximum distance spent outdoors on a path (integer)
Assumes:
start and end are numbers for existing buildings in graph
Returns:
The shortest-path from start to end, represented by
a list of building numbers (in strings), [n_1, n_2, ..., n_k],
where there exists an edge from n_i to n_(i+1) in digraph,
for all 1 <= i < k.
If there exists no path that satisfies maxTotalDist and
maxDistOutdoors constraints, then raises a ValueError.
"""
stack = []
stack.append([[Node(start)], (0.0,0.0)])
sDist = maxTotalDist + 1.0
sPath = []
totalDist = 0.0
outdoorsDist = 0.0
nEnd = Node(end)
while len(stack) != 0:
popEntry = stack.pop()
path = popEntry[0]
curNode = path[-1]
for destNode, (nodeTotDist, nodeOutDist) in digraph.edges[curNode]:
totalDist = popEntry[1][0]
outdoorsDist = popEntry[1][1]
            if destNode not in path:
newPath = path + [destNode]
totalDist += nodeTotDist
outdoorsDist += nodeOutDist
criteria = (totalDist > sDist) or (totalDist > maxTotalDist) or (outdoorsDist > maxDistOutdoors)
                if criteria:
continue
stack.append([newPath, (totalDist, outdoorsDist)])
if destNode == nEnd:
sPath = newPath
sDist = totalDist
if len(sPath) == 0:
        raise ValueError("No path available meeting criteria")
else:
shortestPath = []
for node in sPath:
shortestPath.append(node.getName())
return shortestPath[:]
# Uncomment below when ready to test
#### NOTE! These tests may take a few minutes to run!! ####
if __name__ == '__main__':
#Test cases
mitMap = load_map("mit_map.txt")
print isinstance(mitMap, Digraph)
print isinstance(mitMap, WeightedDigraph)
print 'nodes', mitMap.nodes
print 'edges', mitMap.edges
LARGE_DIST = 1000000
# Test case 1
print "---------------"
print "Test case 1:"
print "Find the shortest-path from Building 32 to 56"
expectedPath1 = ['32', '56']
brutePath1 = bruteForceSearch(mitMap, '32', '56', LARGE_DIST, LARGE_DIST)
dfsPath1 = directedDFS(mitMap, '32', '56', LARGE_DIST, LARGE_DIST)
# dfsPath1 = brutePath1
print "Expected: ", expectedPath1
print "Brute-force: ", brutePath1
print "DFS: ", dfsPath1
print "Correct? BFS: {0}; DFS: {1}".format(expectedPath1 == brutePath1, expectedPath1 == dfsPath1)
# Test case 2
print "---------------"
print "Test case 2:"
print "Find the shortest-path from Building 32 to 56 without going outdoors"
expectedPath2 = ['32', '36', '26', '16', '56']
brutePath2 = bruteForceSearch(mitMap, '32', '56', LARGE_DIST, 0)
dfsPath2 = directedDFS(mitMap, '32', '56', LARGE_DIST, 0)
# dfsPath2 = brutePath2
print "Expected: ", expectedPath2
print "Brute-force: ", brutePath2
print "DFS: ", dfsPath2
print "Correct? BFS: {0}; DFS: {1}".format(expectedPath2 == brutePath2, expectedPath2 == dfsPath2)
# Test case 3
print "---------------"
print "Test case 3:"
print "Find the shortest-path from Building 2 to 9"
expectedPath3 = ['2', '3', '7', '9']
brutePath3 = bruteForceSearch(mitMap, '2', '9', LARGE_DIST, LARGE_DIST)
dfsPath3 = directedDFS(mitMap, '2', '9', LARGE_DIST, LARGE_DIST)
# dfsPath3 = brutePath3
print "Expected: ", expectedPath3
print "Brute-force: ", brutePath3
print "DFS: ", dfsPath3
print "Correct? BFS: {0}; DFS: {1}".format(expectedPath3 == brutePath3, expectedPath3 == dfsPath3)
# Test case 4
print "---------------"
print "Test case 4:"
print "Find the shortest-path from Building 2 to 9 without going outdoors"
expectedPath4 = ['2', '4', '10', '13', '9']
brutePath4 = bruteForceSearch(mitMap, '2', '9', LARGE_DIST, 0)
dfsPath4 = directedDFS(mitMap, '2', '9', LARGE_DIST, 0)
# dfsPath4 = brutePath4
print "Expected: ", expectedPath4
print "Brute-force: ", brutePath4
print "DFS: ", dfsPath4
print "Correct? BFS: {0}; DFS: {1}".format(expectedPath4 == brutePath4, expectedPath4 == dfsPath4)
# Test case 5
print "---------------"
print "Test case 5:"
print "Find the shortest-path from Building 1 to 32"
expectedPath5 = ['1', '4', '12', '32']
brutePath5 = bruteForceSearch(mitMap, '1', '32', LARGE_DIST, LARGE_DIST)
dfsPath5 = directedDFS(mitMap, '1', '32', LARGE_DIST, LARGE_DIST)
# dfsPath5 = brutePath5
print "Expected: ", expectedPath5
print "Brute-force: ", brutePath5
print "DFS: ", dfsPath5
print "Correct? BFS: {0}; DFS: {1}".format(expectedPath5 == brutePath5, expectedPath5 == dfsPath5)
# Test case 6
print "---------------"
print "Test case 6:"
print "Find the shortest-path from Building 1 to 32 without going outdoors"
expectedPath6 = ['1', '3', '10', '4', '12', '24', '34', '36', '32']
brutePath6 = bruteForceSearch(mitMap, '1', '32', LARGE_DIST, 0)
dfsPath6 = directedDFS(mitMap, '1', '32', LARGE_DIST, 0)
# dfsPath6 = brutePath6
print "Expected: ", expectedPath6
print "Brute-force: ", brutePath6
print "DFS: ", dfsPath6
print "Correct? BFS: {0}; DFS: {1}".format(expectedPath6 == brutePath6, expectedPath6 == dfsPath6)
# Test case 7
print "---------------"
print "Test case 7:"
print "Find the shortest-path from Building 8 to 50 without going outdoors"
bruteRaisedErr = 'No'
dfsRaisedErr = 'No'
try:
bruteForceSearch(mitMap, '8', '50', LARGE_DIST, 0)
except ValueError:
bruteRaisedErr = 'Yes'
try:
directedDFS(mitMap, '8', '50', LARGE_DIST, 0)
except ValueError:
dfsRaisedErr = 'Yes'
print "Expected: No such path! Should throw a value error."
print "Did brute force search raise an error?", bruteRaisedErr
print "Did DFS search raise an error?", dfsRaisedErr
# Test case 8
print "---------------"
print "Test case 8:"
print "Find the shortest-path from Building 10 to 32 without walking"
print "more than 100 meters in total"
bruteRaisedErr = 'No'
dfsRaisedErr = 'No'
try:
bruteForceSearch(mitMap, '10', '32', 100, LARGE_DIST)
except ValueError:
bruteRaisedErr = 'Yes'
try:
directedDFS(mitMap, '10', '32', 100, LARGE_DIST)
except ValueError:
dfsRaisedErr = 'Yes'
print "Expected: No such path! Should throw a value error."
print "Did brute force search raise an error?", bruteRaisedErr
print "Did DFS search raise an error?", dfsRaisedErr
|
|
import json
from threading import Lock
from enum import Enum
from colorlog import error, warning as warn, info
from typing import List, Any, Dict, Callable, Optional, Tuple
from datetime import datetime
from yangson.datamodel import DataModel
from yangson.enumerations import ContentType, ValidationScope
from yangson.schemanode import (
SchemaNode,
ListNode,
LeafListNode,
SchemaError,
SemanticError,
InternalNode,
ContainerNode
)
from yangson.instvalue import ArrayValue, ObjectValue
from yangson.instance import (
InstanceNode,
NonexistentInstance,
InstanceValueError,
MemberName,
EntryKeys,
EntryIndex,
InstanceRoute,
ArrayEntry,
RootNode,
ObjectMember
)
from .helpers import PathFormat, ErrorHelpers, LogHelpers, DataHelpers, JsonNodeT
from .config import CONFIG, CONFIG_NACM
from .nacm import NacmConfig, Permission, Action, NacmForbiddenError
from .handler_list import (
OP_HANDLERS,
STATE_DATA_HANDLES,
CONF_DATA_HANDLES,
ConfDataObjectHandler,
ConfDataListHandler,
StateDataContainerHandler,
StateDataListHandler
)
from .errors import JetconfError
epretty = ErrorHelpers.epretty
debug_data = LogHelpers.create_module_dbg_logger(__name__)
class ChangeType(Enum):
    CREATE = 0
    REPLACE = 1
    DELETE = 2
class DataLockError(JetconfError):
pass
class StagingDataException(JetconfError):
pass
class InstanceAlreadyPresent(JetconfError):
pass
class HandlerError(JetconfError):
pass
class NoHandlerError(HandlerError):
pass
class ConfHandlerFailedError(HandlerError):
pass
class OpHandlerFailedError(HandlerError):
pass
class NoHandlerForOpError(NoHandlerError):
def __init__(self, op_name: str):
self.op_name = op_name
def __str__(self):
return "Nonexistent handler for operation \"{}\"".format(self.op_name)
class NoHandlerForStateDataError(NoHandlerError):
pass
class RpcInfo:
def __init__(self):
self.username = None # type: str
self.path = None # type: str
self.qs = None # type: Dict[str, List[str]]
self.path_format = PathFormat.URL # type: PathFormat
self.skip_nacm_check = False # type: bool
self.op_name = None # type: str
self.op_input_args = None # type: ObjectValue
class DataChange:
def __init__(self, change_type: ChangeType, rpc_info: RpcInfo, input_data: JsonNodeT, root_after_change: InstanceNode, nacm_modified: bool):
self.change_type = change_type
self.rpc_info = rpc_info
self.input_data = input_data
self.root_after_change = root_after_change
self.nacm_modified = nacm_modified
class UsrChangeJournal:
def __init__(self, root_origin: InstanceNode, transaction_opts: Optional[JsonNodeT]):
self._root_origin = root_origin
self._transaction_opts = transaction_opts
self._journal = [] # type: List[DataChange]
def get_root_head(self) -> InstanceNode:
if len(self._journal) > 0:
return self._journal[-1].root_after_change
else:
return self._root_origin
def get_root_origin(self) -> InstanceNode:
return self._root_origin
def add(self, change: DataChange):
self._journal.append(change)
def list(self) -> JsonNodeT:
changes_info = []
for ch in self._journal:
changes_info.append([ch.change_type.name, ch.rpc_info.path])
return changes_info
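    # Commit flow (summary): build the new root (either by swapping in the
    # journal head or by re-applying each change), validate it, set it as the
    # data root, then run the commit_begin hook, per-change edit handlers and
    # the commit_end hook; on any failure commit_end(failed=True) is called
    # and the data root is rolled back.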
def commit(self, ds: "BaseDatastore") -> bool:
nacm_modified = False
if len(self._journal) == 0:
return False
if hash(ds.get_data_root()) == hash(self._root_origin):
info("Commiting new configuration (swapping roots)")
# Set new root
nr = self.get_root_head()
for change in self._journal:
nacm_modified = nacm_modified or change.nacm_modified
else:
info("Commiting new configuration (re-applying changes)")
nr = ds.get_data_root()
for change in self._journal:
nacm_modified = nacm_modified or change.nacm_modified
if change.change_type == ChangeType.CREATE:
nr = ds.create_node_rpc(nr, change.rpc_info, change.input_data)[0]
elif change.change_type == ChangeType.REPLACE:
nr = ds.update_node_rpc(nr, change.rpc_info, change.input_data)[0]
elif change.change_type == ChangeType.DELETE:
nr = ds.delete_node_rpc(nr, change.rpc_info)[0]
try:
# Validate syntax and semantics of new data
if CONFIG["GLOBAL"]["VALIDATE_TRANSACTIONS"] is True:
nr.validate(ValidationScope.all, ContentType.config)
except (SchemaError, SemanticError) as e:
error("Data validation error:")
error(epretty(e))
raise e
# Set new data root
ds.set_data_root(nr)
# Update NACM if NACM data has been affected by any edit
if nacm_modified and ds.nacm is not None:
ds.nacm.update()
# Call commit begin hook
begin_hook_failed = False
try:
ds.commit_begin_callback(self._transaction_opts)
except Exception as e:
error("Exception occured in commit_begin handler: {}".format(epretty(e)))
begin_hook_failed = True
# Run schema node handlers
conf_handler_failed = False
if not begin_hook_failed:
try:
for change in self._journal:
ii = ds.parse_ii(change.rpc_info.path, change.rpc_info.path_format)
ds.run_conf_edit_handler(ii, change)
except Exception as e:
error("Exception occured in edit handler: {}".format(epretty(e)))
conf_handler_failed = True
# Call commit end hook
end_hook_failed = False
end_hook_abort_failed = False
if not (begin_hook_failed or conf_handler_failed):
try:
ds.commit_end_callback(self._transaction_opts, failed=False)
except Exception as e:
error("Exception occured in commit_end handler: {}".format(epretty(e)))
end_hook_failed = True
if begin_hook_failed or conf_handler_failed or end_hook_failed:
try:
# Call commit_end callback again with "failed" argument set to True
ds.commit_end_callback(self._transaction_opts, failed=True)
except Exception as e:
error("Exception occured in commit_end handler (abort): {}".format(epretty(e)))
end_hook_abort_failed = True
# Return to previous version of data and raise an exception if something went wrong
if begin_hook_failed or conf_handler_failed or end_hook_failed or end_hook_abort_failed:
ds.data_root_rollback(history_steps=1, store_current=False)
# Update NACM again after rollback
if nacm_modified and ds.nacm is not None:
ds.nacm.update()
raise ConfHandlerFailedError("(see logged)")
return True
class BaseDatastore:
def __init__(self, dm: DataModel, with_nacm: bool=False):
def _blankfn(*args, **kwargs):
pass
self.name = ""
self.nacm = None # type: NacmConfig
self._data = None # type: InstanceNode
self._data_history = [] # type: List[InstanceNode]
self._yang_lib_data = None # type: InstanceNode
self._dm = dm # type: DataModel
self._data_lock = Lock()
self._lock_username = None # type: str
self._usr_journals = {} # type: Dict[str, UsrChangeJournal]
self.commit_begin_callback = _blankfn # type: Callable[..., bool]
self.commit_end_callback = _blankfn # type: Callable[..., bool]
if with_nacm:
self.nacm = NacmConfig(self, self._dm)
self._yang_lib_data = self._dm.from_raw(self._dm.yang_library)
# Returns DataModel object
def get_dm(self) -> DataModel:
return self._dm
# Returns the root node of data tree
def get_data_root(self, previous_version: int=0) -> InstanceNode:
if previous_version > 0:
return self._data_history[-previous_version]
else:
return self._data
def get_yl_data_root(self) -> InstanceNode:
return self._yang_lib_data
def make_user_journal(self, username: str, transaction_opts: Optional[JsonNodeT]):
usr_journal = self._usr_journals.get(username)
if usr_journal is not None:
raise StagingDataException("Transaction for user \"{}\" already opened".format(username))
else:
self._usr_journals[username] = UsrChangeJournal(self._data, transaction_opts)
def get_user_journal(self, username: str):
usr_journal = self._usr_journals.get(username)
if usr_journal is not None:
return usr_journal
else:
raise StagingDataException("Transaction for user \"{}\" not opened".format(username))
def drop_user_journal(self, username: str):
usr_journal = self._usr_journals.get(username)
if usr_journal is not None:
del self._usr_journals[username]
else:
raise StagingDataException("Transaction for user \"{}\" not opened".format(username))
# Returns the root node of data tree
def get_data_root_staging(self, username: str) -> InstanceNode:
usr_journal = self.get_user_journal(username)
root = usr_journal.get_root_head()
return root
# Set a new Instance node as data root, store old root to archive
def set_data_root(self, new_root: InstanceNode):
self._data_history.append(self._data)
self._data = new_root
def data_root_rollback(self, history_steps: int, store_current: bool):
if store_current:
self._data_history.append(self._data)
self._data = self._data_history[-history_steps]
def parse_ii(self, path: str, path_format: PathFormat) -> InstanceRoute:
if path_format == PathFormat.URL:
ii = self._dm.parse_resource_id(path)
else:
ii = self._dm.parse_instance_id(path)
return ii
# Get schema node with particular schema address
def get_schema_node(self, sch_pth: str) -> SchemaNode:
sn = self._dm.get_data_node(sch_pth)
if sn is None:
# raise NonexistentSchemaNode(sch_pth)
debug_data("Cannot find schema node for " + sch_pth)
return sn
# Notify data observers about change in datastore
def run_conf_edit_handler(self, ii: InstanceRoute, ch: DataChange):
try:
sch_pth_list = list(filter(lambda n: isinstance(n, MemberName), ii))
if ch.change_type == ChangeType.CREATE:
# Get target member name
input_member_name_fq = tuple(ch.input_data.keys())[0]
input_member_name_ns, input_member_name = input_member_name_fq.split(":", maxsplit=1)
# Append it to ii
sch_pth_list.append(MemberName(input_member_name, None))
sch_pth = DataHelpers.ii2str(sch_pth_list)
sn = self.get_schema_node(sch_pth)
if sn is None:
return
h = CONF_DATA_HANDLES.get_handler(str(id(sn)))
if h is not None:
info("handler for actual data node triggered")
if isinstance(h, ConfDataObjectHandler):
if ch.change_type == ChangeType.CREATE:
h.create(ii, ch)
elif ch.change_type == ChangeType.REPLACE:
h.replace(ii, ch)
elif ch.change_type == ChangeType.DELETE:
h.delete(ii, ch)
if isinstance(h, ConfDataListHandler):
if ch.change_type == ChangeType.CREATE:
h.create_item(ii, ch)
elif ch.change_type == ChangeType.REPLACE:
h.replace_item(ii, ch)
elif ch.change_type == ChangeType.DELETE:
h.delete_item(ii, ch)
else:
sn = sn.parent
while sn is not None:
h = CONF_DATA_HANDLES.get_handler(str(id(sn)))
if h is not None and isinstance(h, ConfDataObjectHandler):
info("handler for superior data node triggered, replace")
# print(h.schema_path)
# print(h.__class__.__name__)
h.replace(ii, ch)
if h is not None and isinstance(h, ConfDataListHandler):
info("handler for superior data node triggered, replace_item")
h.replace_item(ii, ch)
sn = sn.parent
except NonexistentInstance:
warn("Cannnot notify {}, parent container removed".format(ii))
# Get data node, evaluate NACM if required
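    # Resolution steps (summary): parse the instance route, pick the staging or
    # committed root, handle the NACM / YANG library special cases, generate any
    # state data through registered handlers, then apply the "with-defaults"
    # and "depth" query parameters and NACM pruning before returning the node.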
def get_node_rpc(self, rpc: RpcInfo, staging=False) -> InstanceNode:
ii = self.parse_ii(rpc.path, rpc.path_format)
if staging:
try:
root = self.get_data_root_staging(rpc.username)
except StagingDataException:
# root = self._data
info("Starting transaction for user \"{}\"".format(rpc.username))
self.make_user_journal(rpc.username, None)
root = self.get_data_root_staging(rpc.username)
else:
root = self._data
yl_data_request = False
if (len(ii) > 0) and (isinstance(ii[0], MemberName)):
# Not getting root
ns_first = ii[0].namespace
if (ns_first == "ietf-netconf-acm") and (rpc.username not in CONFIG_NACM["ALLOWED_USERS"]):
raise NacmForbiddenError(rpc.username + " not allowed to access NACM data")
elif ns_first == "ietf-yang-library":
root = self._yang_lib_data
yl_data_request = True
else:
# Root node requested
            # Remove NACM data if the user is not NACM-privileged
if rpc.username not in CONFIG_NACM["ALLOWED_USERS"]:
try:
root = root.delete_item("ietf-netconf-acm:nacm")
except NonexistentInstance:
pass
# Append YANG library data
for member_name, member_val in self._yang_lib_data.value.items():
root = root.put_member(member_name, member_val).top()
# Resolve schema node of the desired data node
sch_pth_list = filter(lambda isel: isinstance(isel, MemberName), ii)
sch_pth = DataHelpers.ii2str(sch_pth_list)
sn = self.get_schema_node(sch_pth)
state_roots = sn.state_roots()
# Check if URL points to state data or node that contains state data
if state_roots and not yl_data_request:
debug_data("State roots: {}".format(state_roots))
n = None
for state_root_sch_pth in state_roots:
state_root_sn = self._dm.get_data_node(state_root_sch_pth)
# Check if the desired node is child of the state root
sni = sn
is_child = False
while sni:
if sni is state_root_sn:
is_child = True
break
sni = sni.parent
if is_child:
# Direct request for the state data
sdh = STATE_DATA_HANDLES.get_handler(state_root_sch_pth)
if sdh is not None:
if isinstance(sdh, StateDataContainerHandler):
state_handler_val = sdh.generate_node(ii, rpc.username, staging)
state_root_n = sdh.schema_node.orphan_instance(state_handler_val)
elif isinstance(sdh, StateDataListHandler):
if (sn is sdh.schema_node) and isinstance(ii[-1], MemberName):
state_handler_val = sdh.generate_list(ii, rpc.username, staging)
state_root_n = sdh.schema_node.orphan_instance(state_handler_val)
else:
state_handler_val = sdh.generate_item(ii, rpc.username, staging)
state_root_n = sdh.schema_node.orphan_entry(state_handler_val)
# Select desired subnode from handler-generated content
ii_prefix, ii_rel = sdh.schema_node.split_instance_route(ii)
n = state_root_n.goto(ii_rel)
# There should be only one state root, no need to continue
if len(state_roots) != 1:
warn("URI points to directly to state data, but more state roots found")
break
else:
raise NoHandlerForStateDataError(rpc.path)
else:
# Request for config data containing state data
n = root.goto(ii)
def _fill_state_roots(node: InstanceNode) -> InstanceNode:
if isinstance(node.value, ObjectValue):
if node.schema_node is state_root_sn.parent:
ii_gen = DataHelpers.node_get_ii(node)
sdh = STATE_DATA_HANDLES.get_handler(state_root_sch_pth)
if sdh is not None:
try:
if isinstance(sdh, StateDataContainerHandler):
state_handler_val = sdh.generate_node(ii_gen, rpc.username, staging)
elif isinstance(sdh, StateDataListHandler):
state_handler_val = sdh.generate_list(ii_gen, rpc.username, staging)
except Exception as e:
error("Error occured in state data generator (sn: {})".format(state_root_sch_pth))
error(epretty(e))
error("This state node will be omitted.")
else:
if state_root_sn.ns == state_root_sn.parent.ns:
nm_name = state_root_sn.qual_name[0]
else:
nm_name = state_root_sn.qual_name[1] + ":" + state_root_sn.qual_name[0]
# print("nm={}".format(nm_name))
node = node.put_member(nm_name, state_handler_val, raw=True).up()
else:
for key in node:
member = node[key]
node = _fill_state_roots(member).up()
elif isinstance(node.value, ArrayValue):
i = 0
arr_len = len(node.value)
while i < arr_len:
node = _fill_state_roots(node[i]).up()
i += 1
return node
n = _fill_state_roots(n)
root = n.top()
else:
# No state data in requested node
n = root.goto(ii)
# Process "with-defaults" query parameter
try:
with_defs = rpc.qs["with-defaults"][0]
except (IndexError, KeyError):
with_defs = None
if with_defs == "report-all":
n = n.add_defaults()
# Evaluate NACM if required
if self.nacm and not rpc.skip_nacm_check:
nrpc = self.nacm.get_user_rules(rpc.username)
if nrpc.check_data_node_permission(root, ii, Permission.NACM_ACCESS_READ) == Action.DENY:
raise NacmForbiddenError()
else:
# Prune nodes that should not be accessible to user
n = nrpc.prune_data_tree(n, root, ii, Permission.NACM_ACCESS_READ)
# Process "depth" query parameter
try:
max_depth_str = rpc.qs["depth"][0]
if max_depth_str == "unbounded":
max_depth = None
else:
max_depth = int(max_depth_str) - 1
if (max_depth < 0) or (max_depth > 65535):
raise ValueError()
except (IndexError, KeyError):
max_depth = None
except ValueError:
raise ValueError("Invalid value of query param \"depth\"")
if max_depth is not None:
def _tree_limit_depth(node: InstanceNode, depth: int) -> InstanceNode:
if isinstance(node.value, ObjectValue):
if depth > max_depth:
node.value = ObjectValue({})
else:
for child_key in sorted(node.value.keys()):
m = node[child_key]
node = _tree_limit_depth(m, depth + 1).up()
elif isinstance(node.value, ArrayValue):
depth -= 1
for i in range(len(node.value)):
e = node[i]
node = _tree_limit_depth(e, depth + 1).up()
return node
n = _tree_limit_depth(n, 1)
# Return result
return n
# Create new data node
def create_node_rpc(self, root: InstanceNode, rpc: RpcInfo, value: Any) -> Tuple[InstanceNode, bool]:
ii = self.parse_ii(rpc.path, rpc.path_format)
# Get target member name
input_member_keys = tuple(value.keys())
if len(input_member_keys) != 1:
raise ValueError("Received json object must contain exactly one member")
input_member_name_fq = input_member_keys[0]
try:
input_member_ns, input_member_name = input_member_name_fq.split(":", maxsplit=1)
except ValueError:
raise ValueError("Input object name must me in fully-qualified format")
input_member_value = value[input_member_name_fq]
# Deny any changes of NACM data for non-privileged users
nacm_changed = False
if (len(ii) > 0) and (isinstance(ii[0], MemberName)):
# Not getting root
ns_first = ii[0].namespace
if ns_first == "ietf-netconf-acm":
nacm_changed = True
if rpc.username not in CONFIG_NACM["ALLOWED_USERS"]:
raise NacmForbiddenError(rpc.username + " not allowed to modify NACM data")
else:
# Editing root node
if input_member_ns == "ietf-netconf-acm":
nacm_changed = True
if rpc.username not in CONFIG_NACM["ALLOWED_USERS"]:
raise NacmForbiddenError(rpc.username + " not allowed to modify NACM data")
# Evaluate NACM
if self.nacm and not rpc.skip_nacm_check:
nrpc = self.nacm.get_user_rules(rpc.username)
if nrpc.check_data_node_permission(root, ii, Permission.NACM_ACCESS_CREATE) == Action.DENY:
raise NacmForbiddenError()
n = root.goto(ii)
# Get target schema node
sn = n.schema_node # type: InternalNode
member_sn = sn.get_child(input_member_name, input_member_ns)
if member_sn is None:
raise ValueError("Received json object contains unknown member")
# Check if target member already exists
if sn.ns == member_sn.ns:
try:
existing_member = n[input_member_name]
except NonexistentInstance:
existing_member = None
else:
try:
existing_member = n[input_member_name_fq]
except NonexistentInstance:
existing_member = None
# Get query parameters
insert = rpc.qs.get("insert", [None])[0]
point = rpc.qs.get("point", [None])[0]
if isinstance(member_sn, ListNode):
# Append received node to list
# Create list if necessary
if existing_member is None:
new_member_name = input_member_name if n.namespace == input_member_ns else input_member_name_fq
existing_member = n.put_member(new_member_name, ArrayValue([]))
# Get ListNode key names
list_node_keys = member_sn.keys # Key names in the form [(key, ns), ]
if insert == "first":
# Optimization
if len(existing_member.value) > 0:
list_entry_first = existing_member[0] # type: ArrayEntry
new_member = list_entry_first.insert_before(input_member_value, raw=True).up()
else:
new_member = existing_member.update([input_member_value], raw=True)
elif (insert == "last") or (insert is None):
# Optimization
if len(existing_member.value) > 0:
list_entry_last = existing_member[-1] # type: ArrayEntry
new_member = list_entry_last.insert_after(input_member_value, raw=True).up()
else:
new_member = existing_member.update([input_member_value], raw=True)
elif (insert == "before") and (point is not None):
point_keys_val = point.split(",") # List key values passed in the "point" query argument
if len(list_node_keys) != len(point_keys_val):
raise ValueError(
"Invalid number of keys passed in 'point' query: {} ({} expected)".format(
len(point_keys_val), len(list_node_keys)
)
)
entry_keys = dict(map(lambda i: (list_node_keys[i], point_keys_val[i]), range(len(list_node_keys))))
entry_sel = EntryKeys(entry_keys)
point_list_entry = entry_sel.goto_step(existing_member) # type: ArrayEntry
new_member = point_list_entry.insert_before(input_member_value, raw=True).up()
elif (insert == "after") and (point is not None):
point_keys_val = point.split(",") # List key values passed in the "point" query argument
if len(list_node_keys) != len(point_keys_val):
raise ValueError(
"Invalid number of keys passed in 'point' query: {} ({} expected)".format(
len(point_keys_val), len(list_node_keys)
)
)
entry_keys = dict(map(lambda i: (list_node_keys[i], point_keys_val[i]), range(len(list_node_keys))))
entry_sel = EntryKeys(entry_keys)
point_list_entry = entry_sel.goto_step(existing_member) # type: ArrayEntry
new_member = point_list_entry.insert_after(input_member_value, raw=True).up()
else:
raise ValueError("Invalid 'insert'/'point' query values")
elif isinstance(member_sn, LeafListNode):
# Append received node to leaf list
# Create leaf list if necessary
if existing_member is None:
new_member_name = input_member_name if n.namespace == input_member_ns else input_member_name_fq
existing_member = n.put_member(new_member_name, ArrayValue([]))
# Convert input data from List/Dict to ArrayValue/ObjectValue
new_value_item = member_sn.entry_from_raw(input_member_value)
if insert == "first":
new_member = existing_member.update(ArrayValue([new_value_item] + existing_member.value))
elif (insert == "last") or (insert is None):
new_member = existing_member.update(ArrayValue(existing_member.value + [new_value_item]))
else:
raise ValueError("Invalid 'insert' query value")
else:
# Create new container member
if existing_member is None:
# Create new node (object member)
new_member_name = input_member_name if n.namespace == input_member_ns else input_member_name_fq
new_member = n.put_member(new_member_name, input_member_value, raw=True)
else:
# Data node already exists
raise InstanceAlreadyPresent("Member \"{}\" already present in \"{}\"".format(input_member_name, ii))
return new_member.top(), nacm_changed
# PUT data node
def update_node_rpc(self, root: InstanceNode, rpc: RpcInfo, value: Any) -> Tuple[InstanceNode, bool]:
ii = self.parse_ii(rpc.path, rpc.path_format)
# Get target member name
input_member_keys = tuple(value.keys())
if len(input_member_keys) != 1:
raise ValueError("Received json object must contain exactly one member")
input_member_name_fq = input_member_keys[0]
try:
input_member_ns, input_member_name = input_member_name_fq.split(":", maxsplit=1)
except ValueError:
raise ValueError("Input object name must me in fully-qualified format")
input_member_value = value[input_member_name_fq]
n = root.goto(ii)
# Deny any changes of NACM data for non-privileged users
nacm_changed = False
if (len(ii) > 0) and (isinstance(ii[0], MemberName)):
# Not getting root
ns_first = ii[0].namespace
if ns_first == "ietf-netconf-acm":
nacm_changed = True
if rpc.username not in CONFIG_NACM["ALLOWED_USERS"]:
raise NacmForbiddenError(rpc.username + " not allowed to modify NACM data")
else:
# Replacing root node
# Check if NACM data are present in the datastore
nacm_val = n.value.get("ietf-netconf-acm:nacm")
if nacm_val is not None:
nacm_changed = True
if rpc.username not in CONFIG_NACM["ALLOWED_USERS"]:
raise NacmForbiddenError(rpc.username + " not allowed to modify NACM data")
# Evaluate NACM
if self.nacm and not rpc.skip_nacm_check:
nrpc = self.nacm.get_user_rules(rpc.username)
if nrpc.check_data_node_permission(root, ii, Permission.NACM_ACCESS_UPDATE) == Action.DENY:
raise NacmForbiddenError()
new_n = n.update(input_member_value, raw=True)
new_n.validate(ValidationScope.syntax)
return new_n.top(), nacm_changed
# Delete data node
def delete_node_rpc(self, root: InstanceNode, rpc: RpcInfo) -> Tuple[InstanceNode, bool]:
ii = self.parse_ii(rpc.path, rpc.path_format)
n = root.goto(ii)
# Deny any changes of NACM data for non-privileged users
nacm_changed = False
if (len(ii) > 0) and (isinstance(ii[0], MemberName)):
# Not getting root
ns_first = ii[0].namespace
if ns_first == "ietf-netconf-acm":
nacm_changed = True
if rpc.username not in CONFIG_NACM["ALLOWED_USERS"]:
raise NacmForbiddenError(rpc.username + " not allowed to modify NACM data")
else:
# Deleting root node
# Check if NACM data are present in the datastore
nacm_val = n.value.get("ietf-netconf-acm:nacm")
if nacm_val is not None:
nacm_changed = True
if rpc.username not in CONFIG_NACM["ALLOWED_USERS"]:
raise NacmForbiddenError(rpc.username + " not allowed to modify NACM data")
# Evaluate NACM
if self.nacm and not rpc.skip_nacm_check:
nrpc = self.nacm.get_user_rules(rpc.username)
if nrpc.check_data_node_permission(root, ii, Permission.NACM_ACCESS_DELETE) == Action.DENY:
raise NacmForbiddenError()
if len(ii) == 0:
# Deleting entire datastore
new_n = RootNode(ObjectValue({}), root.schema_node, datetime.now())
else:
n_parent = n.up()
last_isel = ii[-1]
if isinstance(n_parent.value, ArrayValue):
if isinstance(last_isel, EntryIndex):
new_n = n_parent.delete_item(last_isel.index)
elif isinstance(last_isel, EntryKeys):
new_n = n_parent.delete_item(n.index)
else:
raise ValueError("Unknown node selector")
elif isinstance(n_parent.value, ObjectValue):
new_n = n_parent.delete_item(last_isel.namespace + ":" + last_isel.name if last_isel.namespace else last_isel.name)
else:
raise InstanceValueError(rpc.path, "Invalid target node type")
return new_n.top(), nacm_changed
# Invoke an operation
def invoke_op_rpc(self, rpc: RpcInfo) -> JsonNodeT:
if rpc.op_name.startswith("jetconf:"):
# Jetconf internal operation
op_handler = OP_HANDLERS.get_handler(rpc.op_name)
if op_handler is None:
raise NoHandlerForOpError(rpc.op_name)
ret_data = op_handler(rpc)
else:
# External operation defined in data model
if self.nacm and not rpc.skip_nacm_check:
nrpc = self.nacm.get_user_rules(rpc.username)
if nrpc.check_rpc_name(rpc.op_name) == Action.DENY:
raise NacmForbiddenError(
"Invocation of \"{}\" operation denied for user \"{}\"".format(rpc.op_name, rpc.username)
)
op_handler = OP_HANDLERS.get_handler(rpc.op_name)
if op_handler is None:
raise NoHandlerForOpError(rpc.op_name)
# Get operation input schema
sn = self._dm.get_schema_node(rpc.path)
sn_input = sn.get_child("input")
# Input arguments are expected, this will validate them
op_input_args = sn_input.from_raw(rpc.op_input_args) if sn_input.children else None
try:
ret_data = op_handler(op_input_args, rpc.username)
except Exception as e:
raise OpHandlerFailedError(epretty(e))
return ret_data
def add_to_journal_rpc(self, ch_type: ChangeType, rpc: RpcInfo, value: Optional[JsonNodeT], new_root: InstanceNode, nacm_modified: bool):
usr_journal = self._usr_journals.get(rpc.username)
if usr_journal is not None:
usr_journal.add(DataChange(ch_type, rpc, value, new_root, nacm_modified))
else:
raise NoHandlerError("No active changelist for user \"{}\"".format(rpc.username))
# Locks datastore data
    def lock_data(self, username: str = None, blocking: bool = True):
        # Lock.acquire() rejects a timeout for non-blocking calls, so only
        # pass one when actually blocking.
        ret = self._data_lock.acquire(blocking=blocking, timeout=1 if blocking else -1)
if ret:
self._lock_username = username or "(unknown)"
debug_data("Acquired lock in datastore \"{}\" for user \"{}\"".format(self.name, username))
else:
raise DataLockError(
"Failed to acquire lock in datastore \"{}\" for user \"{}\", already locked by \"{}\"".format(
self.name,
username,
self._lock_username
)
)
# Unlock datastore data
def unlock_data(self):
self._data_lock.release()
debug_data("Released lock in datastore \"{}\" for user \"{}\"".format(self.name, self._lock_username))
self._lock_username = None
# Load data from persistent storage
def load(self):
raise NotImplementedError("Not implemented in base class")
# Save data to persistent storage
def save(self):
raise NotImplementedError("Not implemented in base class")
class JsonDatastore(BaseDatastore):
def __init__(self, dm: DataModel, json_file: str, with_nacm: bool=False):
super().__init__(dm, with_nacm)
self.json_file = json_file
def load(self):
self._data = None
with open(self.json_file, "rt") as fp:
self._data = self._dm.from_raw(json.load(fp))
if self.nacm is not None:
self.nacm.update()
def save(self):
with open(self.json_file, "w") as jfd:
json.dump(self._data.raw_value(), jfd, indent=4)
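# Editorial sketch of the load/lock/save cycle above (hypothetical paths and
# DataModel setup; commented out so the module's behaviour is unchanged):
#
# ds = JsonDatastore(dm, "/tmp/data.json", with_nacm=False)
# ds.load()
# ds.lock_data(username="admin")
# try:
# ... # apply edits via the RPC methods above
# ds.save()
# finally:
# ds.unlock_data()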
|
|
# Copyright 2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing as T
import re
from ..mesonlib import version_compare
from ..interpreter import CompilerHolder
from ..compilers import CudaCompiler
from . import ModuleObject
from ..interpreterbase import (
flatten, permittedKwargs, noKwargs,
InvalidArguments, FeatureNew
)
class CudaModule(ModuleObject):
@FeatureNew('CUDA module', '0.50.0')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.methods.update({
"min_driver_version": self.min_driver_version,
"nvcc_arch_flags": self.nvcc_arch_flags,
"nvcc_arch_readable": self.nvcc_arch_readable,
})
@noKwargs
def min_driver_version(self, state: 'ModuleState',
args: T.Tuple[str],
kwargs: T.Dict[str, T.Any]) -> str:
argerror = InvalidArguments('min_driver_version must have exactly one positional argument: ' +
'a CUDA Toolkit version string. Beware that, since CUDA 11.0, ' +
'the CUDA Toolkit\'s components (including NVCC) are versioned ' +
'independently from each other (and the CUDA Toolkit as a whole).')
if len(args) != 1 or not isinstance(args[0], str):
raise argerror
cuda_version = args[0]
driver_version_table = [
{'cuda_version': '>=11.2.1', 'windows': '461.09', 'linux': '460.32.03'},
{'cuda_version': '>=11.2.0', 'windows': '460.82', 'linux': '460.27.03'},
{'cuda_version': '>=11.1.1', 'windows': '456.81', 'linux': '455.32'},
{'cuda_version': '>=11.1.0', 'windows': '456.38', 'linux': '455.23'},
{'cuda_version': '>=11.0.3', 'windows': '451.82', 'linux': '450.51.06'},
{'cuda_version': '>=11.0.2', 'windows': '451.48', 'linux': '450.51.05'},
{'cuda_version': '>=11.0.1', 'windows': '451.22', 'linux': '450.36.06'},
{'cuda_version': '>=10.2.89', 'windows': '441.22', 'linux': '440.33'},
{'cuda_version': '>=10.1.105', 'windows': '418.96', 'linux': '418.39'},
{'cuda_version': '>=10.0.130', 'windows': '411.31', 'linux': '410.48'},
{'cuda_version': '>=9.2.148', 'windows': '398.26', 'linux': '396.37'},
{'cuda_version': '>=9.2.88', 'windows': '397.44', 'linux': '396.26'},
{'cuda_version': '>=9.1.85', 'windows': '391.29', 'linux': '390.46'},
{'cuda_version': '>=9.0.76', 'windows': '385.54', 'linux': '384.81'},
{'cuda_version': '>=8.0.61', 'windows': '376.51', 'linux': '375.26'},
{'cuda_version': '>=8.0.44', 'windows': '369.30', 'linux': '367.48'},
{'cuda_version': '>=7.5.16', 'windows': '353.66', 'linux': '352.31'},
{'cuda_version': '>=7.0.28', 'windows': '347.62', 'linux': '346.46'},
]
driver_version = 'unknown'
for d in driver_version_table:
if version_compare(cuda_version, d['cuda_version']):
driver_version = d.get(state.host_machine.system, d['linux'])
break
return driver_version
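# Worked example (values from the table above): for cuda_version '11.1.0' the
# first satisfied row is '>=11.1.0', so a Linux host reports '455.23' and a
# Windows host '456.38'. Host systems missing from a row fall back to the
# 'linux' column, and versions older than every row yield 'unknown'.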
@permittedKwargs(['detected'])
def nvcc_arch_flags(self, state: 'ModuleState',
args: T.Tuple[T.Union[CompilerHolder, CudaCompiler, str]],
kwargs: T.Dict[str, T.Any]) -> T.List[str]:
nvcc_arch_args = self._validate_nvcc_arch_args(args, kwargs)
ret = self._nvcc_arch_flags(*nvcc_arch_args)[0]
return ret
@permittedKwargs(['detected'])
def nvcc_arch_readable(self, state: 'ModuleState',
args: T.Tuple[T.Union[CompilerHolder, CudaCompiler, str]],
kwargs: T.Dict[str, T.Any]) -> T.List[str]:
nvcc_arch_args = self._validate_nvcc_arch_args(args, kwargs)
ret = self._nvcc_arch_flags(*nvcc_arch_args)[1]
return ret
@staticmethod
def _break_arch_string(s):
s = re.sub('[ \t\r\n,;]+', ';', s)
s = s.strip(';').split(';')
return s
@staticmethod
def _detected_cc_from_compiler(c):
if isinstance(c, CompilerHolder):
c = c.compiler
if isinstance(c, CudaCompiler):
return c.detected_cc
return ''
@staticmethod
def _version_from_compiler(c):
if isinstance(c, CompilerHolder):
c = c.compiler
if isinstance(c, CudaCompiler):
return c.version
if isinstance(c, str):
return c
return 'unknown'
def _validate_nvcc_arch_args(self, args, kwargs):
argerror = InvalidArguments('The first argument must be an NVCC compiler object, or its version string!')
if len(args) < 1:
raise argerror
else:
compiler = args[0]
cuda_version = self._version_from_compiler(compiler)
if cuda_version == 'unknown':
raise argerror
arch_list = [] if len(args) <= 1 else flatten(args[1:])
arch_list = [self._break_arch_string(a) for a in arch_list]
arch_list = flatten(arch_list)
if len(arch_list) > 1 and not set(arch_list).isdisjoint({'All', 'Common', 'Auto'}):
raise InvalidArguments('''The special architectures 'All', 'Common' and 'Auto' must appear alone, as a positional argument!''')
arch_list = arch_list[0] if len(arch_list) == 1 else arch_list
detected = kwargs.get('detected', self._detected_cc_from_compiler(compiler))
detected = flatten([detected])
detected = [self._break_arch_string(a) for a in detected]
detected = flatten(detected)
if not set(detected).isdisjoint({'All', 'Common', 'Auto'}):
raise InvalidArguments('''The special architectures 'All', 'Common' and 'Auto' must appear alone, as a positional argument!''')
return cuda_version, arch_list, detected
def _filter_cuda_arch_list(self, cuda_arch_list, lo=None, hi=None, saturate=None):
"""
Filter CUDA arch list (no codenames) for >= low and < hi architecture
bounds, and deduplicate.
If saturate is provided, architectures >= hi are replaced with saturate.
"""
filtered_cuda_arch_list = []
for arch in cuda_arch_list:
if arch:
if lo and version_compare(arch, '<' + lo):
continue
if hi and version_compare(arch, '>=' + hi):
if not saturate:
continue
arch = saturate
if arch not in filtered_cuda_arch_list:
filtered_cuda_arch_list.append(arch)
return filtered_cuda_arch_list
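# Worked example, traced through the filter above: with lo='3.5', hi='8.0'
# and saturate='7.5', the list ['3.0', '5.0', '8.6'] becomes ['5.0', '7.5'] --
# '3.0' falls below the lower bound, '5.0' passes through unchanged, and
# '8.6' is saturated down to '7.5'.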
def _nvcc_arch_flags(self, cuda_version, cuda_arch_list='Auto', detected=''):
"""
Using the CUDA Toolkit version and the target architectures, compute
the NVCC architecture flags.
"""
# Replicates much of the logic of
# https://github.com/Kitware/CMake/blob/master/Modules/FindCUDA/select_compute_arch.cmake
# except that a bug with cuda_arch_list="All" is worked around by
# tracking both lower and upper limits on GPU architectures.
cuda_known_gpu_architectures = ['Fermi', 'Kepler', 'Maxwell'] # noqa: E221
cuda_common_gpu_architectures = ['3.0', '3.5', '5.0'] # noqa: E221
cuda_hi_limit_gpu_architecture = None # noqa: E221
cuda_lo_limit_gpu_architecture = '2.0' # noqa: E221
cuda_all_gpu_architectures = ['3.0', '3.2', '3.5', '5.0'] # noqa: E221
if version_compare(cuda_version, '<7.0'):
cuda_hi_limit_gpu_architecture = '5.2'
if version_compare(cuda_version, '>=7.0'):
cuda_known_gpu_architectures += ['Kepler+Tegra', 'Kepler+Tesla', 'Maxwell+Tegra'] # noqa: E221
cuda_common_gpu_architectures += ['5.2'] # noqa: E221
if version_compare(cuda_version, '<8.0'):
cuda_common_gpu_architectures += ['5.2+PTX'] # noqa: E221
cuda_hi_limit_gpu_architecture = '6.0' # noqa: E221
if version_compare(cuda_version, '>=8.0'):
cuda_known_gpu_architectures += ['Pascal', 'Pascal+Tegra'] # noqa: E221
cuda_common_gpu_architectures += ['6.0', '6.1'] # noqa: E221
cuda_all_gpu_architectures += ['6.0', '6.1', '6.2'] # noqa: E221
if version_compare(cuda_version, '<9.0'):
cuda_common_gpu_architectures += ['6.1+PTX'] # noqa: E221
cuda_hi_limit_gpu_architecture = '7.0' # noqa: E221
if version_compare(cuda_version, '>=9.0'):
cuda_known_gpu_architectures += ['Volta', 'Xavier'] # noqa: E221
cuda_common_gpu_architectures += ['7.0'] # noqa: E221
cuda_all_gpu_architectures += ['7.0', '7.2'] # noqa: E221
# https://docs.nvidia.com/cuda/archive/9.0/cuda-toolkit-release-notes/index.html#unsupported-features
cuda_lo_limit_gpu_architecture = '3.0' # noqa: E221
if version_compare(cuda_version, '<10.0'):
cuda_common_gpu_architectures += ['7.2+PTX'] # noqa: E221
cuda_hi_limit_gpu_architecture = '8.0' # noqa: E221
if version_compare(cuda_version, '>=10.0'):
cuda_known_gpu_architectures += ['Turing'] # noqa: E221
cuda_common_gpu_architectures += ['7.5'] # noqa: E221
cuda_all_gpu_architectures += ['7.5'] # noqa: E221
if version_compare(cuda_version, '<11.0'):
cuda_common_gpu_architectures += ['7.5+PTX'] # noqa: E221
cuda_hi_limit_gpu_architecture = '8.0' # noqa: E221
if version_compare(cuda_version, '>=11.0'):
cuda_known_gpu_architectures += ['Ampere'] # noqa: E221
cuda_common_gpu_architectures += ['8.0'] # noqa: E221
cuda_all_gpu_architectures += ['8.0'] # noqa: E221
# https://docs.nvidia.com/cuda/archive/11.0/cuda-toolkit-release-notes/index.html#deprecated-features
cuda_lo_limit_gpu_architecture = '3.5' # noqa: E221
if version_compare(cuda_version, '<11.1'):
cuda_common_gpu_architectures += ['8.0+PTX'] # noqa: E221
cuda_hi_limit_gpu_architecture = '8.6' # noqa: E221
if version_compare(cuda_version, '>=11.1'):
cuda_common_gpu_architectures += ['8.6', '8.6+PTX'] # noqa: E221
cuda_all_gpu_architectures += ['8.6'] # noqa: E221
if version_compare(cuda_version, '<12.0'):
cuda_hi_limit_gpu_architecture = '9.0' # noqa: E221
if not cuda_arch_list:
cuda_arch_list = 'Auto'
if cuda_arch_list == 'All': # noqa: E271
cuda_arch_list = cuda_known_gpu_architectures
elif cuda_arch_list == 'Common': # noqa: E271
cuda_arch_list = cuda_common_gpu_architectures
elif cuda_arch_list == 'Auto': # noqa: E271
if detected:
if isinstance(detected, list):
cuda_arch_list = detected
else:
cuda_arch_list = self._break_arch_string(detected)
cuda_arch_list = self._filter_cuda_arch_list(cuda_arch_list,
cuda_lo_limit_gpu_architecture,
cuda_hi_limit_gpu_architecture,
cuda_common_gpu_architectures[-1])
else:
cuda_arch_list = cuda_common_gpu_architectures
elif isinstance(cuda_arch_list, str):
cuda_arch_list = self._break_arch_string(cuda_arch_list)
cuda_arch_list = sorted([x for x in set(cuda_arch_list) if x])
cuda_arch_bin = []
cuda_arch_ptx = []
for arch_name in cuda_arch_list:
arch_bin = []
arch_ptx = []
add_ptx = arch_name.endswith('+PTX')
if add_ptx:
arch_name = arch_name[:-len('+PTX')]
if re.fullmatch('[0-9]+\\.[0-9](\\([0-9]+\\.[0-9]\\))?', arch_name):
arch_bin, arch_ptx = [arch_name], [arch_name]
else:
arch_bin, arch_ptx = {
'Fermi': (['2.0', '2.1(2.0)'], []),
'Kepler+Tegra': (['3.2'], []),
'Kepler+Tesla': (['3.7'], []),
'Kepler': (['3.0', '3.5'], ['3.5']),
'Maxwell+Tegra': (['5.3'], []),
'Maxwell': (['5.0', '5.2'], ['5.2']),
'Pascal': (['6.0', '6.1'], ['6.1']),
'Pascal+Tegra': (['6.2'], []),
'Volta': (['7.0'], ['7.0']),
'Xavier': (['7.2'], []),
'Turing': (['7.5'], ['7.5']),
'Ampere': (['8.0'], ['8.0']),
}.get(arch_name, (None, None))
if arch_bin is None:
raise InvalidArguments('Unknown CUDA Architecture Name {}!'
.format(arch_name))
cuda_arch_bin += arch_bin
if add_ptx:
if not arch_ptx:
arch_ptx = arch_bin
cuda_arch_ptx += arch_ptx
cuda_arch_bin = sorted(list(set(cuda_arch_bin)))
cuda_arch_ptx = sorted(list(set(cuda_arch_ptx)))
nvcc_flags = []
nvcc_archs_readable = []
for arch in cuda_arch_bin:
arch, codev = re.fullmatch(
'([0-9]+\\.[0-9])(?:\\(([0-9]+\\.[0-9])\\))?', arch).groups()
if version_compare(arch, '<' + cuda_lo_limit_gpu_architecture):
continue
if version_compare(arch, '>=' + cuda_hi_limit_gpu_architecture):
continue
if codev:
arch = arch.replace('.', '')
codev = codev.replace('.', '')
nvcc_flags += ['-gencode', 'arch=compute_' + codev + ',code=sm_' + arch]
nvcc_archs_readable += ['sm_' + arch]
else:
arch = arch.replace('.', '')
nvcc_flags += ['-gencode', 'arch=compute_' + arch + ',code=sm_' + arch]
nvcc_archs_readable += ['sm_' + arch]
for arch in cuda_arch_ptx:
arch, codev = re.fullmatch(
'([0-9]+\\.[0-9])(?:\\(([0-9]+\\.[0-9])\\))?', arch).groups()
if codev:
arch = codev
if version_compare(arch, '<' + cuda_lo_limit_gpu_architecture):
continue
if version_compare(arch, '>=' + cuda_hi_limit_gpu_architecture):
continue
arch = arch.replace('.', '')
nvcc_flags += ['-gencode', 'arch=compute_' + arch + ',code=compute_' + arch]
nvcc_archs_readable += ['compute_' + arch]
return nvcc_flags, nvcc_archs_readable
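# Worked example, traced through the logic above: _nvcc_arch_flags('10.0',
# ['7.5+PTX']) keeps sm 7.5 (CUDA 10.0 allows 3.0 <= arch < 8.0) and returns
# (['-gencode', 'arch=compute_75,code=sm_75',
# '-gencode', 'arch=compute_75,code=compute_75'],
# ['sm_75', 'compute_75'])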
def initialize(*args, **kwargs):
return CudaModule(*args, **kwargs)
|
|
"""The test for light device automation."""
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.websocket_api.const import TYPE_RESULT
from homeassistant.const import CONF_PLATFORM, STATE_OFF, STATE_ON
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
def _same_lists(a, b):
if len(a) != len(b):
return False
for d in a:
if d not in b:
return False
return True
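# Order-insensitive list comparison, e.g. _same_lists([1, 2], [2, 1]) is True.
# The tests below use it instead of == so they do not depend on the ordering
# of websocket results; note it only compares length and membership.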
async def test_websocket_get_actions(hass, hass_ws_client, device_reg, entity_reg):
"""Test we get the expected conditions from a light through websocket."""
await async_setup_component(hass, "device_automation", {})
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create("light", "test", "5678", device_id=device_entry.id)
expected_actions = [
{
"domain": "light",
"type": "turn_off",
"device_id": device_entry.id,
"entity_id": "light.test_5678",
},
{
"domain": "light",
"type": "turn_on",
"device_id": device_entry.id,
"entity_id": "light.test_5678",
},
{
"domain": "light",
"type": "toggle",
"device_id": device_entry.id,
"entity_id": "light.test_5678",
},
]
client = await hass_ws_client(hass)
await client.send_json(
{"id": 1, "type": "device_automation/action/list", "device_id": device_entry.id}
)
msg = await client.receive_json()
assert msg["id"] == 1
assert msg["type"] == TYPE_RESULT
assert msg["success"]
actions = msg["result"]
assert _same_lists(actions, expected_actions)
async def test_websocket_get_conditions(hass, hass_ws_client, device_reg, entity_reg):
"""Test we get the expected conditions from a light through websocket."""
await async_setup_component(hass, "device_automation", {})
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create("light", "test", "5678", device_id=device_entry.id)
expected_conditions = [
{
"condition": "device",
"domain": "light",
"type": "is_off",
"device_id": device_entry.id,
"entity_id": "light.test_5678",
},
{
"condition": "device",
"domain": "light",
"type": "is_on",
"device_id": device_entry.id,
"entity_id": "light.test_5678",
},
]
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 1,
"type": "device_automation/condition/list",
"device_id": device_entry.id,
}
)
msg = await client.receive_json()
assert msg["id"] == 1
assert msg["type"] == TYPE_RESULT
assert msg["success"]
conditions = msg["result"]
assert _same_lists(conditions, expected_conditions)
async def test_websocket_get_triggers(hass, hass_ws_client, device_reg, entity_reg):
"""Test we get the expected triggers from a light through websocket."""
await async_setup_component(hass, "device_automation", {})
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create("light", "test", "5678", device_id=device_entry.id)
expected_triggers = [
{
"platform": "device",
"domain": "light",
"type": "turned_off",
"device_id": device_entry.id,
"entity_id": "light.test_5678",
},
{
"platform": "device",
"domain": "light",
"type": "turned_on",
"device_id": device_entry.id,
"entity_id": "light.test_5678",
},
]
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 1,
"type": "device_automation/trigger/list",
"device_id": device_entry.id,
}
)
msg = await client.receive_json()
assert msg["id"] == 1
assert msg["type"] == TYPE_RESULT
assert msg["success"]
triggers = msg["result"]
assert _same_lists(triggers, expected_triggers)
async def test_websocket_get_action_capabilities(
hass, hass_ws_client, device_reg, entity_reg
):
"""Test we get the expected action capabilities for an alarm through websocket."""
await async_setup_component(hass, "device_automation", {})
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(
"alarm_control_panel", "test", "5678", device_id=device_entry.id
)
hass.states.async_set(
"alarm_control_panel.test_5678", "attributes", {"supported_features": 15}
)
expected_capabilities = {
"arm_away": {"extra_fields": []},
"arm_home": {"extra_fields": []},
"arm_night": {"extra_fields": []},
"disarm": {
"extra_fields": [{"name": "code", "optional": True, "type": "string"}]
},
"trigger": {"extra_fields": []},
}
client = await hass_ws_client(hass)
await client.send_json(
{"id": 1, "type": "device_automation/action/list", "device_id": device_entry.id}
)
msg = await client.receive_json()
assert msg["id"] == 1
assert msg["type"] == TYPE_RESULT
assert msg["success"]
actions = msg["result"]
id = 2
assert len(actions) == 5
for action in actions:
await client.send_json(
{
"id": id,
"type": "device_automation/action/capabilities",
"action": action,
}
)
msg = await client.receive_json()
assert msg["id"] == id
assert msg["type"] == TYPE_RESULT
assert msg["success"]
capabilities = msg["result"]
assert capabilities == expected_capabilities[action["type"]]
id = id + 1
async def test_websocket_get_bad_action_capabilities(
hass, hass_ws_client, device_reg, entity_reg
):
"""Test we get no action capabilities for a non existing domain."""
await async_setup_component(hass, "device_automation", {})
expected_capabilities = {}
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 1,
"type": "device_automation/action/capabilities",
"action": {"domain": "beer"},
}
)
msg = await client.receive_json()
assert msg["id"] == 1
assert msg["type"] == TYPE_RESULT
assert msg["success"]
capabilities = msg["result"]
assert capabilities == expected_capabilities
async def test_websocket_get_no_action_capabilities(
hass, hass_ws_client, device_reg, entity_reg
):
"""Test we get no action capabilities for a domain with no device action capabilities."""
await async_setup_component(hass, "device_automation", {})
expected_capabilities = {}
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 1,
"type": "device_automation/action/capabilities",
"action": {"domain": "deconz"},
}
)
msg = await client.receive_json()
assert msg["id"] == 1
assert msg["type"] == TYPE_RESULT
assert msg["success"]
capabilities = msg["result"]
assert capabilities == expected_capabilities
async def test_websocket_get_condition_capabilities(
hass, hass_ws_client, device_reg, entity_reg
):
"""Test we get the expected condition capabilities for a light through websocket."""
await async_setup_component(hass, "device_automation", {})
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create("light", "test", "5678", device_id=device_entry.id)
expected_capabilities = {
"extra_fields": [
{"name": "for", "optional": True, "type": "positive_time_period_dict"}
]
}
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 1,
"type": "device_automation/condition/list",
"device_id": device_entry.id,
}
)
msg = await client.receive_json()
assert msg["id"] == 1
assert msg["type"] == TYPE_RESULT
assert msg["success"]
conditions = msg["result"]
id = 2
assert len(conditions) == 2
for condition in conditions:
await client.send_json(
{
"id": id,
"type": "device_automation/condition/capabilities",
"condition": condition,
}
)
msg = await client.receive_json()
assert msg["id"] == id
assert msg["type"] == TYPE_RESULT
assert msg["success"]
capabilities = msg["result"]
assert capabilities == expected_capabilities
id = id + 1
async def test_websocket_get_bad_condition_capabilities(
hass, hass_ws_client, device_reg, entity_reg
):
"""Test we get no condition capabilities for a non existing domain."""
await async_setup_component(hass, "device_automation", {})
expected_capabilities = {}
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 1,
"type": "device_automation/condition/capabilities",
"condition": {"domain": "beer"},
}
)
msg = await client.receive_json()
assert msg["id"] == 1
assert msg["type"] == TYPE_RESULT
assert msg["success"]
capabilities = msg["result"]
assert capabilities == expected_capabilities
async def test_websocket_get_no_condition_capabilities(
hass, hass_ws_client, device_reg, entity_reg
):
"""Test we get no condition capabilities for a domain with no device condition capabilities."""
await async_setup_component(hass, "device_automation", {})
expected_capabilities = {}
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 1,
"type": "device_automation/condition/capabilities",
"condition": {"domain": "deconz"},
}
)
msg = await client.receive_json()
assert msg["id"] == 1
assert msg["type"] == TYPE_RESULT
assert msg["success"]
capabilities = msg["result"]
assert capabilities == expected_capabilities
async def test_websocket_get_trigger_capabilities(
hass, hass_ws_client, device_reg, entity_reg
):
"""Test we get the expected trigger capabilities for a light through websocket."""
await async_setup_component(hass, "device_automation", {})
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create("light", "test", "5678", device_id=device_entry.id)
expected_capabilities = {
"extra_fields": [
{"name": "for", "optional": True, "type": "positive_time_period_dict"}
]
}
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 1,
"type": "device_automation/trigger/list",
"device_id": device_entry.id,
}
)
msg = await client.receive_json()
assert msg["id"] == 1
assert msg["type"] == TYPE_RESULT
assert msg["success"]
triggers = msg["result"]
id = 2
assert len(triggers) == 2
for trigger in triggers:
await client.send_json(
{
"id": id,
"type": "device_automation/trigger/capabilities",
"trigger": trigger,
}
)
msg = await client.receive_json()
assert msg["id"] == id
assert msg["type"] == TYPE_RESULT
assert msg["success"]
capabilities = msg["result"]
assert capabilities == expected_capabilities
id = id + 1
async def test_websocket_get_bad_trigger_capabilities(
hass, hass_ws_client, device_reg, entity_reg
):
"""Test we get no trigger capabilities for a non existing domain."""
await async_setup_component(hass, "device_automation", {})
expected_capabilities = {}
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 1,
"type": "device_automation/trigger/capabilities",
"trigger": {"domain": "beer"},
}
)
msg = await client.receive_json()
assert msg["id"] == 1
assert msg["type"] == TYPE_RESULT
assert msg["success"]
capabilities = msg["result"]
assert capabilities == expected_capabilities
async def test_websocket_get_no_trigger_capabilities(
hass, hass_ws_client, device_reg, entity_reg
):
"""Test we get no trigger capabilities for a domain with no device trigger capabilities."""
await async_setup_component(hass, "device_automation", {})
expected_capabilities = {}
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 1,
"type": "device_automation/trigger/capabilities",
"trigger": {"domain": "deconz"},
}
)
msg = await client.receive_json()
assert msg["id"] == 1
assert msg["type"] == TYPE_RESULT
assert msg["success"]
capabilities = msg["result"]
assert capabilities == expected_capabilities
async def test_automation_with_non_existing_integration(hass, caplog):
"""Test device automation with non existing integration."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "hello",
"trigger": {
"platform": "device",
"device_id": "none",
"domain": "beer",
},
"action": {"service": "test.automation", "entity_id": "hello.world"},
}
},
)
assert "Integration 'beer' not found" in caplog.text
async def test_automation_with_integration_without_device_action(hass, caplog):
"""Test automation with integration without device action support."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "hello",
"trigger": {"platform": "event", "event_type": "test_event1"},
"action": {"device_id": "", "domain": "test"},
}
},
)
assert (
"Integration 'test' does not support device automation actions" in caplog.text
)
async def test_automation_with_integration_without_device_condition(hass, caplog):
"""Test automation with integration without device condition support."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "hello",
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": {
"condition": "device",
"device_id": "none",
"domain": "test",
},
"action": {"service": "test.automation", "entity_id": "hello.world"},
}
},
)
assert (
"Integration 'test' does not support device automation conditions"
in caplog.text
)
async def test_automation_with_integration_without_device_trigger(hass, caplog):
"""Test automation with integration without device trigger support."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "hello",
"trigger": {
"platform": "device",
"device_id": "none",
"domain": "test",
},
"action": {"service": "test.automation", "entity_id": "hello.world"},
}
},
)
assert (
"Integration 'test' does not support device automation triggers" in caplog.text
)
async def test_automation_with_bad_action(hass, caplog):
"""Test automation with bad device action."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "hello",
"trigger": {"platform": "event", "event_type": "test_event1"},
"action": {"device_id": "", "domain": "light"},
}
},
)
assert "required key not provided" in caplog.text
async def test_automation_with_bad_condition_action(hass, caplog):
"""Test automation with bad device action."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "hello",
"trigger": {"platform": "event", "event_type": "test_event1"},
"action": {"condition": "device", "device_id": "", "domain": "light"},
}
},
)
assert "required key not provided" in caplog.text
async def test_automation_with_bad_condition(hass, caplog):
"""Test automation with bad device condition."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "hello",
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": {"condition": "device", "domain": "light"},
"action": {"service": "test.automation", "entity_id": "hello.world"},
}
},
)
assert "required key not provided" in caplog.text
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_automation_with_sub_condition(hass, calls):
"""Test automation with device condition under and/or conditions."""
DOMAIN = "light"
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
await hass.async_block_till_done()
ent1, ent2, ent3 = platform.ENTITIES
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": [
{
"condition": "and",
"conditions": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": ent1.entity_id,
"type": "is_on",
},
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": ent2.entity_id,
"type": "is_on",
},
],
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "and {{ trigger.%s }}"
% "}} - {{ trigger.".join(("platform", "event.event_type"))
},
},
},
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": [
{
"condition": "or",
"conditions": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": ent1.entity_id,
"type": "is_on",
},
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": ent2.entity_id,
"type": "is_on",
},
],
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "or {{ trigger.%s }}"
% "}} - {{ trigger.".join(("platform", "event.event_type"))
},
},
},
]
},
)
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_ON
assert hass.states.get(ent2.entity_id).state == STATE_OFF
assert len(calls) == 0
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "or event - test_event1"
hass.states.async_set(ent1.entity_id, STATE_OFF)
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 1
hass.states.async_set(ent2.entity_id, STATE_ON)
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "or event - test_event1"
hass.states.async_set(ent1.entity_id, STATE_ON)
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert len(calls) == 4
assert _same_lists(
[calls[2].data["some"], calls[3].data["some"]],
["or event - test_event1", "and event - test_event1"],
)
async def test_automation_with_bad_sub_condition(hass, caplog):
"""Test automation with bad device condition under and/or conditions."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "hello",
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": {
"condition": "and",
"conditions": [{"condition": "device", "domain": "light"}],
},
"action": {"service": "test.automation", "entity_id": "hello.world"},
}
},
)
assert "required key not provided" in caplog.text
async def test_automation_with_bad_trigger(hass, caplog):
"""Test automation with bad device trigger."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "hello",
"trigger": {"platform": "device", "domain": "light"},
"action": {"service": "test.automation", "entity_id": "hello.world"},
}
},
)
assert "required key not provided" in caplog.text
async def test_websocket_device_not_found(hass, hass_ws_client):
"""Test calling command with unknown device."""
await async_setup_component(hass, "device_automation", {})
client = await hass_ws_client(hass)
await client.send_json(
{"id": 1, "type": "device_automation/action/list", "device_id": "non-existing"}
)
msg = await client.receive_json()
assert msg["id"] == 1
assert not msg["success"]
assert msg["error"] == {"code": "not_found", "message": "Device not found"}
|
|
#!/usr/bin/env python
# Copyright 2002 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains base classes used to parse and convert arguments.
Instead of importing this module directly, it's preferable to import the
flags package and use the aliases defined at the package level.
"""
import csv
import io
import string
import six
from gflags import _helpers
class _ArgumentParserCache(type):
"""Metaclass used to cache and share argument parsers among flags."""
_instances = {}
def __new__(mcs, name, bases, dct):
_helpers.define_both_methods(name, dct, 'Parse', 'parse')
_helpers.define_both_methods(name, dct, 'Type', 'flag_type')
_helpers.define_both_methods(name, dct, 'Convert', 'convert')
return type.__new__(mcs, name, bases, dct)
def __call__(cls, *args, **kwargs):
"""Returns an instance of the argument parser cls.
This method overrides behavior of the __new__ methods in
all subclasses of ArgumentParser (inclusive). If an instance
for cls with the same set of arguments exists, this instance is
returned, otherwise a new instance is created.
If any keyword arguments are defined, or the values in args
are not hashable, this method always returns a new instance of
cls.
Args:
*args: Positional initializer arguments.
**kwargs: Initializer keyword arguments.
Returns:
An instance of cls, shared or new.
"""
if kwargs:
return type.__call__(cls, *args, **kwargs)
else:
instances = cls._instances
key = (cls,) + tuple(args)
try:
return instances[key]
except KeyError:
# No cache entry for key exists, create a new one.
return instances.setdefault(key, type.__call__(cls, *args))
except TypeError:
# An object in args cannot be hashed, always return
# a new instance.
return type.__call__(cls, *args)
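# Illustration of the caching contract above, using parsers defined later in
# this module (hashable positional arguments share one cached instance,
# keyword arguments always produce a fresh one):
#
# assert FloatParser(0, 10) is FloatParser(0, 10)
# assert FloatParser(0, upper_bound=10) is not FloatParser(0, upper_bound=10)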
class ArgumentParser(six.with_metaclass(_ArgumentParserCache, object)):
"""Base class used to parse and convert arguments.
The parse() method checks to make sure that the string argument is a
legal value and converts it to a native type. If the value cannot be
converted, it should raise a 'ValueError' exception with a
human-readable explanation of why the value is illegal.
Subclasses should also define a syntactic_help string which may be
presented to the user to describe the form of the legal values.
Argument parser classes must be stateless, since instances are cached
and shared between flags. Initializer arguments are allowed, but all
member variables must be derived from initializer arguments only.
"""
syntactic_help = ''
def parse(self, argument):
"""Parses the string argument and returns the native value.
By default it returns its argument unmodified.
Args:
argument: string argument passed in the commandline.
Raises:
ValueError: Raised when it fails to parse the argument.
Returns:
The parsed value in native type.
"""
return argument
def flag_type(self):
"""Returns a string representing the type of the flag."""
return 'string'
def _custom_xml_dom_elements(self, doc): # pylint: disable=unused-argument
"""Returns a list of XML DOM elements to add additional flag information.
Args:
doc: A minidom.Document, the DOM document it should create nodes from.
Returns:
A list of minidom.Element.
"""
return []
class _ArgumentSerializerMeta(type):
def __new__(mcs, name, bases, dct):
_helpers.define_both_methods(name, dct, 'Serialize', 'serialize')
return type.__new__(mcs, name, bases, dct)
class ArgumentSerializer(six.with_metaclass(_ArgumentSerializerMeta, object)):
"""Base class for generating string representations of a flag value."""
def serialize(self, value):
return _helpers.StrOrUnicode(value)
class NumericParser(ArgumentParser):
"""Parser of numeric values.
Parsed value may be bounded to a given upper and lower bound.
"""
def is_outside_bounds(self, val):
return ((self.lower_bound is not None and val < self.lower_bound) or
(self.upper_bound is not None and val > self.upper_bound))
def parse(self, argument):
val = self.convert(argument)
if self.is_outside_bounds(val):
raise ValueError('%s is not %s' % (val, self.syntactic_help))
return val
def _custom_xml_dom_elements(self, doc):
elements = []
if self.lower_bound is not None:
elements.append(_helpers.CreateXMLDOMElement(
doc, 'lower_bound', self.lower_bound))
if self.upper_bound is not None:
elements.append(_helpers.CreateXMLDOMElement(
doc, 'upper_bound', self.upper_bound))
return elements
def convert(self, argument):
"""Default implementation: always returns its argument unmodified."""
return argument
class FloatParser(NumericParser):
"""Parser of floating point values.
Parsed value may be bounded to a given upper and lower bound.
"""
number_article = 'a'
number_name = 'number'
syntactic_help = ' '.join((number_article, number_name))
def __init__(self, lower_bound=None, upper_bound=None):
super(FloatParser, self).__init__()
self.lower_bound = lower_bound
self.upper_bound = upper_bound
sh = self.syntactic_help
if lower_bound is not None and upper_bound is not None:
sh = ('%s in the range [%s, %s]' % (sh, lower_bound, upper_bound))
elif lower_bound == 0:
sh = 'a non-negative %s' % self.number_name
elif upper_bound == 0:
sh = 'a non-positive %s' % self.number_name
elif upper_bound is not None:
sh = '%s <= %s' % (self.number_name, upper_bound)
elif lower_bound is not None:
sh = '%s >= %s' % (self.number_name, lower_bound)
self.syntactic_help = sh
def convert(self, argument):
"""Converts argument to a float; raises ValueError on errors."""
return float(argument)
def flag_type(self):
return 'float'
class IntegerParser(NumericParser):
"""Parser of an integer value.
Parsed value may be bounded to a given upper and lower bound.
"""
number_article = 'an'
number_name = 'integer'
syntactic_help = ' '.join((number_article, number_name))
def __init__(self, lower_bound=None, upper_bound=None):
super(IntegerParser, self).__init__()
self.lower_bound = lower_bound
self.upper_bound = upper_bound
sh = self.syntactic_help
if lower_bound is not None and upper_bound is not None:
sh = ('%s in the range [%s, %s]' % (sh, lower_bound, upper_bound))
elif lower_bound == 1:
sh = 'a positive %s' % self.number_name
elif upper_bound == -1:
sh = 'a negative %s' % self.number_name
elif lower_bound == 0:
sh = 'a non-negative %s' % self.number_name
elif upper_bound == 0:
sh = 'a non-positive %s' % self.number_name
elif upper_bound is not None:
sh = '%s <= %s' % (self.number_name, upper_bound)
elif lower_bound is not None:
sh = '%s >= %s' % (self.number_name, lower_bound)
self.syntactic_help = sh
def convert(self, argument):
if isinstance(argument, str):
base = 10
if len(argument) > 2 and argument[0] == '0':
if argument[1] == 'o':
base = 8
elif argument[1] == 'x':
base = 16
return int(argument, base)
else:
return int(argument)
def flag_type(self):
return 'int'
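# Examples of the base detection above: convert('42') == 42,
# convert('0x1A') == 26 and convert('0o17') == 15; non-string inputs go
# straight through int(), so convert(7.9) == 7.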
class BooleanParser(ArgumentParser):
"""Parser of boolean values."""
def convert(self, argument):
"""Converts the argument to a boolean; raise ValueError on errors."""
if isinstance(argument, str):
if argument.lower() in ['true', 't', '1']:
return True
elif argument.lower() in ['false', 'f', '0']:
return False
bool_argument = bool(argument)
if argument == bool_argument:
# The argument is a valid boolean (True, False, 0, or 1), and not just
# something that always converts to bool (list, string, int, etc.).
return bool_argument
raise ValueError('Non-boolean argument to boolean flag', argument)
def parse(self, argument):
val = self.convert(argument)
return val
def flag_type(self):
return 'bool'
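# Examples of the conversion above: 'true', 'T' and '1' parse to True and
# 'f' to False; the ints 0 and 1 pass the equality check, while 2 raises
# ValueError because bool(2) == True but 2 != True.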
class EnumParser(ArgumentParser):
"""Parser of a string enum value (a string value from a given set).
If enum_values (see below) is not specified, any string is allowed.
"""
def __init__(self, enum_values=None, case_sensitive=True):
"""Initialize EnumParser.
Args:
enum_values: Array of values in the enum.
case_sensitive: Whether or not the enum is to be case-sensitive.
"""
super(EnumParser, self).__init__()
self.enum_values = enum_values
self.case_sensitive = case_sensitive
def parse(self, argument):
"""Determine validity of argument and return the correct element of enum.
If self.enum_values is empty, then all arguments are valid and argument
will be returned.
Otherwise, if argument matches an element in enum, then the first
matching element will be returned.
Args:
argument: The supplied flag value.
Returns:
The matching element from enum_values, or argument if enum_values is
empty.
Raises:
ValueError: enum_values was non-empty, but argument didn't match
anything in enum.
"""
if not self.enum_values:
return argument
elif self.case_sensitive:
if argument not in self.enum_values:
raise ValueError('value should be one of <%s>' %
'|'.join(self.enum_values))
else:
return argument
else:
if argument.upper() not in [value.upper() for value in self.enum_values]:
raise ValueError('value should be one of <%s>' %
'|'.join(self.enum_values))
else:
return [value for value in self.enum_values
if value.upper() == argument.upper()][0]
def flag_type(self):
return 'string enum'
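# Example of the case-insensitive branch above (illustrative values):
# EnumParser(['apple', 'Banana'], case_sensitive=False).parse('BANANA')
# returns the canonical 'Banana', whereas the case-sensitive default would
# raise ValueError for the same input.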
class ListSerializer(ArgumentSerializer):
def __init__(self, list_sep):
self.list_sep = list_sep
def serialize(self, value):
return self.list_sep.join([_helpers.StrOrUnicode(x) for x in value])
class CsvListSerializer(ArgumentSerializer):
def __init__(self, list_sep):
self.list_sep = list_sep
def serialize(self, value):
"""Serialize a list as a string, if possible, or as a unicode string."""
if six.PY2:
# In Python2 csv.writer doesn't accept unicode, so we convert to UTF-8.
output = io.BytesIO()
csv.writer(output).writerow([unicode(x).encode('utf-8') for x in value])
serialized_value = output.getvalue().decode('utf-8').strip()
else:
# In Python3 csv.writer expects a text stream.
output = io.StringIO()
csv.writer(output).writerow([str(x) for x in value])
serialized_value = output.getvalue().strip()
# The returned value must be pure ASCII or unicode so that the generated
# XML help can be encoded cleanly.
return _helpers.StrOrUnicode(serialized_value)
class BaseListParser(ArgumentParser):
"""Base class for a parser of lists of strings.
To extend, inherit from this class; from the subclass __init__, call
BaseListParser.__init__(self, token, name)
where token is a character used to tokenize, and name is a description
of the separator.
"""
def __init__(self, token=None, name=None):
assert name
super(BaseListParser, self).__init__()
self._token = token
self._name = name
self.syntactic_help = 'a %s separated list' % self._name
def parse(self, argument):
if isinstance(argument, list):
return argument
elif not argument:
return []
else:
return [s.strip() for s in argument.split(self._token)]
def flag_type(self):
return '%s separated list of strings' % self._name
class ListParser(BaseListParser):
"""Parser for a comma-separated list of strings."""
def __init__(self):
BaseListParser.__init__(self, ',', 'comma')
def parse(self, argument):
"""Override to support full CSV syntax."""
if isinstance(argument, list):
return argument
elif not argument:
return []
else:
try:
return [s.strip() for s in list(csv.reader([argument], strict=True))[0]]
except csv.Error as e:
# Provide a helpful report for case like
# --listflag="$(printf 'hello,\nworld')"
# IOW, list flag values containing naked newlines. This error
# was previously "reported" by allowing csv.Error to
# propagate.
raise ValueError('Unable to parse the value %r as a %s: %s'
% (argument, self.flag_type(), e))
def _custom_xml_dom_elements(self, doc):
elements = super(ListParser, self)._custom_xml_dom_elements(doc)
elements.append(_helpers.CreateXMLDOMElement(
doc, 'list_separator', repr(',')))
return elements
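# Example of the CSV-aware parsing above versus the plain base parser:
# ListParser().parse('a,"b,c",d') yields ['a', 'b,c', 'd'], keeping the
# quoted comma intact, where a plain comma split would produce four items.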
class WhitespaceSeparatedListParser(BaseListParser):
"""Parser for a whitespace-separated list of strings."""
def __init__(self, comma_compat=False):
"""Initializer.
Args:
comma_compat: bool - Whether to support comma as an additional separator.
If false then only whitespace is supported. This is intended only for
backwards compatibility with flags that used to be comma-separated.
"""
self._comma_compat = comma_compat
name = 'whitespace or comma' if self._comma_compat else 'whitespace'
BaseListParser.__init__(self, None, name)
def parse(self, argument):
"""Override to support comma compatibility."""
if isinstance(argument, list):
return argument
elif not argument:
return []
else:
if self._comma_compat:
argument = argument.replace(',', ' ')
return argument.split()
def _custom_xml_dom_elements(self, doc):
elements = super(WhitespaceSeparatedListParser, self
)._custom_xml_dom_elements(doc)
separators = list(string.whitespace)
if self._comma_compat:
separators.append(',')
separators.sort()
for sep_char in separators:
elements.append(_helpers.CreateXMLDOMElement(
doc, 'list_separator', repr(sep_char)))
return elements
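# Example of the comma-compat mode above:
# WhitespaceSeparatedListParser(comma_compat=True).parse('a b,c') returns
# ['a', 'b', 'c'], since commas are rewritten to spaces before splitting.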
|
|
#!/usr/bin/env python
############################################################
# ConfigScanner - A buildbot config scanner and updater #
# Also does ReviewBoard (and at some point Bugzilla?) #
# Built for Python 3, works with 2.7 with a few tweaks #
############################################################
SVN='/usr/bin/svn'
BUILDBOT='/x1/buildmaster/bin/buildbot'
buildbotDir = "/x1/buildmaster/master1"
blamelist = ["users@infra.apache.org"]
# SMTP Lib
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from smtplib import SMTPException
# Threading
from threading import Thread
from datetime import datetime
# Rest
import sys, os
import argparse, grp, pwd, shutil
version = 2
if sys.hexversion < 0x03000000:
print("Using Python 2...")
import json, httplib, urllib, urllib2, re, base64, sys, os, time, atexit, signal, logging, socket, subprocess
socket._fileobject.default_bufsize = 0
else:
print("Using Python 3")
version = 3
import json, httplib2, http.client, urllib.request, urllib.parse, re, base64, sys, os, time, atexit, signal, logging, subprocess
PROJECTS_CONF = ('infrastructure/buildbot/aegis/buildmaster/master1/projects/'
'projects.conf')
############################################
# Get path, set up logging and read config #
############################################
debug = False
logging.basicConfig(filename='configscanner.log', format='[%(asctime)s]: %(message)s', level=logging.INFO)
path = os.path.dirname(sys.argv[0])
if len(path) == 0:
path = "."
def sendEmail(rcpt, subject, message):
sender = "<buildbot@buildbot-vm.apache.org>"
receivers = [rcpt]
msg = """From: %s
To: %s
Subject: %s
%s
With regards,
BuildBot
""" % (sender, rcpt, subject, message)
try:
smtpObj = smtplib.SMTP("localhost")
smtpObj.sendmail(sender, receivers, msg)
except SMTPException:
raise Exception("Could not send email - SMTP server down??")
####################
# Helper functions #
####################
# read_chunk: iterator for reading chunks from the stream
# since this is all handled via urllib now, this is quite rudimentary
def read_chunk(req):
while True:
try:
line = req.readline().strip()
if line:
yield line
else:
print("No more lines?")
break
except Exception as info:
logging.warning("Error reading from stream: %s", info)
break
return
#########################
# Main listener program #
#########################
# PubSub class: handles connecting to a pubsub service and checking commits
class PubSubClient(object):
def start(self):
broken = False
while True:
logging.info("Connecting to " + self.url + "...")
self.req = None
while not self.req:
try:
if version == 3:
self.req = urllib.request.urlopen(self.url, None, 30)
else:
self.req = urllib2.urlopen(self.url, None, 30)
logging.info("Connected to " + self.url + ", reading stream")
except Exception:
logging.warning("Could not connect to %s, retrying in 30 seconds..." % self.url)
time.sleep(30)
continue
for line in read_chunk(self.req):
# Strip away any old pre-0.9 commas from gitpubsub chunks and \0 bytes from svnpubsub chunks.
if version == 3:
line = str(line, encoding='ascii').rstrip('\r\n,').replace('\x00', '')
else:
line = str(line).rstrip('\r\n,').replace('\x00', '')
try:
obj = json.loads(line)
if "commit" in obj and "repository" in obj['commit']:
logging.info("Found a commit in %s", obj['commit']['repository'])
if obj['commit']['repository'] == "git":
# grab some vars
commit = obj['commit']
project = commit['project']
body = commit['body']
sha = commit['sha']
ssha = commit['hash']
author = commit['author']
email = commit['email']
ref = commit['ref']
# If it's not git (and not JIRA), it must be subversion
elif obj['commit']['repository']:
#Grab some vars
commit = obj['commit']
body = commit['log']
svnuser = commit['committer']
revision = commit['id']
email = svnuser + "@apache.org"
os.chdir(buildbotDir)
# get current revision; assumed good
# we do this in outer try block as failure is fatal
# --show-item not supported by current SVN client
#before=subprocess.check_output([SVN,'info','--show-item','last-changed-revision', 'projects']).rstrip()
# Use this instead until SVN is updated
before = re.search(r"^Last Changed Rev: (\d+)$", subprocess.check_output([SVN, 'info', 'projects']).decode('utf-8'), re.M).group(1)  # decode: check_output returns bytes on Python 3
for path in commit['changed']:
m = re.match(r"infrastructure/buildbot/aegis/buildmaster/master1/projects/(.+\.conf)", path)
if m:
# N.B. this loop only runs on first match as it processes the entire revision at once
time.sleep(3) # why do we wait here?
logging.info("Validating new revision %s (was %s)" % (revision, before))
os.environ['HOME'] = '/x1/buildmaster' # where SVN settings are found
try:
logging.info("Checking out new config")
subprocess.check_output([SVN, 'update', '-r', "%u" % revision, 'projects'])
logging.info("Running config check")
subprocess.check_output([BUILDBOT, "checkconfig"], stderr=subprocess.STDOUT)
logging.info("Check passed, apply the new config")
subprocess.check_output([BUILDBOT, "reconfig"], stderr=subprocess.STDOUT)
if broken: # has this fixed a broken config?
broken = False
blamelist.append(email)
try: # Don't let mail failure cause the update to be treated as failed
for rec in blamelist:
sendEmail(
rec,
"Buildbot configuration back to normal in %s" % revision,
"Looks like things got fixed, yay!"
)
except Exception as e:
logging.warning("Failed to send recovery mail: %s", e)
blamelist.remove(email)
except subprocess.CalledProcessError as err:
broken = True
logging.warning("Config check returned code %i" % err.returncode)
logging.warning(err.output)
# Do this first in case mail fails
logging.info("Cleaning up...")
subprocess.call([SVN, 'update', '-r', before, 'projects'])
blamelist.append(email)
out = """
The error(s) below happened while validating the committed changes.
As a precaution, this commit has not yet been applied to BuildBot.
Please correct the below and commit your fixes:
%s
""" % err.output
for rec in blamelist:
sendEmail(
rec,
"Buildbot configuration failure in %s" % revision,
out
)
blamelist.remove(email)
logging.info("All done, back to listening for changes :)")
break # we process the whole revision on the first match
except Exception as detail:  # ValueError is already an Exception subclass
logging.warning("Bad JSON or something: %s", detail)
continue
logging.info("Disconnected from %s, reconnecting" % self.url)
################
# Main program #
################
def main():
# Start the svn thread
svn_thread = PubSubClient()
svn_thread.url = "http://svn-master.apache.org:2069/commits/*"
svn_thread.start()
### Run the thing ###
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Command line options.')
parser.add_argument('--user', dest='user', type=str, nargs=1,
help='Optional user to run ConfigScanner as')
parser.add_argument('--group', dest='group', type=str, nargs=1,
help='Optional group to run ConfigScanner as')
parser.add_argument('--pidfile', dest='pidfile', type=str, nargs=1,
help='Optional pid file location')
parser.add_argument('--daemonize', dest='daemon', action='store_true',
help='Run as a daemon')
parser.add_argument('--stop', dest='kill', action='store_true',
help='Kill the currently running ConfigScanner process')
args = parser.parse_args()
if args.group and len(args.group) > 0:
gid = grp.getgrnam(args.group[0])
os.setgid(gid[2])
logging.getLogger().addHandler(logging.StreamHandler())
main()
|
|
#!/usr/bin/env python2
# ENCODE_map 0.0.1
import os
import subprocess
import shlex
from multiprocessing import cpu_count
import logging
import sys
import json
logger = logging.getLogger(__name__)
logger.propagate = False
logger.setLevel(logging.INFO)
BWA_PATH = "bwa"
TRIMMOMATIC_PATH = "/".join([
os.getenv('TRIMMOMATIC_HOME', "."),
"trimmomatic-0.36.jar"
])
# the order of this list is important.
# strip_extensions strips from the right inward, so
# the expected right-most extensions should appear first (like .gz)
STRIP_EXTENSIONS = ['.gz', '.fq', '.fastq', '.fa', '.fasta']
def strip_extensions(filename, extensions):
basename = filename.split('/')[-1]
for extension in extensions:
basename = basename.rpartition(extension)[0] or basename
return basename
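# Example of the right-to-left stripping above: with STRIP_EXTENSIONS as
# defined, strip_extensions('sample.fastq.gz', STRIP_EXTENSIONS) returns
# 'sample' ('.gz' is removed first, then '.fastq'); a name with no known
# extension is returned unchanged.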
def resolve_reference(reference_tar_filename, reference_dirname):
if reference_tar_filename.endswith('.gz') or reference_tar_filename.endswith('.tgz'):
tar_command = \
'tar -xzv --no-same-owner --no-same-permissions -C %s -f %s' \
% (reference_dirname, reference_tar_filename)
else:
tar_command = \
'tar -xv --no-same-owner --no-same-permissions -C %s -f %s' \
% (reference_dirname, reference_tar_filename)
logger.info("Unpacking %s with %s" % (reference_tar_filename, tar_command))
print(subprocess.check_output(shlex.split(tar_command)))
# assume the reference file is the only .fa or .fna file
filename = next((f for f in os.listdir(reference_dirname) if f.endswith('.fa') or f.endswith('.fna') or f.endswith('.fa.gz') or f.endswith('.fna.gz')), None)
return '/'.join([reference_dirname, filename])
def crop(reads1_file, reads2_file, crop_length, debug):
if debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
if crop_length == 'native':
local_copy_reads1 = reads1_file.split('/')[-1]
subprocess.check_output(shlex.split('cp %s %s' % (reads1_file, local_copy_reads1)))
local_copy_reads2 = None
if reads2_file:
local_copy_reads2 = reads2_file.split('/')[-1]
subprocess.check_output(shlex.split('cp %s %s' % (reads2_file, local_copy_reads2)))
output = dict(zip(
["cropped_reads1", "cropped_reads2"], [local_copy_reads1, local_copy_reads2]))
else:
reads1_filename = reads1_file
reads1_basename = strip_extensions(reads1_filename, STRIP_EXTENSIONS)
if reads2_file:
end_string = "PE"
reads2_filename = reads2_file
reads2_basename = \
strip_extensions(reads2_filename, STRIP_EXTENSIONS)
output_fwd_paired_filename = reads1_basename + '-crop-paired.fq.gz'
output_fwd_unpaired_filename = \
reads1_basename + '-crop-unpaired.fq.gz'
output_rev_paired_filename = reads2_basename + '-crop-paired.fq.gz'
output_rev_unpaired_filename = \
reads2_basename + '-crop-unpaired.fq.gz'
SE_output_filename = None
else:
end_string = "SE"
reads2_filename = None
reads2_basename = None
output_fwd_paired_filename = None
output_fwd_unpaired_filename = None
output_rev_paired_filename = None
output_rev_unpaired_filename = None
SE_output_filename = reads1_basename + "-crop.fq.gz"
crop_command = ' '.join([s for s in [
'java -jar',
TRIMMOMATIC_PATH,
end_string,
'-threads %d' % (cpu_count()),
reads1_filename,
reads2_filename,
SE_output_filename,
output_fwd_paired_filename,
output_fwd_unpaired_filename,
output_rev_paired_filename,
output_rev_unpaired_filename,
'MINLEN:%s' % (crop_length),
'CROP:%s' % (crop_length)]
if s])
        logger.info("Cropping with: %s" % (crop_command))
print(subprocess.check_output(shlex.split(crop_command)))
print(subprocess.check_output(shlex.split('ls -l')))
if SE_output_filename:
SE_output = SE_output_filename
cropped_reads = [SE_output]
else:
output_fwd_paired = output_fwd_paired_filename
output_rev_paired = output_rev_paired_filename
cropped_reads = [
output_fwd_paired,
output_rev_paired]
output = dict(zip(["cropped_reads1", "cropped_reads2"], cropped_reads))
logger.info("returning from crop with output %s" % (output))
return output
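# For paired-end reads cropped to 36 bp on an 8-core machine, crop_command
# comes out like (illustrative paths and values):
#   java -jar $TRIMMOMATIC_HOME/trimmomatic-0.36.jar PE -threads 8 \
#       r1.fq.gz r2.fq.gz \
#       r1-crop-paired.fq.gz r1-crop-unpaired.fq.gz \
#       r2-crop-paired.fq.gz r2-crop-unpaired.fq.gz \
#       MINLEN:36 CROP:36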
def process(reads_file, reference_tar, bwa_aln_params, debug):
# reads_file, reference_tar should be links to file objects.
# reference_tar should be a tar of files generated by bwa index and
# the tar should be uncompressed to avoid repeating the decompression.
if debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
bwa = BWA_PATH
logger.info("In process with bwa %s" % (bwa))
# Generate filename strings and download the files to the local filesystem
reads_filename = reads_file
reads_basename = strip_extensions(reads_filename, STRIP_EXTENSIONS)
reference_tar_filename = reference_tar
reference_dirname = '.'
reference_filename = \
resolve_reference(reference_tar_filename, reference_dirname)
logger.info("Using reference file: %s" % (reference_filename))
print(subprocess.check_output('ls -l', shell=True))
# generate the suffix array index file
sai_filename = '%s.sai' % (reads_basename)
with open(sai_filename, 'w') as sai_file:
# Build the bwa command and call bwa
bwa_command = "%s aln %s -t %d %s %s" \
% (bwa, bwa_aln_params, cpu_count(),
reference_filename, reads_filename)
logger.info("Running bwa with %s" % (bwa_command))
subprocess.check_call(shlex.split(bwa_command), stdout=sai_file)
print(subprocess.check_output('ls -l', shell=True))
    # In the original DNAnexus app the suffix array was uploaded to the
    # project; here the local filename is returned directly.
    logger.info("Produced suffix array %s" % (sai_filename))
    sai_dxfile = sai_filename
output = {"suffix_array_index": sai_dxfile}
logger.info("Returning from process with %s" % (output))
return output
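# With the default parameters from __main__, the bwa call inside process()
# looks like (illustrative filenames):
#   bwa aln -q 5 -l 32 -k 2 -t 8 GRCh38.fa reads.fq > reads.sai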
# Always only reads1, because each end is mapped independently;
# the code should probably be updated accordingly.
def main(crop_length, reference_tar,
bwa_aln_params, debug, reads1, reads2):
    # Main entry point. In the original DNAnexus app, parameter defaults came
    # from dxapp.json and reads1/reads2/reference_tar were DNAnexus file
    # links; here they are local file paths (reads2 may be None).
# create a file handler
handler = logging.FileHandler('mapping.log')
if debug:
handler.setLevel(logging.DEBUG)
else:
handler.setLevel(logging.INFO)
logger.addHandler(handler)
    # The original app spawned one or two subjobs (single- or paired-end,
    # respectively) and could also have chunked the reads across more
    # subjobs; here crop() and process() are called directly and all files
    # stay on the local filesystem.
paired_end = reads2 is not None
if crop_length == 'native':
crop_subjob = None
local_copy_reads1 = reads1.split('/')[-1]
subprocess.check_output(shlex.split('cp %s %s' % (reads1, local_copy_reads1)))
local_copy_reads2 = None
if paired_end:
local_copy_reads2 = reads2.split('/')[-1]
subprocess.check_output(shlex.split('cp %s %s' % (reads2, local_copy_reads2)))
unmapped_reads = [local_copy_reads1, local_copy_reads2]
else:
crop_subjob_input = {
"reads1_file": reads1,
"reads2_file": reads2,
"crop_length": crop_length,
"debug": debug
}
logger.info("Crop job input: %s" % (crop_subjob_input))
crop_subjob = crop(reads1, reads2, crop_length, debug)
unmapped_reads = [crop_subjob.get("cropped_reads1")]
        if paired_end:
            unmapped_reads.append(crop_subjob.get("cropped_reads2"))
unmapped_reads = [r for r in unmapped_reads if r]
    # Collect the suffix array index produced for each read file; the
    # original code silently discarded the result of process().
    suffix_array_indexes = []
    for reads in unmapped_reads:
        mapping_subjob_input = {
            "reads_file": reads,
            "reference_tar": reference_tar,
            "bwa_aln_params": bwa_aln_params,
            "debug": debug
        }
        logger.info("Mapping job input: %s" % (mapping_subjob_input))
        mapping_output = process(reads, reference_tar, bwa_aln_params, debug)
        suffix_array_indexes.append(mapping_output.get("suffix_array_index"))
    output = {
        "reference": reference_tar,
        "reads1_file": reads1,
        "reads2_file": reads2,
        "crop_length": crop_length,
        "paired_end": paired_end,
        "suffix_array_indexes": suffix_array_indexes
    }
with open('mapping.json', 'w') as f:
json.dump(output, f, sort_keys=True, indent=4, separators=(',', ': '))
logger.info("Exiting mapping with output: %s" % (output))
return output
if __name__ == '__main__':
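    # Expected invocation (assumed from the argv order below):
    #   python ENCODE_map.py <reference_tar> <crop_length> <reads1> [<reads2>]
    # bwa aln parameters default to "-q 5 -l 32 -k 2" and debug to False.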
if len(sys.argv) == 4:
main(sys.argv[2], sys.argv[1], "-q 5 -l 32 -k 2", False, sys.argv[3], None)
else:
main(sys.argv[2], sys.argv[1], "-q 5 -l 32 -k 2", False, sys.argv[3], sys.argv[4])
|