# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests the Swift backend store"""
import copy
import fixtures
import hashlib
import httplib
import mock
import tempfile
import uuid
from oslo_config import cfg
from oslo_utils import units
from oslotest import moxstubout
import requests_mock
import six
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
import StringIO
import swiftclient
from glance_store._drivers.swift import store as swift
from glance_store import backend
from glance_store import BackendException
from glance_store import capabilities
from glance_store.common import auth
from glance_store.common import utils
from glance_store import exceptions
from glance_store import location
from glance_store.tests import base
from tests.unit import test_store_capabilities
CONF = cfg.CONF

# Callables, not values: every use site in this module interpolates the
# lambda object itself ('... %s' % FAKE_UUID), whose repr is stable for the
# process lifetime, so fixtures and tests agree on the same object key.
FAKE_UUID = lambda: str(uuid.uuid4())
FAKE_UUID2 = lambda: str(uuid.uuid4())

Store = swift.Store
FIVE_KB = 5 * units.Ki
FIVE_GB = 5 * units.Gi
MAX_SWIFT_OBJECT_SIZE = FIVE_GB

# Global counter of swiftclient put_object calls; reset by individual tests.
SWIFT_PUT_OBJECT_CALLS = 0

# Base store configuration shared (via deepcopy) by most tests.
# NOTE: the original literal repeated 'swift_store_auth_address' and
# 'swift_store_container' with identical values; Python keeps only the last
# occurrence of a duplicated dict key, so the dead duplicates are dropped.
SWIFT_CONF = {'swift_store_auth_address': 'localhost:8080',
              'swift_store_container': 'glance',
              'swift_store_user': 'user',
              'swift_store_key': 'key',
              'swift_store_retry_get_count': 1,
              'default_swift_reference': 'ref1'
              }
# We stub out as little as possible to ensure that the code paths
# between swift and swiftclient are tested
# thoroughly
def stub_out_swiftclient(stubs, swift_store_auth_version):
    """Replace swiftclient.client's module-level API with in-memory fakes.

    The fakes emulate one Swift account holding a 'glance' container that is
    pre-populated with two 5 KB objects (one plain object keyed by FAKE_UUID
    and one flagged as a static large object keyed by FAKE_UUID2), so the
    store driver can be exercised without a real cluster.

    :param stubs: mox stub manager (provides .Set) used to patch
                  swiftclient.client in place
    :param swift_store_auth_version: auth version string the fake get_auth
                                     will accept; any other version raises
    """
    fixture_containers = ['glance']
    fixture_container_headers = {}
    # Per-object header dicts, keyed by "<container>/<object-name>".
    fixture_headers = {
        'glance/%s' % FAKE_UUID: {
            'content-length': FIVE_KB,
            'etag': 'c2e5db72bd7fd153f53ede5da5a06de3'
        },
        'glance/%s' % FAKE_UUID2: {'x-static-large-object': 'true', },
    }
    # Per-object payloads (StringIO), keyed the same way as fixture_headers.
    fixture_objects = {'glance/%s' % FAKE_UUID: six.StringIO("*" * FIVE_KB),
                       'glance/%s' % FAKE_UUID2: six.StringIO("*" * FIVE_KB), }

    def fake_head_container(url, token, container, **kwargs):
        # 404 for unknown containers, shared header dict otherwise.
        if container not in fixture_containers:
            msg = "No container %s found" % container
            raise swiftclient.ClientException(msg,
                                              http_status=httplib.NOT_FOUND)
        return fixture_container_headers

    def fake_put_container(url, token, container, **kwargs):
        fixture_containers.append(container)

    def fake_post_container(url, token, container, headers, http_conn=None):
        # POST merges the given headers into the single shared container.
        for key, value in six.iteritems(headers):
            fixture_container_headers[key] = value

    def fake_put_object(url, token, container, name, contents, **kwargs):
        # PUT returns the ETag header for the newly-added object
        # Large object manifest...
        global SWIFT_PUT_OBJECT_CALLS
        SWIFT_PUT_OBJECT_CALLS += 1
        CHUNKSIZE = 64 * units.Ki
        fixture_key = "%s/%s" % (container, name)
        if fixture_key not in fixture_headers:
            if kwargs.get('headers'):
                # Manifest PUT: record headers only; there is no payload.
                etag = kwargs['headers']['ETag']
                manifest = kwargs.get('headers').get('X-Object-Manifest')
                fixture_headers[fixture_key] = {'manifest': True,
                                                'etag': etag,
                                                'x-object-manifest': manifest}
                fixture_objects[fixture_key] = None
                return etag
            if hasattr(contents, 'read'):
                # File-like payload: copy in CHUNKSIZE pieces, md5 as we go.
                fixture_object = six.StringIO()
                chunk = contents.read(CHUNKSIZE)
                checksum = hashlib.md5()
                while chunk:
                    fixture_object.write(chunk)
                    checksum.update(chunk)
                    chunk = contents.read(CHUNKSIZE)
                etag = checksum.hexdigest()
            else:
                # Raw string payload.
                fixture_object = six.StringIO(contents)
                etag = hashlib.md5(fixture_object.getvalue()).hexdigest()
            # NOTE: .len is the py2 StringIO length attribute.
            read_len = fixture_object.len
            if read_len > MAX_SWIFT_OBJECT_SIZE:
                msg = ('Image size:%d exceeds Swift max:%d' %
                       (read_len, MAX_SWIFT_OBJECT_SIZE))
                raise swiftclient.ClientException(
                    msg, http_status=httplib.REQUEST_ENTITY_TOO_LARGE)
            fixture_objects[fixture_key] = fixture_object
            fixture_headers[fixture_key] = {
                'content-length': read_len,
                'etag': etag}
            return etag
        else:
            # Re-PUT of an existing key is a 409, like real Swift.
            msg = ("Object PUT failed - Object with key %s already exists"
                   % fixture_key)
            raise swiftclient.ClientException(msg,
                                              http_status=httplib.CONFLICT)

    def fake_get_object(url, token, container, name, **kwargs):
        # GET returns the tuple (list of headers, file object)
        fixture_key = "%s/%s" % (container, name)
        if fixture_key not in fixture_headers:
            msg = "Object GET failed"
            raise swiftclient.ClientException(msg,
                                              http_status=httplib.NOT_FOUND)

        byte_range = None
        headers = kwargs.get('headers', dict())
        if headers is not None:
            # Header names are matched case-insensitively.
            headers = dict((k.lower(), v) for k, v in six.iteritems(headers))
            if 'range' in headers:
                byte_range = headers.get('range')

        fixture = fixture_headers[fixture_key]
        if 'manifest' in fixture:
            # Large object manifest... we return a file containing
            # all objects with prefix of this fixture key
            chunk_keys = sorted([k for k in fixture_headers.keys()
                                 if k.startswith(fixture_key) and
                                 k != fixture_key])
            result = six.StringIO()
            for key in chunk_keys:
                result.write(fixture_objects[key].getvalue())
        else:
            result = fixture_objects[fixture_key]

        if byte_range is not None:
            # Only the open-ended 'bytes=N-' form is emulated here.
            start = int(byte_range.split('=')[1].strip('-'))
            result = six.StringIO(result.getvalue()[start:])
            fixture_headers[fixture_key]['content-length'] = len(
                result.getvalue())

        return fixture_headers[fixture_key], result

    def fake_head_object(url, token, container, name, **kwargs):
        # HEAD returns the list of headers for an object
        try:
            fixture_key = "%s/%s" % (container, name)
            return fixture_headers[fixture_key]
        except KeyError:
            msg = "Object HEAD failed - Object does not exist"
            raise swiftclient.ClientException(msg,
                                              http_status=httplib.NOT_FOUND)

    def fake_delete_object(url, token, container, name, **kwargs):
        # DELETE returns nothing
        fixture_key = "%s/%s" % (container, name)
        if fixture_key not in fixture_headers:
            msg = "Object DELETE failed - Object does not exist"
            raise swiftclient.ClientException(msg,
                                              http_status=httplib.NOT_FOUND)
        else:
            del fixture_headers[fixture_key]
            del fixture_objects[fixture_key]

    def fake_http_connection(*args, **kwargs):
        return None

    def fake_get_auth(url, user, key, auth_version, **kwargs):
        if url is None:
            return None, None
        if 'http' in url and '://' not in url:
            raise ValueError('Invalid url %s' % url)
        # Check the auth version against the configured value
        if swift_store_auth_version != auth_version:
            msg = 'AUTHENTICATION failed (version mismatch)'
            raise swiftclient.ClientException(msg)
        # Tests never inspect the endpoint/token, so None is sufficient.
        return None, None

    stubs.Set(swiftclient.client,
              'head_container', fake_head_container)
    stubs.Set(swiftclient.client,
              'put_container', fake_put_container)
    stubs.Set(swiftclient.client,
              'post_container', fake_post_container)
    stubs.Set(swiftclient.client,
              'put_object', fake_put_object)
    stubs.Set(swiftclient.client,
              'delete_object', fake_delete_object)
    stubs.Set(swiftclient.client,
              'head_object', fake_head_object)
    stubs.Set(swiftclient.client,
              'get_object', fake_get_object)
    stubs.Set(swiftclient.client,
              'get_auth', fake_get_auth)
    stubs.Set(swiftclient.client,
              'http_connection', fake_http_connection)
class SwiftTests(object):
    """Swift store tests shared by the auth-v1 and auth-v2 test cases.

    Mix-in only: the concrete TestCase is expected to provide self.conf,
    self.config(), self.stubs and a configured self.store, and to have
    patched swiftclient via stub_out_swiftclient() in its setUp().
    """

    @property
    def swift_store_user(self):
        # Single-tenant style credential used to build swift:// URIs.
        return 'tenant:user1'

    def test_get_size(self):
        """
        Test that we can get the size of an object in the swift store
        """
        uri = "swift://%s:key@auth_address/glance/%s" % (
            self.swift_store_user, FAKE_UUID)
        loc = location.get_location_from_uri(uri, conf=self.conf)
        image_size = self.store.get_size(loc)
        self.assertEqual(image_size, 5120)

    def test_get_size_with_multi_tenant_on(self):
        """Test that single tenant uris work with multi tenant on."""
        uri = ("swift://%s:key@auth_address/glance/%s" %
               (self.swift_store_user, FAKE_UUID))
        self.config(swift_store_multi_tenant=True)
        # NOTE(markwash): ensure the image is found
        ctxt = mock.MagicMock()
        size = backend.get_size_from_backend(uri, context=ctxt)
        self.assertEqual(size, 5120)

    def test_get(self):
        """Test a "normal" retrieval of an image in chunks."""
        uri = "swift://%s:key@auth_address/glance/%s" % (
            self.swift_store_user, FAKE_UUID)
        loc = location.get_location_from_uri(uri, conf=self.conf)
        (image_swift, image_size) = self.store.get(loc)
        self.assertEqual(image_size, 5120)

        expected_data = "*" * FIVE_KB
        data = ""

        for chunk in image_swift:
            data += chunk
        self.assertEqual(expected_data, data)

    def test_get_with_retry(self):
        """
        Test a retrieval where Swift does not get the full image in a single
        request.
        """
        uri = "swift://%s:key@auth_address/glance/%s" % (
            self.swift_store_user, FAKE_UUID)
        loc = location.get_location_from_uri(uri, conf=self.conf)
        ctxt = mock.MagicMock()
        (image_swift, image_size) = self.store.get(loc, context=ctxt)
        resp_full = ''.join([chunk for chunk in image_swift.wrapped])
        # Truncate the body to half (py2 integer division) to simulate a
        # dropped connection; the retry iterator should fetch the rest
        # via a ranged GET.
        resp_half = resp_full[:len(resp_full) / 2]
        image_swift.wrapped = swift.swift_retry_iter(resp_half, image_size,
                                                     self.store,
                                                     loc.store_location,
                                                     ctxt)
        self.assertEqual(image_size, 5120)

        expected_data = "*" * FIVE_KB
        data = ""

        for chunk in image_swift:
            data += chunk
        self.assertEqual(expected_data, data)

    def test_get_with_http_auth(self):
        """
        Test a retrieval from Swift with an HTTP authurl. This is
        specified either via a Location header with swift+http:// or using
        http:// in the swift_store_auth_address config value
        """
        loc = location.get_location_from_uri(
            "swift+http://%s:key@auth_address/glance/%s" %
            (self.swift_store_user, FAKE_UUID), conf=self.conf)

        ctxt = mock.MagicMock()
        (image_swift, image_size) = self.store.get(loc, context=ctxt)
        self.assertEqual(image_size, 5120)

        expected_data = "*" * FIVE_KB
        data = ""

        for chunk in image_swift:
            data += chunk
        self.assertEqual(expected_data, data)

    def test_get_non_existing(self):
        """
        Test that trying to retrieve a swift that doesn't exist
        raises an error
        """
        loc = location.get_location_from_uri(
            "swift://%s:key@authurl/glance/noexist" % (self.swift_store_user),
            conf=self.conf)
        self.assertRaises(exceptions.NotFound,
                          self.store.get,
                          loc)

    @mock.patch('glance_store._drivers.swift.utils'
                '.is_multiple_swift_store_accounts_enabled',
                mock.Mock(return_value=False))
    def test_add(self):
        """Test that we can add an image via the swift backend."""
        reload(swift)
        self.store = Store(self.conf)
        self.store.configure()
        expected_swift_size = FIVE_KB
        expected_swift_contents = "*" * expected_swift_size
        expected_checksum = hashlib.md5(expected_swift_contents).hexdigest()
        expected_image_id = str(uuid.uuid4())
        loc = "swift+https://tenant%%3Auser1:key@localhost:8080/glance/%s"
        expected_location = loc % (expected_image_id)
        image_swift = six.StringIO(expected_swift_contents)

        global SWIFT_PUT_OBJECT_CALLS
        SWIFT_PUT_OBJECT_CALLS = 0

        loc, size, checksum, _ = self.store.add(expected_image_id,
                                                image_swift,
                                                expected_swift_size)

        self.assertEqual(expected_location, loc)
        self.assertEqual(expected_swift_size, size)
        self.assertEqual(expected_checksum, checksum)
        # Expecting a single object to be created on Swift i.e. no chunking.
        self.assertEqual(SWIFT_PUT_OBJECT_CALLS, 1)

        loc = location.get_location_from_uri(expected_location, conf=self.conf)
        (new_image_swift, new_image_size) = self.store.get(loc)
        new_image_contents = ''.join([chunk for chunk in new_image_swift])
        # NOTE(review): this measures len() of the returned wrapper, not of
        # the joined contents (other tests use len(new_image_contents));
        # presumably the wrapper defines __len__ — confirm.
        new_image_swift_size = len(new_image_swift)

        self.assertEqual(expected_swift_contents, new_image_contents)
        self.assertEqual(expected_swift_size, new_image_swift_size)

    def test_add_multi_store(self):
        """Adding with a non-default reference yields a swift+config URI."""
        conf = copy.deepcopy(SWIFT_CONF)
        conf['default_swift_reference'] = 'store_2'
        self.config(**conf)
        reload(swift)
        self.store = Store(self.conf)
        self.store.configure()

        expected_swift_size = FIVE_KB
        expected_swift_contents = "*" * expected_swift_size
        expected_image_id = str(uuid.uuid4())
        image_swift = six.StringIO(expected_swift_contents)
        global SWIFT_PUT_OBJECT_CALLS
        SWIFT_PUT_OBJECT_CALLS = 0
        loc = 'swift+config://store_2/glance/%s'

        expected_location = loc % (expected_image_id)

        location, size, checksum, arg = self.store.add(expected_image_id,
                                                       image_swift,
                                                       expected_swift_size)
        self.assertEqual(expected_location, location)

    @mock.patch('glance_store._drivers.swift.utils'
                '.is_multiple_swift_store_accounts_enabled',
                mock.Mock(return_value=True))
    def test_add_auth_url_variations(self):
        """
        Test that we can add an image via the swift backend with
        a variety of different auth_address values
        """
        conf = copy.deepcopy(SWIFT_CONF)
        self.config(**conf)

        variations = {
            'store_4': 'swift+config://store_4/glance/%s',
            'store_5': 'swift+config://store_5/glance/%s',
            'store_6': 'swift+config://store_6/glance/%s'
        }

        for variation, expected_location in variations.items():
            image_id = str(uuid.uuid4())
            expected_location = expected_location % image_id
            expected_swift_size = FIVE_KB
            expected_swift_contents = "*" * expected_swift_size
            expected_checksum = \
                hashlib.md5(expected_swift_contents).hexdigest()

            image_swift = six.StringIO(expected_swift_contents)

            global SWIFT_PUT_OBJECT_CALLS
            SWIFT_PUT_OBJECT_CALLS = 0

            # Re-create the store for each reference being exercised.
            conf['default_swift_reference'] = variation
            self.config(**conf)
            reload(swift)
            self.store = Store(self.conf)
            self.store.configure()
            loc, size, checksum, _ = self.store.add(image_id, image_swift,
                                                    expected_swift_size)

            self.assertEqual(expected_location, loc)
            self.assertEqual(expected_swift_size, size)
            self.assertEqual(expected_checksum, checksum)
            self.assertEqual(SWIFT_PUT_OBJECT_CALLS, 1)

            loc = location.get_location_from_uri(expected_location,
                                                 conf=self.conf)
            (new_image_swift, new_image_size) = self.store.get(loc)
            new_image_contents = ''.join([chunk for chunk in new_image_swift])
            # NOTE(review): len() of the wrapper, not the joined contents;
            # see note in test_add.
            new_image_swift_size = len(new_image_swift)

            self.assertEqual(expected_swift_contents, new_image_contents)
            self.assertEqual(expected_swift_size, new_image_swift_size)

    def test_add_no_container_no_create(self):
        """
        Tests that adding an image with a non-existing container
        raises an appropriate exception
        """
        conf = copy.deepcopy(SWIFT_CONF)
        conf['swift_store_user'] = 'tenant:user'
        conf['swift_store_create_container_on_put'] = False
        conf['swift_store_container'] = 'noexist'
        self.config(**conf)
        reload(swift)

        self.store = Store(self.conf)
        self.store.configure()

        image_swift = six.StringIO("nevergonnamakeit")

        global SWIFT_PUT_OBJECT_CALLS
        SWIFT_PUT_OBJECT_CALLS = 0

        # We check the exception text to ensure the container
        # missing text is found in it, otherwise, we would have
        # simply used self.assertRaises here
        exception_caught = False
        try:
            self.store.add(str(uuid.uuid4()), image_swift, 0)
        except BackendException as e:
            exception_caught = True
            self.assertIn("container noexist does not exist "
                          "in Swift", utils.exception_to_str(e))
        self.assertTrue(exception_caught)
        self.assertEqual(SWIFT_PUT_OBJECT_CALLS, 0)

    @mock.patch('glance_store._drivers.swift.utils'
                '.is_multiple_swift_store_accounts_enabled',
                mock.Mock(return_value=True))
    def test_add_no_container_and_create(self):
        """
        Tests that adding an image with a non-existing container
        creates the container automatically if flag is set
        """
        expected_swift_size = FIVE_KB
        expected_swift_contents = "*" * expected_swift_size
        expected_checksum = hashlib.md5(expected_swift_contents).hexdigest()
        expected_image_id = str(uuid.uuid4())
        loc = 'swift+config://ref1/noexist/%s'
        expected_location = loc % (expected_image_id)
        image_swift = six.StringIO(expected_swift_contents)

        global SWIFT_PUT_OBJECT_CALLS
        SWIFT_PUT_OBJECT_CALLS = 0
        conf = copy.deepcopy(SWIFT_CONF)
        conf['swift_store_user'] = 'tenant:user'
        conf['swift_store_create_container_on_put'] = True
        conf['swift_store_container'] = 'noexist'
        self.config(**conf)
        reload(swift)
        self.store = Store(self.conf)
        self.store.configure()
        loc, size, checksum, _ = self.store.add(expected_image_id,
                                                image_swift,
                                                expected_swift_size)

        self.assertEqual(expected_location, loc)
        self.assertEqual(expected_swift_size, size)
        self.assertEqual(expected_checksum, checksum)
        self.assertEqual(SWIFT_PUT_OBJECT_CALLS, 1)

        loc = location.get_location_from_uri(expected_location, conf=self.conf)
        (new_image_swift, new_image_size) = self.store.get(loc)
        new_image_contents = ''.join([chunk for chunk in new_image_swift])
        # NOTE(review): len() of the wrapper; see note in test_add.
        new_image_swift_size = len(new_image_swift)

        self.assertEqual(expected_swift_contents, new_image_contents)
        self.assertEqual(expected_swift_size, new_image_swift_size)

    @mock.patch('glance_store._drivers.swift.utils'
                '.is_multiple_swift_store_accounts_enabled',
                mock.Mock(return_value=True))
    def test_add_no_container_and_multiple_containers_create(self):
        """
        Tests that adding an image with a non-existing container while using
        multi containers will create the container automatically if flag is set
        """
        expected_swift_size = FIVE_KB
        expected_swift_contents = "*" * expected_swift_size
        expected_checksum = hashlib.md5(expected_swift_contents).hexdigest()
        expected_image_id = str(uuid.uuid4())
        # With a seed of 2 the container name is suffixed with the first
        # two characters of the image id.
        container = 'randomname_' + expected_image_id[:2]
        loc = 'swift+config://ref1/%s/%s'
        expected_location = loc % (container, expected_image_id)
        image_swift = six.StringIO(expected_swift_contents)

        global SWIFT_PUT_OBJECT_CALLS
        SWIFT_PUT_OBJECT_CALLS = 0
        conf = copy.deepcopy(SWIFT_CONF)
        conf['swift_store_user'] = 'tenant:user'
        conf['swift_store_create_container_on_put'] = True
        conf['swift_store_container'] = 'randomname'
        conf['swift_store_multiple_containers_seed'] = 2
        self.config(**conf)
        reload(swift)

        self.store = Store(self.conf)
        self.store.configure()
        loc, size, checksum, _ = self.store.add(expected_image_id,
                                                image_swift,
                                                expected_swift_size)

        self.assertEqual(expected_location, loc)
        self.assertEqual(expected_swift_size, size)
        self.assertEqual(expected_checksum, checksum)
        self.assertEqual(SWIFT_PUT_OBJECT_CALLS, 1)

        loc = location.get_location_from_uri(expected_location, conf=self.conf)
        (new_image_swift, new_image_size) = self.store.get(loc)
        new_image_contents = ''.join([chunk for chunk in new_image_swift])
        # NOTE(review): len() of the wrapper; see note in test_add.
        new_image_swift_size = len(new_image_swift)

        self.assertEqual(expected_swift_contents, new_image_contents)
        self.assertEqual(expected_swift_size, new_image_swift_size)

    @mock.patch('glance_store._drivers.swift.utils'
                '.is_multiple_swift_store_accounts_enabled',
                mock.Mock(return_value=True))
    def test_add_no_container_and_multiple_containers_no_create(self):
        """
        Tests that adding an image with a non-existing container while using
        multiple containers raises an appropriate exception
        """
        conf = copy.deepcopy(SWIFT_CONF)
        conf['swift_store_user'] = 'tenant:user'
        conf['swift_store_create_container_on_put'] = False
        conf['swift_store_container'] = 'randomname'
        conf['swift_store_multiple_containers_seed'] = 2
        self.config(**conf)
        reload(swift)

        expected_image_id = str(uuid.uuid4())
        expected_container = 'randomname_' + expected_image_id[:2]

        self.store = Store(self.conf)
        self.store.configure()

        image_swift = six.StringIO("nevergonnamakeit")

        global SWIFT_PUT_OBJECT_CALLS
        SWIFT_PUT_OBJECT_CALLS = 0

        # We check the exception text to ensure the container
        # missing text is found in it, otherwise, we would have
        # simply used self.assertRaises here
        exception_caught = False
        try:
            self.store.add(expected_image_id, image_swift, 0)
        except BackendException as e:
            exception_caught = True
            expected_msg = "container %s does not exist in Swift"
            expected_msg = expected_msg % expected_container
            self.assertIn(expected_msg, utils.exception_to_str(e))
        self.assertTrue(exception_caught)
        self.assertEqual(SWIFT_PUT_OBJECT_CALLS, 0)

    @mock.patch('glance_store._drivers.swift.utils'
                '.is_multiple_swift_store_accounts_enabled',
                mock.Mock(return_value=False))
    def test_multi_container_doesnt_impact_multi_tenant_add(self):
        # The multiple-containers seed must also apply to the
        # multi-tenant store's object placement.
        expected_swift_size = FIVE_KB
        expected_swift_contents = "*" * expected_swift_size
        expected_image_id = str(uuid.uuid4())
        expected_container = 'container_' + expected_image_id
        loc = 'swift+https://some_endpoint/%s/%s'
        expected_location = loc % (expected_container, expected_image_id)
        image_swift = six.StringIO(expected_swift_contents)

        global SWIFT_PUT_OBJECT_CALLS
        SWIFT_PUT_OBJECT_CALLS = 0
        self.config(swift_store_container='container')
        self.config(swift_store_create_container_on_put=True)
        self.config(swift_store_multiple_containers_seed=2)
        fake_get_endpoint = FakeGetEndpoint('https://some_endpoint')
        self.stubs.Set(auth, 'get_endpoint', fake_get_endpoint)
        ctxt = mock.MagicMock(
            user='user', tenant='tenant', auth_token='123',
            service_catalog={})
        store = swift.MultiTenantStore(self.conf)
        store.configure()
        location, size, checksum, _ = store.add(expected_image_id, image_swift,
                                                expected_swift_size,
                                                context=ctxt)
        self.assertEqual(expected_location, location)

    @mock.patch('glance_store._drivers.swift.utils'
                '.is_multiple_swift_store_accounts_enabled',
                mock.Mock(return_value=True))
    def test_add_large_object(self):
        """
        Tests that adding a very large image. We simulate the large
        object by setting store.large_object_size to a small number
        and then verify that there have been a number of calls to
        put_object()...
        """
        expected_swift_size = FIVE_KB
        expected_swift_contents = "*" * expected_swift_size
        expected_checksum = hashlib.md5(expected_swift_contents).hexdigest()
        expected_image_id = str(uuid.uuid4())
        loc = 'swift+config://ref1/glance/%s'
        expected_location = loc % (expected_image_id)
        image_swift = six.StringIO(expected_swift_contents)

        global SWIFT_PUT_OBJECT_CALLS
        SWIFT_PUT_OBJECT_CALLS = 0

        self.store = Store(self.conf)
        self.store.configure()
        orig_max_size = self.store.large_object_size
        orig_temp_size = self.store.large_object_chunk_size
        try:
            # Force chunking: 1 KB threshold against a 5 KB image.
            self.store.large_object_size = units.Ki
            self.store.large_object_chunk_size = units.Ki
            loc, size, checksum, _ = self.store.add(expected_image_id,
                                                    image_swift,
                                                    expected_swift_size)
        finally:
            self.store.large_object_chunk_size = orig_temp_size
            self.store.large_object_size = orig_max_size

        self.assertEqual(expected_location, loc)
        self.assertEqual(expected_swift_size, size)
        self.assertEqual(expected_checksum, checksum)
        # Expecting 6 objects to be created on Swift -- 5 chunks and 1
        # manifest.
        self.assertEqual(SWIFT_PUT_OBJECT_CALLS, 6)

        loc = location.get_location_from_uri(expected_location, conf=self.conf)
        (new_image_swift, new_image_size) = self.store.get(loc)
        new_image_contents = ''.join([chunk for chunk in new_image_swift])
        new_image_swift_size = len(new_image_contents)

        self.assertEqual(expected_swift_contents, new_image_contents)
        self.assertEqual(expected_swift_size, new_image_swift_size)

    def test_add_large_object_zero_size(self):
        """
        Tests that adding an image to Swift which has both an unknown size and
        exceeds Swift's maximum limit of 5GB is correctly uploaded.

        We avoid the overhead of creating a 5GB object for this test by
        temporarily setting MAX_SWIFT_OBJECT_SIZE to 1KB, and then adding
        an object of 5KB.

        Bug lp:891738
        """
        # Set up a 'large' image of 5KB
        expected_swift_size = FIVE_KB
        expected_swift_contents = "*" * expected_swift_size
        expected_checksum = hashlib.md5(expected_swift_contents).hexdigest()
        expected_image_id = str(uuid.uuid4())
        loc = 'swift+config://ref1/glance/%s'
        expected_location = loc % (expected_image_id)
        image_swift = six.StringIO(expected_swift_contents)

        global SWIFT_PUT_OBJECT_CALLS
        SWIFT_PUT_OBJECT_CALLS = 0

        # Temporarily set Swift MAX_SWIFT_OBJECT_SIZE to 1KB and add our image,
        # explicitly setting the image_length to 0
        self.store = Store(self.conf)
        self.store.configure()
        orig_max_size = self.store.large_object_size
        orig_temp_size = self.store.large_object_chunk_size
        global MAX_SWIFT_OBJECT_SIZE
        orig_max_swift_object_size = MAX_SWIFT_OBJECT_SIZE
        try:
            MAX_SWIFT_OBJECT_SIZE = units.Ki
            self.store.large_object_size = units.Ki
            self.store.large_object_chunk_size = units.Ki
            loc, size, checksum, _ = self.store.add(expected_image_id,
                                                    image_swift, 0)
        finally:
            self.store.large_object_chunk_size = orig_temp_size
            self.store.large_object_size = orig_max_size
            MAX_SWIFT_OBJECT_SIZE = orig_max_swift_object_size

        self.assertEqual(expected_location, loc)
        self.assertEqual(expected_swift_size, size)
        self.assertEqual(expected_checksum, checksum)
        # Expecting 7 calls to put_object -- 5 chunks, a zero chunk which is
        # then deleted, and the manifest.  Note the difference with above
        # where the image_size is specified in advance (there's no zero chunk
        # in that case).
        self.assertEqual(SWIFT_PUT_OBJECT_CALLS, 7)

        loc = location.get_location_from_uri(expected_location, conf=self.conf)
        (new_image_swift, new_image_size) = self.store.get(loc)
        new_image_contents = ''.join([chunk for chunk in new_image_swift])
        new_image_swift_size = len(new_image_contents)

        self.assertEqual(expected_swift_contents, new_image_contents)
        self.assertEqual(expected_swift_size, new_image_swift_size)

    def test_add_already_existing(self):
        """
        Tests that adding an image with an existing identifier
        raises an appropriate exception
        """
        self.store = Store(self.conf)
        self.store.configure()
        image_swift = six.StringIO("nevergonnamakeit")
        self.assertRaises(exceptions.Duplicate,
                          self.store.add,
                          FAKE_UUID, image_swift, 0)

    def _option_required(self, key):
        # Helper: returns True when nulling out `key` disables write access.
        conf = self.getConfig()
        conf[key] = None

        try:
            self.config(**conf)
            self.store = Store(self.conf)
            return not self.store.is_capable(
                capabilities.BitMasks.WRITE_ACCESS)
        except Exception:
            return False
        # NOTE(review): unreachable — both branches above return.
        return False

    def test_no_store_credentials(self):
        """
        Tests that options without a valid credentials disables the add method
        """
        self.store = Store(self.conf)
        self.store.ref_params = {'ref1': {'auth_address':
                                          'authurl.com', 'user': '',
                                          'key': ''}}
        self.store.configure()
        self.assertFalse(self.store.is_capable(
            capabilities.BitMasks.WRITE_ACCESS))

    def test_no_auth_address(self):
        """
        Tests that options without auth address disables the add method
        """
        self.store = Store(self.conf)
        self.store.ref_params = {'ref1': {'auth_address':
                                          '', 'user': 'user1',
                                          'key': 'key1'}}
        self.store.configure()
        self.assertFalse(self.store.is_capable(
            capabilities.BitMasks.WRITE_ACCESS))

    def test_delete(self):
        """
        Test we can delete an existing image in the swift store
        """
        conf = copy.deepcopy(SWIFT_CONF)
        self.config(**conf)
        reload(swift)
        self.store = Store(self.conf)
        self.store.configure()

        uri = "swift://%s:key@authurl/glance/%s" % (
            self.swift_store_user, FAKE_UUID)
        loc = location.get_location_from_uri(uri, conf=self.conf)
        self.store.delete(loc)

        self.assertRaises(exceptions.NotFound, self.store.get, loc)

    @mock.patch.object(swiftclient.client, 'delete_object')
    def test_delete_slo(self, mock_del_obj):
        """
        Test we can delete an existing image stored as SLO, static large object
        """
        conf = copy.deepcopy(SWIFT_CONF)
        self.config(**conf)
        reload(swift)
        self.store = Store(self.conf)
        self.store.configure()

        # FAKE_UUID2 carries the x-static-large-object fixture header.
        uri = "swift://%s:key@authurl/glance/%s" % (self.swift_store_user,
                                                    FAKE_UUID2)
        loc = location.get_location_from_uri(uri, conf=self.conf)
        self.store.delete(loc)

        mock_del_obj.assert_called_once()
        _, kwargs = mock_del_obj.call_args
        # SLO deletion must pass the multipart-manifest=delete query string.
        self.assertEqual('multipart-manifest=delete',
                         kwargs.get('query_string'))

    @mock.patch.object(swiftclient.client, 'delete_object')
    def test_delete_nonslo_not_deleted_as_slo(self, mock_del_obj):
        """
        Test that non-SLOs are not being deleted the SLO way
        """
        conf = copy.deepcopy(SWIFT_CONF)
        self.config(**conf)
        reload(swift)
        self.store = Store(self.conf)
        self.store.configure()

        uri = "swift://%s:key@authurl/glance/%s" % (self.swift_store_user,
                                                    FAKE_UUID)
        loc = location.get_location_from_uri(uri, conf=self.conf)
        self.store.delete(loc)

        mock_del_obj.assert_called_once()
        _, kwargs = mock_del_obj.call_args
        self.assertEqual(None, kwargs.get('query_string'))

    def test_delete_with_reference_params(self):
        """
        Test we can delete an existing image in the swift store
        """
        conf = copy.deepcopy(SWIFT_CONF)
        self.config(**conf)
        reload(swift)
        self.store = Store(self.conf)
        self.store.configure()

        uri = "swift+config://ref1/glance/%s" % (FAKE_UUID)
        loc = location.get_location_from_uri(uri, conf=self.conf)
        self.store.delete(loc)

        self.assertRaises(exceptions.NotFound, self.store.get, loc)

    def test_delete_non_existing(self):
        """
        Test that trying to delete a swift that doesn't exist
        raises an error
        """
        conf = copy.deepcopy(SWIFT_CONF)
        self.config(**conf)
        reload(swift)
        self.store = Store(self.conf)
        self.store.configure()

        loc = location.get_location_from_uri(
            "swift://%s:key@authurl/glance/noexist" % (self.swift_store_user),
            conf=self.conf)
        self.assertRaises(exceptions.NotFound, self.store.delete, loc)

    def test_delete_with_some_segments_failing(self):
        """
        Tests that delete of a segmented object recovers from error(s) while
        deleting one or more segments.
        To test this we add a segmented object first and then delete it, while
        simulating errors on one or more segments.
        """

        test_image_id = str(uuid.uuid4())

        def fake_head_object(container, object_name):
            object_manifest = '/'.join([container, object_name]) + '-'
            return {'x-object-manifest': object_manifest}

        def fake_get_container(container, **kwargs):
            # Returning 5 fake segments
            return None, [{'name': '%s-%05d' % (test_image_id, x)}
                          for x in range(1, 6)]

        def fake_delete_object(container, object_name):
            # Simulate error on 1st and 3rd segments
            global SWIFT_DELETE_OBJECT_CALLS
            SWIFT_DELETE_OBJECT_CALLS += 1
            if object_name.endswith('001') or object_name.endswith('003'):
                raise swiftclient.ClientException('Object DELETE failed')
            else:
                pass

        conf = copy.deepcopy(SWIFT_CONF)
        self.config(**conf)
        reload(swift)
        self.store = Store(self.conf)
        self.store.configure()

        loc_uri = "swift+https://%s:key@localhost:8080/glance/%s"
        loc_uri = loc_uri % (self.swift_store_user, test_image_id)
        loc = location.get_location_from_uri(loc_uri)

        conn = self.store.get_connection(loc.store_location)
        conn.delete_object = fake_delete_object
        conn.head_object = fake_head_object
        conn.get_container = fake_get_container

        global SWIFT_DELETE_OBJECT_CALLS
        SWIFT_DELETE_OBJECT_CALLS = 0

        self.store.delete(loc, connection=conn)
        # Expecting 6 delete calls, 5 for the segments and 1 for the manifest
        self.assertEqual(SWIFT_DELETE_OBJECT_CALLS, 6)

    def test_read_acl_public(self):
        """
        Test that we can set a public read acl.
        """
        self.config(swift_store_multi_tenant=True)
        store = Store(self.conf)
        store.configure()
        uri = "swift+http://storeurl/glance/%s" % FAKE_UUID
        loc = location.get_location_from_uri(uri, conf=self.conf)
        ctxt = mock.MagicMock()
        store.set_acls(loc, public=True, context=ctxt)
        container_headers = swiftclient.client.head_container('x', 'y',
                                                              'glance')
        self.assertEqual(container_headers['X-Container-Read'],
                         "*:*")

    def test_read_acl_tenants(self):
        """
        Test that we can set read acl for tenants.
        """
        self.config(swift_store_multi_tenant=True)
        store = Store(self.conf)
        store.configure()
        uri = "swift+http://storeurl/glance/%s" % FAKE_UUID
        loc = location.get_location_from_uri(uri, conf=self.conf)
        read_tenants = ['matt', 'mark']
        ctxt = mock.MagicMock()
        store.set_acls(loc, read_tenants=read_tenants, context=ctxt)
        container_headers = swiftclient.client.head_container('x', 'y',
                                                              'glance')
        self.assertEqual(container_headers['X-Container-Read'],
                         'matt:*,mark:*')

    def test_write_acls(self):
        """
        Test that we can set write acl for tenants.
        """
        self.config(swift_store_multi_tenant=True)
        store = Store(self.conf)
        store.configure()
        uri = "swift+http://storeurl/glance/%s" % FAKE_UUID
        loc = location.get_location_from_uri(uri, conf=self.conf)
        read_tenants = ['frank', 'jim']
        ctxt = mock.MagicMock()
        store.set_acls(loc, write_tenants=read_tenants, context=ctxt)
        container_headers = swiftclient.client.head_container('x', 'y',
                                                              'glance')
        self.assertEqual(container_headers['X-Container-Write'],
                         'frank:*,jim:*')
class TestStoreAuthV1(base.StoreBaseTest, SwiftTests,
                      test_store_capabilities.TestStoreCapabilitiesChecking):
    """Runs the shared SwiftTests against a store configured for auth v1."""

    _CONF = cfg.CONF

    def getConfig(self):
        # Base configuration; subclasses override auth version / user.
        conf = SWIFT_CONF.copy()
        conf['swift_store_auth_version'] = '1'
        conf['swift_store_user'] = 'tenant:user1'
        return conf

    def setUp(self):
        """Establish a clean test environment."""
        super(TestStoreAuthV1, self).setUp()
        conf = self.getConfig()

        # Point the store at a per-test copy of the reference config file.
        conf_file = 'glance-swift.conf'
        self.swift_config_file = self.copy_data_file(conf_file, self.test_dir)
        conf.update({'swift_store_config_file': self.swift_config_file})

        # Swiftclient must be stubbed before the store is created.
        moxfixture = self.useFixture(moxstubout.MoxStubout())
        self.stubs = moxfixture.stubs
        stub_out_swiftclient(self.stubs, conf['swift_store_auth_version'])
        self.store = Store(self.conf)
        self.config(**conf)
        self.store.configure()
        self.register_store_schemes(self.store, 'swift')

        self.addCleanup(self.conf.reset)
class TestStoreAuthV2(TestStoreAuthV1):
    """Repeat the SwiftTests suite with Keystone (v2) authentication."""
    def getConfig(self):
        conf = super(TestStoreAuthV2, self).getConfig()
        conf['swift_store_auth_version'] = '2'
        conf['swift_store_user'] = 'tenant:user1'
        return conf
    def test_v2_with_no_tenant(self):
        # v2 auth requires 'tenant:user' credentials; a bare user in the
        # URI must be rejected as a bad store URI.
        uri = "swift://failme:key@auth_address/glance/%s" % (FAKE_UUID)
        loc = location.get_location_from_uri(uri, conf=self.conf)
        self.assertRaises(exceptions.BadStoreUri,
                          self.store.get,
                          loc)
    def test_v2_multi_tenant_location(self):
        # Multi-tenant URIs carry no credentials; the scheme still parses.
        conf = self.getConfig()
        conf['swift_store_multi_tenant'] = True
        uri = "swift://auth_address/glance/%s" % (FAKE_UUID)
        loc = location.get_location_from_uri(uri, conf=self.conf)
        self.assertEqual('swift', loc.store_name)
class FakeConnection(object):
    """Stand-in for ``swiftclient.Connection`` that records its args.

    Tests swap this in via mox so connection parameters produced by the
    store can be asserted on without touching a real Swift endpoint.
    Keyword arguments not stored (retries, backoff, ssl_compression) are
    accepted only for signature compatibility.
    """

    def __init__(self, authurl, user, key, retries=5, preauthurl=None,
                 preauthtoken=None, starting_backoff=1, tenant_name=None,
                 os_options=None, auth_version="1", insecure=False,
                 ssl_compression=True, cacert=None):
        self.authurl = authurl
        self.user = user
        self.key = key
        self.preauthurl = preauthurl
        self.preauthtoken = preauthtoken
        self.tenant_name = tenant_name
        # Mirror swiftclient: a fresh dict per instance, never a shared one.
        self.os_options = {} if os_options is None else os_options
        self.auth_version = auth_version
        self.insecure = insecure
        self.cacert = cacert
class TestSingleTenantStoreConnections(base.StoreBaseTest):
    """Exercise SingleTenantStore.get_connection and URI parsing.

    swiftclient.Connection is replaced with FakeConnection, so each test
    inspects the attributes the store would hand to swiftclient.
    """
    _CONF = cfg.CONF
    def setUp(self):
        super(TestSingleTenantStoreConnections, self).setUp()
        moxfixture = self.useFixture(moxstubout.MoxStubout())
        self.stubs = moxfixture.stubs
        self.stubs.Set(swiftclient, 'Connection', FakeConnection)
        self.store = swift.SingleTenantStore(self.conf)
        self.store.configure()
        # A v2-style location: tenant-qualified user, key, container, object.
        specs = {'scheme': 'swift',
                 'auth_or_store_url': 'example.com/v2/',
                 'user': 'tenant:user1',
                 'key': 'key1',
                 'container': 'cont',
                 'obj': 'object'}
        self.location = swift.StoreLocation(specs, self.conf)
        self.addCleanup(self.conf.reset)
    def test_basic_connection(self):
        # The '/v2/' suffix in the URL selects Keystone (v2) auth and the
        # 'tenant:user' credential is split into tenant_name and user.
        connection = self.store.get_connection(self.location)
        self.assertEqual(connection.authurl, 'https://example.com/v2/')
        self.assertEqual(connection.auth_version, '2')
        self.assertEqual(connection.user, 'user1')
        self.assertEqual(connection.tenant_name, 'tenant')
        self.assertEqual(connection.key, 'key1')
        self.assertIsNone(connection.preauthurl)
        self.assertFalse(connection.insecure)
        self.assertEqual(connection.os_options,
                         {'service_type': 'object-store',
                          'endpoint_type': 'publicURL'})
    def test_connection_with_conf_endpoint(self):
        # A configured endpoint becomes the preauth storage URL while the
        # auth URL is still taken from the location.
        ctx = mock.MagicMock(user='tenant:user1', tenant='tenant')
        self.config(swift_store_endpoint='https://internal.com')
        self.store.configure()
        connection = self.store.get_connection(self.location, context=ctx)
        self.assertEqual(connection.authurl, 'https://example.com/v2/')
        self.assertEqual(connection.auth_version, '2')
        self.assertEqual(connection.user, 'user1')
        self.assertEqual(connection.tenant_name, 'tenant')
        self.assertEqual(connection.key, 'key1')
        self.assertEqual(connection.preauthurl, 'https://internal.com')
        self.assertFalse(connection.insecure)
        self.assertEqual(connection.os_options,
                         {'service_type': 'object-store',
                          'endpoint_type': 'publicURL'})
    def test_connection_with_conf_endpoint_no_context(self):
        # Same as above: the configured endpoint applies without a context.
        self.config(swift_store_endpoint='https://internal.com')
        self.store.configure()
        connection = self.store.get_connection(self.location)
        self.assertEqual(connection.authurl, 'https://example.com/v2/')
        self.assertEqual(connection.auth_version, '2')
        self.assertEqual(connection.user, 'user1')
        self.assertEqual(connection.tenant_name, 'tenant')
        self.assertEqual(connection.key, 'key1')
        self.assertEqual(connection.preauthurl, 'https://internal.com')
        self.assertFalse(connection.insecure)
        self.assertEqual(connection.os_options,
                         {'service_type': 'object-store',
                          'endpoint_type': 'publicURL'})
    def test_connection_with_no_trailing_slash(self):
        # The store normalizes the auth URL with a trailing slash.
        self.location.auth_or_store_url = 'example.com/v2'
        connection = self.store.get_connection(self.location)
        self.assertEqual(connection.authurl, 'https://example.com/v2/')
    def test_connection_insecure(self):
        self.config(swift_store_auth_insecure=True)
        self.store.configure()
        connection = self.store.get_connection(self.location)
        self.assertTrue(connection.insecure)
    def test_connection_with_auth_v1(self):
        # Under auth v1 the user string is passed through untouched and
        # no tenant is derived.
        self.config(swift_store_auth_version='1')
        self.store.configure()
        self.location.user = 'auth_v1_user'
        connection = self.store.get_connection(self.location)
        self.assertEqual(connection.auth_version, '1')
        self.assertEqual(connection.user, 'auth_v1_user')
        self.assertIsNone(connection.tenant_name)
    def test_connection_invalid_user(self):
        # More than one ':' in the credential is rejected.
        self.store.configure()
        self.location.user = 'invalid:format:user'
        self.assertRaises(exceptions.BadStoreUri,
                          self.store.get_connection, self.location)
    def test_connection_missing_user(self):
        self.store.configure()
        self.location.user = None
        self.assertRaises(exceptions.BadStoreUri,
                          self.store.get_connection, self.location)
    def test_connection_with_region(self):
        self.config(swift_store_region='Sahara')
        self.store.configure()
        connection = self.store.get_connection(self.location)
        self.assertEqual(connection.os_options,
                         {'region_name': 'Sahara',
                          'service_type': 'object-store',
                          'endpoint_type': 'publicURL'})
    def test_connection_with_service_type(self):
        self.config(swift_store_service_type='shoe-store')
        self.store.configure()
        connection = self.store.get_connection(self.location)
        self.assertEqual(connection.os_options,
                         {'service_type': 'shoe-store',
                          'endpoint_type': 'publicURL'})
    def test_connection_with_endpoint_type(self):
        self.config(swift_store_endpoint_type='internalURL')
        self.store.configure()
        connection = self.store.get_connection(self.location)
        self.assertEqual(connection.os_options,
                         {'service_type': 'object-store',
                          'endpoint_type': 'internalURL'})
    def test_bad_location_uri(self):
        self.store.configure()
        self.location.uri = 'http://bad_uri://'
        self.assertRaises(exceptions.BadStoreUri,
                          self.location.parse_uri,
                          self.location.uri)
    def test_bad_location_uri_invalid_credentials(self):
        self.store.configure()
        self.location.uri = 'swift://bad_creds@uri/cont/obj'
        self.assertRaises(exceptions.BadStoreUri,
                          self.location.parse_uri,
                          self.location.uri)
    def test_bad_location_uri_invalid_object_path(self):
        # A URI with a container but no object is incomplete.
        self.store.configure()
        self.location.uri = 'swift://user:key@uri/cont'
        self.assertRaises(exceptions.BadStoreUri,
                          self.location.parse_uri,
                          self.location.uri)
class TestMultiTenantStoreConnections(base.StoreBaseTest):
    """Verify MultiTenantStore builds pre-authenticated connections."""
    def setUp(self):
        super(TestMultiTenantStoreConnections, self).setUp()
        moxfixture = self.useFixture(moxstubout.MoxStubout())
        self.stubs = moxfixture.stubs
        self.stubs.Set(swiftclient, 'Connection', FakeConnection)
        # Multi-tenant stores reuse the caller's token from the context
        # instead of authenticating with their own credentials.
        self.context = mock.MagicMock(
            user='tenant:user1', tenant='tenant', auth_token='0123')
        self.store = swift.MultiTenantStore(self.conf)
        specs = {'scheme': 'swift',
                 'auth_or_store_url': 'example.com',
                 'container': 'cont',
                 'obj': 'object'}
        self.location = swift.StoreLocation(specs, self.conf)
        self.addCleanup(self.conf.reset)
    def test_basic_connection(self):
        self.store.configure()
        connection = self.store.get_connection(self.location,
                                               context=self.context)
        # No auth round-trip: authurl/key are unset and the connection is
        # primed with the storage URL and the context's token.
        self.assertIsNone(connection.authurl)
        self.assertEqual(connection.auth_version, '2')
        self.assertEqual(connection.user, 'tenant:user1')
        self.assertEqual(connection.tenant_name, 'tenant')
        self.assertIsNone(connection.key)
        self.assertEqual(connection.preauthurl, 'https://example.com')
        self.assertEqual(connection.preauthtoken, '0123')
        self.assertEqual(connection.os_options, {})
class TestMultiTenantStoreContext(base.StoreBaseTest):
    """Check that the request context's token reaches Swift over HTTP."""
    _CONF = cfg.CONF
    def setUp(self):
        """Establish a clean test environment."""
        super(TestMultiTenantStoreContext, self).setUp()
        conf = SWIFT_CONF.copy()
        self.store = Store(self.conf)
        self.config(**conf)
        self.store.configure()
        self.register_store_schemes(self.store, 'swift')
        # Minimal service catalog pointing object-store at a local URL.
        self.service_catalog = [{
            "name": "Object Storage",
            "type": "object-store",
            "endpoints": [{
                "publicURL": "http://127.0.0.1:0",
                "region": "region1",
                "versionId": "1.0",
            }]
        }]
        self.addCleanup(self.conf.reset)
    @requests_mock.mock()
    def test_download_context(self, m):
        """Verify context (ie token) is passed to swift on download."""
        self.config(swift_store_multi_tenant=True)
        store = Store(self.conf)
        store.configure()
        uri = "swift+http://127.0.0.1/glance_123/123"
        loc = location.get_location_from_uri(uri, conf=self.conf)
        ctx = mock.MagicMock(
            service_catalog=self.service_catalog, user='tenant:user1',
            tenant='tenant', auth_token='0123')
        m.get("http://127.0.0.1/glance_123/123")
        store.get(loc, context=ctx)
        # The GET must carry the context's auth token.
        self.assertEqual('0123', m.last_request.headers['X-Auth-Token'])
    @requests_mock.mock()
    def test_upload_context(self, m):
        """Verify context (ie token) is passed to swift on upload."""
        # Uploading HEADs the container first, then PUTs the object.
        head_req = m.head("http://127.0.0.1/glance_123",
                          text='Some data',
                          status_code=201)
        put_req = m.put("http://127.0.0.1/glance_123/123")
        self.config(swift_store_multi_tenant=True)
        store = Store(self.conf)
        store.configure()
        pseudo_file = StringIO.StringIO('Some data')
        ctx = mock.MagicMock(
            service_catalog=self.service_catalog, user='tenant:user1',
            tenant='tenant', auth_token='0123')
        store.add('123', pseudo_file, pseudo_file.len,
                  context=ctx)
        self.assertEqual('0123', head_req.last_request.headers['X-Auth-Token'])
        self.assertEqual('0123', put_req.last_request.headers['X-Auth-Token'])
class FakeGetEndpoint(object):
    """Callable stand-in for ``auth.get_endpoint``.

    Always returns the canned ``response`` and remembers the keyword
    arguments of the last call so tests can assert on them.
    """

    def __init__(self, response):
        self.response = response

    def __call__(self, service_catalog, service_type=None,
                 endpoint_region=None, endpoint_type=None):
        # Record what the store asked for, then hand back the fixture URL.
        self.service_type = service_type
        self.endpoint_region = endpoint_region
        self.endpoint_type = endpoint_type
        return self.response
class TestCreatingLocations(base.StoreBaseTest):
    """Location construction for single- and multi-tenant stores."""
    _CONF = cfg.CONF
    def setUp(self):
        super(TestCreatingLocations, self).setUp()
        moxfixture = self.useFixture(moxstubout.MoxStubout())
        self.stubs = moxfixture.stubs
        conf = copy.deepcopy(SWIFT_CONF)
        self.store = Store(self.conf)
        self.config(**conf)
        # Reload the driver so module-level state reflects the new config.
        reload(swift)
        self.addCleanup(self.conf.reset)
    def test_single_tenant_location(self):
        conf = copy.deepcopy(SWIFT_CONF)
        conf['swift_store_container'] = 'container'
        conf_file = "glance-swift.conf"
        self.swift_config_file = self.copy_data_file(conf_file, self.test_dir)
        conf.update({'swift_store_config_file': self.swift_config_file})
        conf['default_swift_reference'] = 'ref1'
        self.config(**conf)
        reload(swift)
        store = swift.SingleTenantStore(self.conf)
        store.configure()
        location = store.create_location('image-id')
        # 'ref1' in the data file points at an https endpoint, so the
        # scheme is swift+https and credentials come from the reference.
        self.assertEqual(location.scheme, 'swift+https')
        self.assertEqual(location.swift_url, 'https://example.com')
        self.assertEqual(location.container, 'container')
        self.assertEqual(location.obj, 'image-id')
        self.assertEqual(location.user, 'tenant:user1')
        self.assertEqual(location.key, 'key1')
    def test_single_tenant_location_http(self):
        conf_file = "glance-swift.conf"
        test_dir = self.useFixture(fixtures.TempDir()).path
        self.swift_config_file = self.copy_data_file(conf_file, test_dir)
        self.config(swift_store_container='container',
                    default_swift_reference='ref2',
                    swift_store_config_file=self.swift_config_file)
        store = swift.SingleTenantStore(self.conf)
        store.configure()
        location = store.create_location('image-id')
        # 'ref2' uses a plain-http endpoint.
        self.assertEqual(location.scheme, 'swift+http')
        self.assertEqual(location.swift_url, 'http://example.com')
    def test_multi_tenant_location(self):
        self.config(swift_store_container='container')
        fake_get_endpoint = FakeGetEndpoint('https://some_endpoint')
        self.stubs.Set(auth, 'get_endpoint', fake_get_endpoint)
        ctxt = mock.MagicMock(
            user='user', tenant='tenant', auth_token='123',
            service_catalog={})
        store = swift.MultiTenantStore(self.conf)
        store.configure()
        location = store.create_location('image-id', context=ctxt)
        self.assertEqual(location.scheme, 'swift+https')
        self.assertEqual(location.swift_url, 'https://some_endpoint')
        # Multi-tenant mode appends the image id to the container name and
        # stores no credentials in the location.
        self.assertEqual(location.container, 'container_image-id')
        self.assertEqual(location.obj, 'image-id')
        self.assertIsNone(location.user)
        self.assertIsNone(location.key)
        self.assertEqual(fake_get_endpoint.service_type, 'object-store')
    def test_multi_tenant_location_http(self):
        fake_get_endpoint = FakeGetEndpoint('http://some_endpoint')
        self.stubs.Set(auth, 'get_endpoint', fake_get_endpoint)
        ctxt = mock.MagicMock(
            user='user', tenant='tenant', auth_token='123',
            service_catalog={})
        store = swift.MultiTenantStore(self.conf)
        store.configure()
        location = store.create_location('image-id', context=ctxt)
        self.assertEqual(location.scheme, 'swift+http')
        self.assertEqual(location.swift_url, 'http://some_endpoint')
    def test_multi_tenant_location_with_region(self):
        self.config(swift_store_region='WestCarolina')
        fake_get_endpoint = FakeGetEndpoint('https://some_endpoint')
        self.stubs.Set(auth, 'get_endpoint', fake_get_endpoint)
        ctxt = mock.MagicMock(
            user='user', tenant='tenant', auth_token='123',
            service_catalog={})
        store = swift.MultiTenantStore(self.conf)
        store.configure()
        store._get_endpoint(ctxt)
        self.assertEqual(fake_get_endpoint.endpoint_region, 'WestCarolina')
    def test_multi_tenant_location_custom_service_type(self):
        self.config(swift_store_service_type='toy-store')
        fake_get_endpoint = FakeGetEndpoint('https://some_endpoint')
        self.stubs.Set(auth, 'get_endpoint', fake_get_endpoint)
        ctxt = mock.MagicMock(
            user='user', tenant='tenant', auth_token='123',
            service_catalog={})
        store = swift.MultiTenantStore(self.conf)
        store.configure()
        store._get_endpoint(ctxt)
        self.assertEqual(fake_get_endpoint.service_type, 'toy-store')
    def test_multi_tenant_location_custom_endpoint_type(self):
        self.config(swift_store_endpoint_type='InternalURL')
        fake_get_endpoint = FakeGetEndpoint('https://some_endpoint')
        self.stubs.Set(auth, 'get_endpoint', fake_get_endpoint)
        ctxt = mock.MagicMock(
            user='user', tenant='tenant', auth_token='123',
            service_catalog={})
        store = swift.MultiTenantStore(self.conf)
        store.configure()
        store._get_endpoint(ctxt)
        self.assertEqual(fake_get_endpoint.endpoint_type, 'InternalURL')
class TestChunkReader(base.StoreBaseTest):
    """Tests for swift.ChunkReader."""
    _CONF = cfg.CONF
    def setUp(self):
        super(TestChunkReader, self).setUp()
        conf = copy.deepcopy(SWIFT_CONF)
        Store(self.conf)
        self.config(**conf)
    def test_read_all_data(self):
        """
        Replicate what goes on in the Swift driver with the
        repeated creation of the ChunkReader object
        """
        CHUNKSIZE = 100
        checksum = hashlib.md5()
        data_file = tempfile.NamedTemporaryFile()
        data_file.write('*' * units.Ki)
        data_file.flush()
        infile = open(data_file.name, 'rb')
        bytes_read = 0
        while True:
            # Intentionally construct a fresh ChunkReader per chunk, like
            # the driver does; the shared file position carries over, so
            # all 1 KiB must still be read exactly once in total.
            cr = swift.ChunkReader(infile, checksum, CHUNKSIZE)
            chunk = cr.read(CHUNKSIZE)
            bytes_read += len(chunk)
            if not chunk:
                break
        self.assertEqual(units.Ki, bytes_read)
        data_file.close()
        infile.close()
class TestMultipleContainers(base.StoreBaseTest):
    """Container-name sharding via swift_store_multiple_containers_seed.

    The seed is the number of leading image-id characters appended to the
    base container name; valid values are 0 (off) through 32.
    """
    _CONF = cfg.CONF
    def setUp(self):
        super(TestMultipleContainers, self).setUp()
        self.config(swift_store_multiple_containers_seed=3)
        self.store = swift.SingleTenantStore(self.conf)
        self.store.configure()
    def test_get_container_name_happy_path_with_seed_three(self):
        test_image_id = 'fdae39a1-bac5-4238-aba4-69bcc726e848'
        actual = self.store.get_container_name(test_image_id,
                                               'default_container')
        # Seed 3 appends the first three characters of the image id.
        expected = 'default_container_fda'
        self.assertEqual(expected, actual)
    def test_get_container_name_with_negative_seed(self):
        self.config(swift_store_multiple_containers_seed=-1)
        self.store = swift.SingleTenantStore(self.conf)
        test_image_id = 'random_id'
        self.assertRaises(exceptions.BadStoreConfiguration,
                          self.store.get_container_name, test_image_id,
                          'default_container')
    def test_get_container_name_with_seed_beyond_max(self):
        # 33 > 32 (the uuid length without dashes) and must be rejected.
        self.config(swift_store_multiple_containers_seed=33)
        self.store = swift.SingleTenantStore(self.conf)
        test_image_id = 'random_id'
        self.assertRaises(exceptions.BadStoreConfiguration,
                          self.store.get_container_name, test_image_id,
                          'default_container')
    def test_get_container_name_with_max_seed(self):
        self.config(swift_store_multiple_containers_seed=32)
        self.store = swift.SingleTenantStore(self.conf)
        test_image_id = 'fdae39a1-bac5-4238-aba4-69bcc726e848'
        actual = self.store.get_container_name(test_image_id,
                                               'default_container')
        # Max seed uses the full image id as the suffix.
        expected = 'default_container_' + test_image_id
        self.assertEqual(expected, actual)
    def test_get_container_name_with_dash(self):
        self.config(swift_store_multiple_containers_seed=10)
        self.store = swift.SingleTenantStore(self.conf)
        test_image_id = 'fdae39a1-bac5-4238-aba4-69bcc726e848'
        actual = self.store.get_container_name(test_image_id,
                                               'default_container')
        # The suffix is 11 characters here: dashes passed over do not
        # count against the 10-character seed.
        expected = 'default_container_' + 'fdae39a1-ba'
        self.assertEqual(expected, actual)
    def test_get_container_name_with_min_seed(self):
        self.config(swift_store_multiple_containers_seed=1)
        self.store = swift.SingleTenantStore(self.conf)
        test_image_id = 'fdae39a1-bac5-4238-aba4-69bcc726e848'
        actual = self.store.get_container_name(test_image_id,
                                               'default_container')
        expected = 'default_container_' + 'f'
        self.assertEqual(expected, actual)
    def test_get_container_name_with_multiple_containers_turned_off(self):
        # Seed 0 disables sharding: the plain container name is returned.
        self.config(swift_store_multiple_containers_seed=0)
        self.store.configure()
        test_image_id = 'random_id'
        actual = self.store.get_container_name(test_image_id,
                                               'default_container')
        expected = 'default_container'
        self.assertEqual(expected, actual)
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import argparse
import math
import os
import shutil
import sys
import zipfile
from os import path
import numpy as np
import mxnet as mx
from mxnet import gluon, autograd as ag
from mxnet.gluon import nn
from mxnet.gluon.contrib import nn as contrib_nn
from mxnet.image import CenterCropAug, ResizeAug
from mxnet.io import PrefetchingIter
from mxnet.test_utils import download
this_dir = path.abspath(path.dirname(__file__))
sys.path.append(path.join(this_dir, path.pardir))
from data import ImagePairIter
# CLI
parser = argparse.ArgumentParser(description='Super-resolution using an efficient sub-pixel convolution neural network.')
parser.add_argument('--upscale_factor', type=int, default=3, help="super resolution upscale factor. default is 3.")
parser.add_argument('--batch_size', type=int, default=4, help='training batch size, per device. default is 4.')
parser.add_argument('--test_batch_size', type=int, default=100, help='test batch size')
parser.add_argument('--epochs', type=int, default=30, help='number of training epochs')
parser.add_argument('--lr', type=float, default=0.001, help='learning Rate. default is 0.001.')
parser.add_argument('--use-gpu', action='store_true', help='whether to use GPU.')
parser.add_argument('--seed', type=int, default=123, help='random seed to use. Default=123')
parser.add_argument('--resolve_img', type=str, help='input image to use')
opt = parser.parse_args()
print(opt)
upscale_factor = opt.upscale_factor
batch_size, test_batch_size = opt.batch_size, opt.test_batch_size
# color_flag=0 loads images as single-channel (the net works on luminance).
color_flag = 0
# Get data from https://github.com/BIDS/BSDS500/
# The BSDS500 Dataset is copyright Berkeley Computer Vision Group
# For more details, see https://www2.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/resources.html#bsds500
# Layout: ~/.mxnet/datasets/{tmp,BSDS500}; tmp dirs are removed after extraction.
datasets_dir = path.expanduser(path.join("~", ".mxnet", "datasets"))
datasets_tmpdir = path.join(datasets_dir, "tmp")
dataset_url = "https://github.com/BIDS/BSDS500/archive/master.zip"
data_dir = path.expanduser(path.join(datasets_dir, "BSDS500"))
tmp_dir = path.join(data_dir, "tmp")
def get_dataset(prefetch=False):
    """Download the BSDS500 dataset and return train and test iters.

    Downloads and extracts the archive only when data_dir does not yet
    exist. Returns a (train_iter, test_iter) pair; with prefetch=True each
    iterator is wrapped in a background PrefetchingIter.
    """
    if path.exists(data_dir):
        print(
            "Directory {} already exists, skipping.\n"
            "To force download and extraction, delete the directory and re-run."
            "".format(data_dir),
            file=sys.stderr,
        )
    else:
        print("Downloading dataset...", file=sys.stderr)
        downloaded_file = download(dataset_url, dirname=datasets_tmpdir)
        print("done", file=sys.stderr)
        print("Extracting files...", end="", file=sys.stderr)
        os.makedirs(data_dir)
        os.makedirs(tmp_dir)
        with zipfile.ZipFile(downloaded_file) as archive:
            archive.extractall(tmp_dir)
        shutil.rmtree(datasets_tmpdir)
        # Keep only the images and ground-truth trees from the archive.
        shutil.copytree(
            path.join(tmp_dir, "BSDS500-master", "BSDS500", "data", "images"),
            path.join(data_dir, "images"),
        )
        shutil.copytree(
            path.join(tmp_dir, "BSDS500-master", "BSDS500", "data", "groundTruth"),
            path.join(data_dir, "groundTruth"),
        )
        shutil.rmtree(tmp_dir)
        print("done", file=sys.stderr)
    # Crop so the high-res target size is an exact multiple of the
    # upscale factor; the low-res input is the target downscaled by it.
    crop_size = 256
    crop_size -= crop_size % upscale_factor
    input_crop_size = crop_size // upscale_factor
    input_transform = [CenterCropAug((crop_size, crop_size)), ResizeAug(input_crop_size)]
    target_transform = [CenterCropAug((crop_size, crop_size))]
    iters = (
        ImagePairIter(
            path.join(data_dir, "images", "train"),
            (input_crop_size, input_crop_size),
            (crop_size, crop_size),
            batch_size,
            color_flag,
            input_transform,
            target_transform,
        ),
        ImagePairIter(
            path.join(data_dir, "images", "test"),
            (input_crop_size, input_crop_size),
            (crop_size, crop_size),
            test_batch_size,
            color_flag,
            input_transform,
            target_transform,
        ),
    )
    return [PrefetchingIter(i) for i in iters] if prefetch else iters
train_data, val_data = get_dataset()
mx.random.seed(opt.seed)
# Train on a single GPU when requested, otherwise on CPU.
ctx = [mx.gpu(0)] if opt.use_gpu else [mx.cpu()]
class SuperResolutionNet(gluon.HybridBlock):
    """ESPCN-style net: conv feature stack followed by pixel-shuffle upsampling."""
    def __init__(self, upscale_factor):
        super(SuperResolutionNet, self).__init__()
        with self.name_scope():
            self.conv1 = nn.Conv2D(64, (5, 5), strides=(1, 1), padding=(2, 2), activation='relu')
            self.conv2 = nn.Conv2D(64, (3, 3), strides=(1, 1), padding=(1, 1), activation='relu')
            self.conv3 = nn.Conv2D(32, (3, 3), strides=(1, 1), padding=(1, 1), activation='relu')
            # upscale_factor**2 output channels feed PixelShuffle2D, which
            # rearranges them into an (H*f, W*f) single-channel image.
            self.conv4 = nn.Conv2D(upscale_factor ** 2, (3, 3), strides=(1, 1), padding=(1, 1))
            self.pxshuf = contrib_nn.PixelShuffle2D(upscale_factor)
    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.pxshuf(x)
        return x
net = SuperResolutionNet(upscale_factor)
metric = mx.metric.MSE()
def test(ctx):
    """Evaluate the net on val_data and print the average per-batch PSNR."""
    val_data.reset()
    total_psnr = 0.0
    num_batches = 0
    for batch in val_data:
        num_batches += 1
        inputs = gluon.utils.split_and_load(batch.data[0], ctx_list=ctx,
                                            batch_axis=0)
        targets = gluon.utils.split_and_load(batch.label[0], ctx_list=ctx,
                                             batch_axis=0)
        predictions = [net(inp) for inp in inputs]
        metric.update(targets, predictions)
        # PSNR from MSE, assuming pixel values scaled to [0, 1] (peak 1.0)
        # -- TODO confirm against ImagePairIter's output range.
        total_psnr += 10 * math.log10(1 / metric.get()[1])
        metric.reset()
    avg = total_psnr / num_batches
    print('validation avg psnr: %f' % avg)
def train(epoch, ctx):
    """Train the net for `epoch` epochs on `ctx`, validating and saving params.

    ctx may be a single mx.Context or a list of contexts; batches are
    split across the list.
    """
    if isinstance(ctx, mx.Context):
        ctx = [ctx]
    net.initialize(mx.init.Orthogonal(), ctx=ctx)
    # re-initialize conv4's weight to be Orthogonal
    net.conv4.initialize(mx.init.Orthogonal(scale=1), force_reinit=True, ctx=ctx)
    trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': opt.lr})
    loss = gluon.loss.L2Loss()
    for i in range(epoch):
        train_data.reset()
        for batch in train_data:
            data = gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
            label = gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
            outputs = []
            # Record the forward passes so backward() can compute grads.
            with ag.record():
                for x, y in zip(data, label):
                    z = net(x)
                    L = loss(z, y)
                    L.backward()
                    outputs.append(z)
            # Step size is normalized by the full (pre-split) batch size.
            trainer.step(batch.data[0].shape[0])
            metric.update(label, outputs)
        name, acc = metric.get()
        metric.reset()
        print('training mse at epoch %d: %s=%f'%(i, name, acc))
        test(ctx)
    net.save_parameters(path.join(this_dir, 'superres.params'))
def resolve(ctx):
    """Super-resolve opt.resolve_img with saved params and write *-resolved.png."""
    from PIL import Image
    if isinstance(ctx, list):
        ctx = [ctx[0]]
    img_basename = path.splitext(path.basename(opt.resolve_img))[0]
    img_dirname = path.dirname(opt.resolve_img)
    net.load_parameters(path.join(this_dir, 'superres.params'), ctx=ctx)
    # Only the luminance (Y) channel goes through the net; chroma channels
    # are simply bicubic-resized to match.
    img = Image.open(opt.resolve_img).convert('YCbCr')
    y, cb, cr = img.split()
    # Add batch and channel axes: (H, W) -> (1, 1, H, W).
    data = mx.nd.expand_dims(mx.nd.expand_dims(mx.nd.array(y), axis=0), axis=0)
    # shape=(-3, -2): merge the first two axes, keep the rest unchanged.
    out_img_y = mx.nd.reshape(net(data), shape=(-3, -2)).asnumpy()
    out_img_y = out_img_y.clip(0, 255)
    out_img_y = Image.fromarray(np.uint8(out_img_y[0]), mode='L')
    out_img_cb = cb.resize(out_img_y.size, Image.BICUBIC)
    out_img_cr = cr.resize(out_img_y.size, Image.BICUBIC)
    out_img = Image.merge('YCbCr', [out_img_y, out_img_cb, out_img_cr]).convert('RGB')
    out_img.save(path.join(img_dirname, '{}-resolved.png'.format(img_basename)))
# Inference when an input image was given, otherwise train from scratch.
if opt.resolve_img:
    resolve(ctx)
else:
    train(opt.epochs, ctx)
| |
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for model.py."""
from datetime import datetime
from google.appengine.ext import db
import unittest
import model
from utils import get_utcnow, set_utcnow_for_test
class ModelTests(unittest.TestCase):
'''Test the loose odds and ends.'''
def setUp(self):
set_utcnow_for_test(datetime(2010, 1, 1))
self.p1 = model.Person.create_original(
'haiti',
first_name='John',
last_name='Smith',
home_street='Washington St.',
home_city='Los Angeles',
home_state='California',
home_postal_code='11111',
home_neighborhood='Good Neighborhood',
author_name='Alice Smith',
author_phone='111-111-1111',
author_email='alice.smith@gmail.com',
source_url='https://www.source.com',
source_date=datetime(2010, 1, 1),
source_name='Source Name',
entry_date=datetime(2010, 1, 1),
expiry_date=datetime(2010, 2, 1),
other='')
self.p2 = model.Person.create_original(
'haiti',
first_name='Tzvika',
last_name='Hartman',
home_street='Herzl St.',
home_city='Tel Aviv',
home_state='Israel',
entry_date=datetime(2010, 1, 1),
expiry_date=datetime(2010, 3, 1),
other='')
self.key_p1 = db.put(self.p1)
self.key_p2 = db.put(self.p2)
self.n1_1 = model.Note.create_original(
'haiti',
person_record_id=self.p1.record_id,
linked_person_record_id=self.p2.record_id,
status=u'believed_missing',
found=False,
entry_date=get_utcnow(),
source_date=datetime(2000, 1, 1))
self.n1_2 = model.Note.create_original(
'haiti',
person_record_id=self.p1.record_id,
found=True,
entry_date=get_utcnow(),
source_date=datetime(2000, 2, 2))
self.key_n1_1 = db.put(self.n1_1)
self.key_n1_2 = db.put(self.n1_2)
# Update the Person entity according to the Note.
self.p1.update_from_note(self.n1_1)
self.p1.update_from_note(self.n1_2)
db.put(self.p1)
def tearDown(self):
db.delete([self.key_p1, self.key_p2, self.key_n1_1, self.key_n1_2])
def test_person(self):
assert self.p1.first_name == 'John'
assert self.p1.photo_url == ''
assert self.p1.is_clone() == False
assert model.Person.get('haiti', self.p1.record_id).record_id == \
self.p1.record_id
assert model.Person.get('haiti', self.p2.record_id).record_id == \
self.p2.record_id
assert model.Person.get('haiti', self.p1.record_id).record_id != \
self.p2.record_id
# Testing prefix properties
assert hasattr(self.p1, 'first_name_n_')
assert hasattr(self.p1, 'home_street_n1_')
assert hasattr(self.p1, 'home_postal_code_n2_')
# Testing indexing properties
assert self.p1._fields_to_index_properties == \
['first_name', 'last_name']
assert self.p1._fields_to_index_by_prefix_properties == \
['first_name', 'last_name']
# Test propagation of Note fields to Person.
assert self.p1.latest_status == u'believed_missing' # from first note
assert self.p1.latest_status_source_date == datetime(2000, 1, 1)
assert self.p1.latest_found == True # from second note
assert self.p1.latest_found_source_date == datetime(2000, 2, 2)
# Adding a Note with only 'found' should not affect 'last_status'.
n1_3 = model.Note.create_original(
'haiti', person_record_id=self.p1.record_id, found=False,
entry_date=get_utcnow(), source_date=datetime(2000, 3, 3))
self.p1.update_from_note(n1_3)
assert self.p1.latest_status == u'believed_missing'
assert self.p1.latest_status_source_date == datetime(2000, 1, 1)
assert self.p1.latest_found == False
assert self.p1.latest_found_source_date == datetime(2000, 3, 3)
# Adding a Note with only 'status' should not affect 'last_found'.
n1_4 = model.Note.create_original(
'haiti', person_record_id=self.p1.record_id,
found=None, status=u'is_note_author',
entry_date=get_utcnow(),
source_date=datetime(2000, 4, 4))
self.p1.update_from_note(n1_4)
assert self.p1.latest_status == u'is_note_author'
assert self.p1.latest_status_source_date == datetime(2000, 4, 4)
assert self.p1.latest_found == False
assert self.p1.latest_found_source_date == datetime(2000, 3, 3)
# Adding an older Note should not affect either field.
n1_5 = model.Note.create_original(
'haiti', person_record_id=self.p1.record_id,
found=True, status=u'believed_alive',
entry_date=get_utcnow(),
source_date=datetime(2000, 1, 2))
self.p1.update_from_note(n1_5)
assert self.p1.latest_status == u'is_note_author'
assert self.p1.latest_status_source_date == datetime(2000, 4, 4)
assert self.p1.latest_found == False
assert self.p1.latest_found_source_date == datetime(2000, 3, 3)
# Adding a Note with a date in between should affect only one field.
n1_6 = model.Note.create_original(
'haiti', person_record_id=self.p1.record_id,
found=True, status=u'believed_alive',
entry_date=get_utcnow(),
source_date=datetime(2000, 3, 4))
self.p1.update_from_note(n1_6)
assert self.p1.latest_status == u'is_note_author'
assert self.p1.latest_status_source_date == datetime(2000, 4, 4)
assert self.p1.latest_found == True
assert self.p1.latest_found_source_date == datetime(2000, 3, 4)
def test_note(self):
assert self.n1_1.is_clone() == False
notes = self.p1.get_notes()
assert notes[0].record_id == self.n1_1.record_id
assert notes[1].record_id == self.n1_2.record_id
assert self.p1.get_linked_persons()[0].record_id == self.p2.record_id
assert self.p2.get_linked_persons() == []
assert model.Note.get('haiti', self.n1_1.record_id).record_id == \
self.n1_1.record_id
assert model.Note.get('haiti', self.n1_2.record_id).record_id == \
self.n1_2.record_id
def test_subscription(self):
sd = 'haiti'
email1 = 'one@example.com'
email2 = 'two@example.com'
s1 = model.Subscription.create(sd, self.p1.record_id, email1, 'fr')
s2 = model.Subscription.create(sd, self.p1.record_id, email2, 'en')
key_s1 = db.put(s1)
key_s2 = db.put(s2)
assert model.Subscription.get(sd, self.p1.record_id, email1) is not None
assert model.Subscription.get(sd, self.p1.record_id, email2) is not None
assert model.Subscription.get(sd, self.p2.record_id, email1) is None
assert model.Subscription.get(sd, self.p2.record_id, email2) is None
assert len(self.p1.get_subscriptions()) == 2
assert len(self.p2.get_subscriptions()) == 0
s3 = model.Subscription.create(sd, self.p1.record_id, email2, 'ar')
key_s3 = db.put(s3)
assert len(self.p1.get_subscriptions()) == 2
assert model.Subscription.get(
sd, self.p1.record_id, email2).language == 'ar'
db.delete([key_s1, key_s2, key_s3])
def test_past_due(self):
"""Make sure Person records are detected as past due correctly."""
def assert_past_due_count(expected):
assert len(list(model.Person.past_due_records())) == expected
assert_past_due_count(0)
set_utcnow_for_test(datetime(2010, 2, 15))
assert_past_due_count(1)
set_utcnow_for_test(datetime(2010, 3, 15))
assert_past_due_count(2)
def test_put_expiry_flags(self):
# Try put_expiry_flags when the record has not expired yet.
self.p1.put_expiry_flags()
# Both entities should be unexpired.
p1 = db.get(self.p1.key())
assert not p1.is_expired
assert p1.first_name == 'John'
n1_1 = db.get(self.n1_1.key())
assert not n1_1.is_expired
# Advance past the expiry date and try again.
set_utcnow_for_test(datetime(2010, 2, 3))
p1.put_expiry_flags()
# Both entities should be expired.
p1 = db.get(self.p1.key())
assert p1.is_expired
assert p1.first_name == 'John'
assert p1.source_date == datetime(2010, 2, 3)
assert p1.entry_date == datetime(2010, 2, 3)
assert p1.expiry_date == datetime(2010, 2, 1)
n1_1 = db.get(self.n1_1.key())
assert n1_1.is_expired
def test_wipe_contents(self):
    """wipe_contents() blanks an expired Person and deletes its notes."""
    # Advance past the expiry date.
    set_utcnow_for_test(datetime(2010, 2, 3))
    self.p1.put_expiry_flags()
    # Try wiping the contents.
    self.p1.wipe_contents()
    p1 = db.get(self.p1.key())
    assert p1.is_expired
    # Personal fields are cleared.  Compare with "is None" (identity), not
    # "== None": PEP 8 mandates identity comparison against None, and model
    # property classes may overload __eq__.
    assert p1.first_name is None
    # Bookkeeping dates survive the wipe.
    assert p1.source_date == datetime(2010, 2, 3)
    assert p1.entry_date == datetime(2010, 2, 3)
    assert p1.expiry_date == datetime(2010, 2, 1)
    # The associated note entity is deleted outright.
    assert not db.get(self.n1_1.key())
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| |
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Daniel Strohmeier <daniel.strohmeier@tu-ilmenau.de>
#
# License: Simplified BSD
import os.path as op
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_allclose,
assert_array_less)
import pytest
import mne
from mne.datasets import testing
from mne.label import read_label
from mne import (read_cov, read_forward_solution, read_evokeds,
convert_forward_solution)
from mne.inverse_sparse import mixed_norm, tf_mixed_norm
from mne.inverse_sparse.mxne_inverse import make_stc_from_dipoles, _split_gof
from mne.minimum_norm import apply_inverse, make_inverse_operator
from mne.minimum_norm.tests.test_inverse import \
assert_var_exp_log, assert_stc_res
from mne.utils import assert_stcs_equal, catch_logging
from mne.dipole import Dipole
from mne.source_estimate import VolSourceEstimate
# Paths into the MNE testing dataset (not downloaded here; tests that need
# them are gated on @testing.requires_testing_data).
data_path = testing.data_path(download=False)
# NOTE: These use the ave and cov from sample dataset (no _trunc)
fname_data = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')
# The forward solution, by contrast, is the truncated (_trunc) testing one.
fname_fwd = op.join(data_path, 'MEG', 'sample',
                    'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
label = 'Aud-rh'
fname_label = op.join(data_path, 'MEG', 'sample', 'labels', '%s.label' % label)
@pytest.fixture(scope='module', params=[testing._pytest_param])
def forward():
    """Get a forward solution (module-scoped; treat as read-only)."""
    # module scope it for speed (but don't overwrite in use!) -- tests that
    # need to modify it must work on a converted/copied version.
    return read_forward_solution(fname_fwd)
@testing.requires_testing_data
@pytest.mark.timeout(150)  # ~30 sec on Travis Linux
@pytest.mark.slowtest
def test_mxne_inverse_standard(forward):
    """Test (TF-)MxNE inverse computation.

    Checks that the 'prox', 'cd' and 'bcd' solvers agree, that dipole and
    vector outputs are consistent with the plain STC output, and that the
    explained-variance logging matches expected values.
    """
    # Read noise covariance matrix
    cov = read_cov(fname_cov)
    # Handling average file
    loose = 0.0
    depth = 0.9
    evoked = read_evokeds(fname_data, condition=0, baseline=(None, 0))
    evoked.crop(tmin=-0.05, tmax=0.2)
    evoked_l21 = evoked.copy()
    evoked_l21.crop(tmin=0.081, tmax=0.1)
    label = read_label(fname_label)
    assert label.hemi == 'rh'
    forward = convert_forward_solution(forward, surf_ori=True)
    # Reduce source space to make test computation faster
    inverse_operator = make_inverse_operator(evoked_l21.info, forward, cov,
                                             loose=loose, depth=depth,
                                             fixed=True, use_cps=True)
    stc_dspm = apply_inverse(evoked_l21, inverse_operator, lambda2=1. / 9.,
                             method='dSPM')
    # Threshold the dSPM estimate into a binary weighting mask for MxNE.
    stc_dspm.data[np.abs(stc_dspm.data) < 12] = 0.0
    stc_dspm.data[np.abs(stc_dspm.data) >= 12] = 1.
    weights_min = 0.5
    # MxNE tests
    alpha = 70  # spatial regularization parameter
    stc_prox = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
                          depth=depth, maxit=300, tol=1e-8,
                          active_set_size=10, weights=stc_dspm,
                          weights_min=weights_min, solver='prox')
    # NOTE(review): pytest.warns(None) is deprecated in pytest>=7 -- these
    # uses should eventually migrate to a warning-recording helper.
    with pytest.warns(None):  # CD
        stc_cd = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
                            depth=depth, maxit=300, tol=1e-8,
                            active_set_size=10, weights=stc_dspm,
                            weights_min=weights_min, solver='cd')
    stc_bcd = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
                         depth=depth, maxit=300, tol=1e-8, active_set_size=10,
                         weights=stc_dspm, weights_min=weights_min,
                         solver='bcd')
    # All three solvers should produce (numerically) the same solution.
    assert_array_almost_equal(stc_prox.times, evoked_l21.times, 5)
    assert_array_almost_equal(stc_cd.times, evoked_l21.times, 5)
    assert_array_almost_equal(stc_bcd.times, evoked_l21.times, 5)
    assert_allclose(stc_prox.data, stc_cd.data, rtol=1e-3, atol=0.0)
    assert_allclose(stc_prox.data, stc_bcd.data, rtol=1e-3, atol=0.0)
    assert_allclose(stc_cd.data, stc_bcd.data, rtol=1e-3, atol=0.0)
    # The recovered source should fall inside the Aud-rh label.
    assert stc_prox.vertices[1][0] in label.vertices
    assert stc_cd.vertices[1][0] in label.vertices
    assert stc_bcd.vertices[1][0] in label.vertices
    # vector
    with pytest.warns(None):  # no convergence
        stc = mixed_norm(evoked_l21, forward, cov, alpha, loose=1, maxit=2)
    with pytest.warns(None):  # no convergence
        stc_vec = mixed_norm(evoked_l21, forward, cov, alpha, loose=1, maxit=2,
                             pick_ori='vector')
    assert_stcs_equal(stc_vec.magnitude(), stc)
    # pick_ori='vector' is invalid with a fixed orientation (loose=0).
    with pytest.warns(None), pytest.raises(ValueError, match='pick_ori='):
        mixed_norm(evoked_l21, forward, cov, alpha, loose=0, maxit=2,
                   pick_ori='vector')
    with pytest.warns(None), catch_logging() as log:  # CD
        dips = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
                          depth=depth, maxit=300, tol=1e-8, active_set_size=10,
                          weights=stc_dspm, weights_min=weights_min,
                          solver='cd', return_as_dipoles=True, verbose=True)
    stc_dip = make_stc_from_dipoles(dips, forward['src'])
    assert isinstance(dips[0], Dipole)
    assert stc_dip.subject == "sample"
    # Dipole representation must round-trip to the same STC as 'cd' above.
    assert_stcs_equal(stc_cd, stc_dip)
    assert_var_exp_log(log.getvalue(), 51, 53)  # 51.8
    # Single time point things should match
    with pytest.warns(None), catch_logging() as log:
        dips = mixed_norm(evoked_l21.copy().crop(0.081, 0.081),
                          forward, cov, alpha, loose=loose,
                          depth=depth, maxit=300, tol=1e-8, active_set_size=10,
                          weights=stc_dspm, weights_min=weights_min,
                          solver='cd', return_as_dipoles=True, verbose=True)
    assert_var_exp_log(log.getvalue(), 37.8, 38.0)  # 37.9
    gof = sum(dip.gof[0] for dip in dips)  # these are now partial exp vars
    assert_allclose(gof, 37.9, atol=0.1)
    with pytest.warns(None), catch_logging() as log:
        stc, res = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
                              depth=depth, maxit=300, tol=1e-8,
                              weights=stc_dspm,  # gh-6382
                              active_set_size=10, return_residual=True,
                              solver='cd', verbose=True)
    assert_array_almost_equal(stc.times, evoked_l21.times, 5)
    assert stc.vertices[1][0] in label.vertices
    assert_var_exp_log(log.getvalue(), 51, 53)  # 51.8
    assert stc.data.min() < -1e-9  # signed
    assert_stc_res(evoked_l21, stc, forward, res)
    # irMxNE tests
    with pytest.warns(None), catch_logging() as log:  # CD
        stc, residual = mixed_norm(
            evoked_l21, forward, cov, alpha, n_mxne_iter=5, loose=0.0001,
            depth=depth, maxit=300, tol=1e-8, active_set_size=10,
            solver='cd', return_residual=True, pick_ori='vector', verbose=True)
    assert_array_almost_equal(stc.times, evoked_l21.times, 5)
    assert stc.vertices[1][0] in label.vertices
    assert stc.vertices == [[63152], [79017]]
    assert_var_exp_log(log.getvalue(), 51, 53)  # 51.8
    assert_stc_res(evoked_l21, stc, forward, residual)
    # Do with TF-MxNE for test memory savings
    alpha = 60.  # overall regularization parameter
    l1_ratio = 0.01  # temporal regularization proportion
    stc, _ = tf_mixed_norm(evoked, forward, cov,
                           loose=loose, depth=depth, maxit=100, tol=1e-4,
                           tstep=4, wsize=16, window=0.1, weights=stc_dspm,
                           weights_min=weights_min, return_residual=True,
                           alpha=alpha, l1_ratio=l1_ratio)
    assert_array_almost_equal(stc.times, evoked.times, 5)
    assert stc.vertices[1][0] in label.vertices
    # vector
    stc_nrm = tf_mixed_norm(
        evoked, forward, cov, loose=1, depth=depth, maxit=2, tol=1e-4,
        tstep=4, wsize=16, window=0.1, weights=stc_dspm,
        weights_min=weights_min, alpha=alpha, l1_ratio=l1_ratio)
    stc_vec, residual = tf_mixed_norm(
        evoked, forward, cov, loose=1, depth=depth, maxit=2, tol=1e-4,
        tstep=4, wsize=16, window=0.1, weights=stc_dspm,
        weights_min=weights_min, alpha=alpha, l1_ratio=l1_ratio,
        pick_ori='vector', return_residual=True)
    assert_stcs_equal(stc_vec.magnitude(), stc_nrm)
    # Out-of-range regularization parameters must be rejected.
    pytest.raises(ValueError, tf_mixed_norm, evoked, forward, cov,
                  alpha=101, l1_ratio=0.03)
    pytest.raises(ValueError, tf_mixed_norm, evoked, forward, cov,
                  alpha=50., l1_ratio=1.01)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_mxne_vol_sphere():
    """Test (TF-)MxNE with a sphere forward and volumic source space."""
    evoked = read_evokeds(fname_data, condition=0, baseline=(None, 0))
    evoked.crop(tmin=-0.05, tmax=0.2)
    cov = read_cov(fname_cov)
    evoked_l21 = evoked.copy()
    evoked_l21.crop(tmin=0.081, tmax=0.1)
    info = evoked.info
    # Build a sphere model + volume source space + forward from scratch.
    sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.080)
    src = mne.setup_volume_source_space(subject=None, pos=15., mri=None,
                                        sphere=(0.0, 0.0, 0.0, 0.08),
                                        bem=None, mindist=5.0,
                                        exclude=2.0, sphere_units='m')
    fwd = mne.make_forward_solution(info, trans=None, src=src,
                                    bem=sphere, eeg=False, meg=True)
    alpha = 80.
    # These loose values should be rejected (ValueError) for this setup.
    pytest.raises(ValueError, mixed_norm, evoked, fwd, cov, alpha,
                  loose=0.0, return_residual=False,
                  maxit=3, tol=1e-8, active_set_size=10)
    pytest.raises(ValueError, mixed_norm, evoked, fwd, cov, alpha,
                  loose=0.2, return_residual=False,
                  maxit=3, tol=1e-8, active_set_size=10)
    # irMxNE tests
    with catch_logging() as log:
        stc = mixed_norm(evoked_l21, fwd, cov, alpha,
                         n_mxne_iter=1, maxit=30, tol=1e-8,
                         active_set_size=10, verbose=True)
    assert isinstance(stc, VolSourceEstimate)
    assert_array_almost_equal(stc.times, evoked_l21.times, 5)
    assert_var_exp_log(log.getvalue(), 9, 11)  # 10.2
    # Compare orientation obtained using fit_dipole and gamma_map
    # for a simulated evoked containing a single dipole
    stc = mne.VolSourceEstimate(50e-9 * np.random.RandomState(42).randn(1, 4),
                                vertices=[stc.vertices[0][:1]],
                                tmin=stc.tmin,
                                tstep=stc.tstep)
    evoked_dip = mne.simulation.simulate_evoked(fwd, stc, info, cov, nave=1e9,
                                                use_cps=True)
    dip_mxne = mixed_norm(evoked_dip, fwd, cov, alpha=80,
                          n_mxne_iter=1, maxit=30, tol=1e-8,
                          active_set_size=10, return_as_dipoles=True)
    # Keep only the strongest recovered dipole for the comparison.
    amp_max = [np.max(d.amplitude) for d in dip_mxne]
    dip_mxne = dip_mxne[np.argmax(amp_max)]
    assert dip_mxne.pos[0] in src[0]['rr'][stc.vertices[0]]
    dip_fit = mne.fit_dipole(evoked_dip, cov, sphere)[0]
    # Orientations from the two methods should be (anti)parallel...
    assert np.abs(np.dot(dip_fit.ori[0], dip_mxne.ori[0])) > 0.99
    # ... and the positions close together.
    dist = 1000 * np.linalg.norm(dip_fit.pos[0] - dip_mxne.pos[0])
    assert dist < 4.  # within 4 mm
    # Do with TF-MxNE for test memory savings
    alpha = 60.  # overall regularization parameter
    l1_ratio = 0.01  # temporal regularization proportion
    stc, _ = tf_mixed_norm(evoked, fwd, cov, maxit=3, tol=1e-4,
                           tstep=16, wsize=32, window=0.1, alpha=alpha,
                           l1_ratio=l1_ratio, return_residual=True)
    assert isinstance(stc, VolSourceEstimate)
    assert_array_almost_equal(stc.times, evoked.times, 5)
@pytest.mark.parametrize('mod', (
    None, 'mult', 'augment', 'sign', 'zero', 'less'))
def test_split_gof_basic(mod):
    """Test splitting the goodness of fit.

    Each `mod` perturbs the trivial gain/X pair in a different way; in every
    case the per-source GOF entries must still sum to the total GOF.
    """
    # first a trivial case
    gain = np.array([[0., 1., 1.], [1., 1., 0.]]).T
    M = np.ones((3, 1))
    X = np.ones((2, 1))
    M_est = gain @ X
    assert_allclose(M_est, np.array([[1., 2., 1.]]).T)  # a reasonable estimate
    if mod == 'mult':
        # scale one column down and compensate in X (same M_est)
        gain *= [1., -0.5]
        X[1] *= -2
    elif mod == 'augment':
        # add a dead (all-zero) column with a nonzero coefficient
        gain = np.concatenate((gain, np.zeros((3, 1))), axis=1)
        X = np.concatenate((X, [[1.]]))
    elif mod == 'sign':
        # flip the sign of one row everywhere (GOF is sign-invariant)
        gain[1] *= -1
        M[1] *= -1
        M_est[1] *= -1
    elif mod in ('zero', 'less'):
        gain = np.array([[1, 1., 1.], [1., 1., 1.]]).T
        if mod == 'zero':
            X[:, 0] = [1., 0.]
        else:
            X[:, 0] = [1., 0.5]
        M_est = gain @ X
    else:
        assert mod is None
    res = M - M_est
    gof = 100 * (1. - (res * res).sum() / (M * M).sum())
    gof_split = _split_gof(M, X, gain)
    # The split must always account for the full GOF.
    assert_allclose(gof_split.sum(), gof)
    want = gof_split[[0, 0]]
    if mod == 'augment':
        want = np.concatenate((want, [[0]]))
    if mod in ('mult', 'less'):
        assert_array_less(gof_split[1], gof_split[0])
    elif mod == 'zero':
        # an inactive source contributes (numerically) nothing
        assert_allclose(gof_split[0], gof_split.sum(0))
        assert_allclose(gof_split[1], 0., atol=1e-6)
    else:
        assert_allclose(gof_split, want, atol=1e-12)
@testing.requires_testing_data
@pytest.mark.parametrize('idx, weights', [
    # empirically determined approximately orthogonal columns: 0, 15157, 19448
    ([0], [1]),
    ([0, 15157], [1, 1]),
    ([0, 15157], [1, 3]),
    ([0, 15157], [5, -1]),
    ([0, 15157, 19448], [1, 1, 1]),
    ([0, 15157, 19448], [1e-2, 1, 5]),
])
def test_split_gof_meg(forward, idx, weights):
    """Test GOF splitting on MEG data.

    Uses near-orthogonal forward columns so the per-source GOF split has a
    closed-form expectation.
    """
    gain = forward['sol']['data'][:, idx]
    # close to orthogonal
    norms = np.linalg.norm(gain, axis=0)
    triu = np.triu_indices(len(idx), 1)
    prods = np.abs(np.dot(gain.T, gain) / np.outer(norms, norms))[triu]
    assert_array_less(prods, 5e-3)  # approximately orthogonal
    # first, split across time (one dipole per time point)
    M = gain * weights
    gof_split = _split_gof(M, np.diag(weights), gain)
    assert_allclose(gof_split.sum(0), 100., atol=1e-5)  # all sum to 100
    assert_allclose(gof_split, 100 * np.eye(len(weights)), atol=1)  # loc
    # next, summed to a single time point (all dipoles active at one time pt)
    weights = np.array(weights)[:, np.newaxis]
    x = gain @ weights
    assert x.shape == (gain.shape[0], 1)
    gof_split = _split_gof(x, weights, gain)
    # With orthogonal columns the split is proportional to squared amplitude.
    want = (norms * weights.T).T ** 2
    want = 100 * want / want.sum()
    assert_allclose(gof_split, want, atol=1e-3, rtol=1e-2)
    assert_allclose(gof_split.sum(), 100, rtol=1e-5)
| |
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2013--, biocore development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import os
from tempfile import mkstemp
from unittest import TestCase, main

from skbio.util import remove_files

from bfillings.rtax import Rtax, assign_taxonomy
class RtaxClassifierTests(TestCase):
    """ Tests of the RTAX classifier module """

    def _make_temp_file(self, suffix, contents):
        """Create a named temp file holding `contents`; return its path.

        ``mkstemp`` returns an already-open OS-level file descriptor in
        addition to the path; the original code discarded that descriptor,
        leaking one fd per fixture file per test.  Wrapping it with
        ``os.fdopen`` writes the contents and closes the fd.
        """
        fd, path = mkstemp(prefix='RtaxTaxonAssignerTests_', suffix=suffix)
        with os.fdopen(fd, 'w') as fh:
            fh.write(contents)
        return path

    def setUp(self):
        """Write the taxonomy/FASTA fixtures to disk and register cleanup."""
        self.maxDiff = None
        self.id_to_taxonomy_fp = self._make_temp_file('.txt',
                                                      rtax_reference_taxonomy)
        self.input_seqs_fp = self._make_temp_file('.fasta',
                                                  rtax_test_repset_fasta)
        self.reference_seqs_fp = self._make_temp_file('.fasta',
                                                      rtax_reference_fasta)
        self.read_1_seqs_fp = self._make_temp_file('.fasta',
                                                   rtax_test_read1_fasta)
        self.read_2_seqs_fp = self._make_temp_file('.fasta',
                                                   rtax_test_read2_fasta)
        self._paths_to_clean_up = [
            self.id_to_taxonomy_fp, self.input_seqs_fp,
            self.reference_seqs_fp, self.read_1_seqs_fp, self.read_2_seqs_fp]

    def tearDown(self):
        """Remove every registered file; missing files are not an error."""
        remove_files(set(self._paths_to_clean_up), error_on_missing=False)

    def test_paired_end_classification(self):
        """Strict paired-end mode assigns taxonomy per cluster."""
        # RTAX derives index files next to the read files; clean those too.
        self._paths_to_clean_up += cleanAll(self.read_1_seqs_fp)
        self._paths_to_clean_up += cleanAll(self.read_2_seqs_fp)
        result = assign_taxonomy(
            self.input_seqs_fp, self.reference_seqs_fp,
            self.id_to_taxonomy_fp, self.read_1_seqs_fp, self.read_2_seqs_fp,
            single_ok=False, header_id_regex="\\S+\\s+(\\S+?)\/")
        self.assertEqual(result, rtax_expected_result_paired)

    def test_paired_end_classification_with_fallback(self):
        """With single_ok=True, clusters lacking a mate pair still classify."""
        self._paths_to_clean_up += cleanAll(self.read_1_seqs_fp)
        self._paths_to_clean_up += cleanAll(self.read_2_seqs_fp)
        result = assign_taxonomy(
            self.input_seqs_fp, self.reference_seqs_fp,
            self.id_to_taxonomy_fp, self.read_1_seqs_fp, self.read_2_seqs_fp,
            single_ok=True, header_id_regex="\\S+\\s+(\\S+?)\/")
        self.assertEqual(result, rtax_expected_result_paired_with_fallback)

    def test_single_end_classification(self):
        """Single-end mode (no read-2 file) classifies from read 1 alone."""
        self._paths_to_clean_up += cleanAll(self.read_1_seqs_fp)
        result = assign_taxonomy(
            self.input_seqs_fp, self.reference_seqs_fp,
            self.id_to_taxonomy_fp, self.read_1_seqs_fp, None,
            header_id_regex="\\S+\\s+(\\S+?)\/")
        self.assertEqual(result, rtax_expected_result_single)

    # I'd like to add tests here that involve the TOOMANYHITS case.  However, that requires either a reference
    # database with >16,000 sequences, which we don't have handy for tests, or adjusting the maxMaxAccepts parameter to rtaxSearch.pl.
    # However the "rtax" wrapper shell script currently doesn't allow setting that option, and I'd prefer to leave that as is
    # unless someone actually wants to use it.  Thus the TOOMANYHITS situation is not easily testable at the moment.
def cleanAll(path):
    """Return `path` plus every auxiliary index file derived from it.

    RTAX builds ``.pos`` and ``.lines`` DBM indexes beside each read file;
    depending on the local DBM implementation each index may exist as a
    ``.db``, ``.dir`` or ``.pag`` file, so all variants are listed.
    """
    suffixes = [""] + ["%s.%s" % (base, ext)
                       for base in (".pos", ".lines")
                       for ext in ("db", "dir", "pag")]
    return [path + suffix for suffix in suffixes]
# sample data copied from GreenGenes
rtax_reference_taxonomy = """508720 99.0 k__Bacteria p__Actinobacteria c__Actinobacteria o__Actinomycetales f__Propionibacteriaceae g__Propionibacterium s__Propionibacterium acnes
508050 99.0 k__Bacteria p__Proteobacteria c__Betaproteobacteria o__Burkholderiales f__Comamonadaceae g__Diaphorobacter s__
502492 99.0 k__Bacteria p__Proteobacteria c__Betaproteobacteria o__Burkholderiales f__ g__Aquabacterium s__
"""
rtax_reference_fasta = """>508720
GACGAACGCTGGCGGCGTGCTTAACACATGCAAGTCGAACGGAAAGGCCCTGCTTTTGTGGGGTGCTCGAGTGGCGAACG
GGTGAGTAACACGTGAGTAACCTGCCCTTGACTTTGGGATAACTTCAGGAAACTGGGGCTAATACCGGATAGGAGCTCCT
GCTGCATGGTGGGGGTTGGAAAGTTTCGGCGGTTGGGGATGGACTCGCGGCTTATCAGCTTGTTGGTGGGGTAGTGGCTT
ACCAAGGCTTTGACGGGTAGCCGGCCTGAGAGGGTGACCGGCCACATTGGGACTGAGATACGGCCCAGACTCCTACGGGA
GGCAGCAGTGGGGAATATTGCACAATGGGCGGAAGCCTGATGCAGCAACGCCGCGTGCGGGATGACGGCCTTCGGGTTGT
AAACCGCTTTCGCCTGTGACGAAGCGTGAGTGACGGTAATGGGTAAAGAAGCACCGGCTAACTACGTGCCAGCAGCCGCG
GTGATACGTAGGGTGCGAGCGTTGTCCGGATTTATTGGGCGTAAAGGGCTCGTAGGTGGTTGATCGCGTCGGAAGTGTAA
TCTTGGGGCTTAACCCTGAGCGTGCTTTCGATACGGGTTGACTTGAGGAAGGTAGGGGAGAATGGAATTCCTGGTGGAGC
GGTGGAATGCGCAGATATCAGGAGGAACACCAGTGGCGAAGGCGGTTCTCTGGGCCTTTCCTGACGCTGAGGAGCGAAAG
CGTGGGGAGCGAACAGGCTTAGATACCCTGGTAGTCCACGCTGTAAACGGTGGGTACTAGGTGTGGGGTCCATTCCACGG
GTTCCGTGCCGTAGCTAACGCTTTAAGTACCCCGCCTGGGGAGTACGGCCGCAAGGCTAAAACTCAAAGGAATTGACGGG
GCCCCGCACAAGCGGCGGAGCATGCGGATTAATTCGATGCAACGCGTAGAACCTTACCTGGGTTTGACATGGATCGGGAG
TGCTCAGAGATGGGTGTGCCTCTTTTGGGGTCGGTTCACAGGTGGTGCATGGCTGTCGTCAGCTCGTGTCGTGAGATGTT
GGGTTAAGTCCCGCAACGAGCGCAACCCTTGTTCACTGTTGCCAGCACGTTATGGTGGGGACTCAGTGGAGACCGCCGGG
GTCAACTCGGAGGAAGGTGGGGATGACGTCAAGTCATCATGCCCCTTATGTCCAGGGCTTCACGCATGCTACAATGGCTG
GTACAGAGAGTGGCGAGCCTGTGAGGGTGAGCGAATCTCGGAAAGCCGGTCTCAGTTCGGATTGGGGTCTGCAACTCGAC
CTCATGAAGTCGGAGTCGCTAGTAATCGCAGATCAGCAACGCTGCGGTGAATACGTTCCCGGGGCT
>508050
ATTGAACGCTGGCGGCATGCCTTACACATGCAAGTCGAACGGTAACAGGTCTTCGGATGCTGACGAGTGGCGAACGGGTG
AGTAATACATCGGAACGTGCCCGATCGTGGGGGATAACGAGGCGAAAGCTTTGCTAATACCGCATACGATCTACGGATGA
AAGCGGGGGATCTTCGGACCTCGCGCGGACGGAGCGGCCGATGGCAGATTAGGTAGTTGGTGGGATAAAAGCTTACCAAG
CCGACGATCTGTAGCTGGTCTGAGAGGATGATCAGCCACACTGGGACTGAGACACGGCCCAGACTCCTACGGGAGGCAGC
AGTGGGGAATTTTGGACAATGGGCGAAAGCCTGATCCAGCCATGCCGCGTGCAGGATGAAGGCCTTCGGGTTGTAAACTG
CTTTTGTACGGAACGAAAAGCCTCTTTCTAATAAAGAGGGGTCATGACGGTACCGTAAGAATAAGCACCGGCTAACTACG
TGCCAGCAGCCGCGGTAATACGTAGGGTGCAAGCGTTAATCGGAATTACTGGGCGTAAAGCGTGCGCAGGCGGTTTTGTA
AGACAGAGGTGAAATCCCCGGGCTCAACCTGGGAACTGCCTTTGTGACTGCAAGGCTGGAGTGCGGCAGAGGGGGATGGA
ATTCCGCGTGTAGCAGTGAAATGCGTAGATATGCGGAGGAACACCGATGGCGAAGGCAATCCCCTGGGCCTGCACTGACG
CTCATGCACGAAAGCGTGGGGAGCAAACAGGATTAGATACCCTGGTAGTCCACGCCCTAAACGATGTCAACTGGTTGTTG
GGTCTTCACTGACTCAGTAACGAAGCTAACGCGTGAAGTTGACCGCCTGGGGAGTACGGCCGCAAGGTTGAAACTCAAAG
GAATTGACGGGGACCCGCACAAGCGGTGGATGATGTGGTTTAATTCGATGCAACGCGAAAAACCTTACCCACCTTTGACA
TGGCAGGAAGTTTCCAGAGATGGATTCGTGCCCGAAAGGGAACCTGCACACAGGTGCTGCATGGCTGTCGTCAGCTCGTG
TCGTGAGATGTTGGGTTAAGTCCCGCAACGAGCGCAACCCTTGCCATTAGTTGCTACGAAAGGGCACTCTAATGGGACTG
CCGGTGACAAACCGGAGGAAGGTGGGGATGACGTCAAGTCCTCATGGCCCTTATAGGTGGGGCTACACACGTCATACAAT
GGCTGGTACAGAGGGTTGCCAACCCGCGAGGGGGAGCTAATCCCATAAAGCCAGTCGTAGTCCGGATCGCAGTCTGCAAC
TCGACTGCGTGAAGTCGGAATCGCTAGTAATCGCGGATCAGAATGTCGCGGTGAATACGTTCCCGGGTCT
>502492
ATTGAACGCTGGCGGCATGCCTTACACATGCAAGTCGAACGGTAACGGGTCCTTCGGGATGCCGACGAGTGGCGAACGGG
TGAGTAATATATCGGAACGTGCCCAGTAGTGGGGGATAACTGCTCGAAAGAGCAGCTAATACCGCATACGACCTGAGGGT
GAAAGGGGGGGATCGCAAGACCTCTCGCTATTGGAGCGGCCGATATCAGATTAGCTAGTTGGTGGGGTAAAGGCCTACCA
AGGCAACGATCTGTAGTTGGTCTGAGAGGACGACCAGCCACACTGGGACTGAGACACGGCCCAGACTCCTACGGGAGGCA
GCAGTGGGGAATTTTGGACAATGGGCGCAAGCCTGATCCAGCAATGCCGCGTGCAGGAAGAAGGCCTTCGGGTTGTAAAC
TGCTTTTGTCAGGGAAGAAATCTTCTGGGCTAATACCCCGGGAGGATGACGGTACCTGAAGAATAAGCACCGGCTAACTA
CGTGCCAGCAGCCGCGGTAATACGTAGGGTGCGAGCGTTAATCGGAATTACTGGGCGTAAAGCGTGCGCAGGCGGCTTTG
CAAGACAGATGTGAAATCCCCGGGCTCAACCTGGGAACTGCATTTGTGACTGCAAGGCTAGAGTACGGCAGAGGGGGATG
GAATTCCGCGTGTAGCAGTGAAATGCGTAGATATGCGGAGGAACACCAATGGCGAAGGCAATCCCCTGGGCCTGTACTGA
CGCTCATGCACGAAAGCGTGGGGAGCAAACAGGATTAGATACCCTGGTAGTCCACGCCCTAAACGATGTCAACTGGTTGT
TGGACGGCTTGCTGTTCAGTAACGAAGCTAACGCGTGAAGTTGACCGCCTGGGGAGTACGGCCGCAAGGTTGAAACTCAA
AGGAATTGACGGGGACCCGCACAAGCGGTGGATGATGTGGTTTAATTCGATGCAACGCGAAAAACCTTACCTACCCTTGA
CATGTCAAGAATTCTGCAGAGATGTGGAAGTGCTCGAAAGAGAACTTGAACACAGGTGCTGCATGGCCGTCGTCAGCTCG
TGTCGTGAGATGTTGGGTTAAGTCCCGCAACGAGCGCAACCCTTGTCATTAGTTGCTACGCAAGAGCACTCTAATGAGAC
TGCCGGTGACAAACCGGAGGAAGGTGGGGATGACGTCAGGTCCTCATGGCCCTTATGGGTAGGGCTACACACGTCATACA
ATGGCCGGTACAGAGGGCTGCCAACCCGCGAGGGGGAGCCAATCCCAGAAAACCGGTCGTAGTCCGGATCGTAGTCTGCA
ACTCGACTGCGTGAAGTCGGAATCGCTAGTAATCGCGGATCAGCTTGCCGCGGTGAATACGTTCCCGGGTCT
"""
rtax_test_repset_fasta = """>clusterIdA splitRead1IdA
ACCAAGGCTTTGACGGGTAGCCGGCCTGAGTGGGTGACCGGCCACATTGGGACTGAGATACGGCCCAGACTCCTACGGGA
>clusterIdB splitRead1IdB
CCGACGATCTGTAGCTGGTCTGAGAGGATGTTCAGCCACACTGGGACTGAGACACGGCCCAGACTCCTACGGGAGGCAGC
>clusterIdC splitRead1IdC
AGGCAACGATCTGTAGTTGGTCTGAGAGGAGGACCAGCCACACTGGGACTGAGACACGGCCCAGACTCCTACGGGAGGCA
>clusterIdD splitRead1IdD
AGGCAACGATCTGTAGTTGGTCTGAGAGGAGGACCAGCCACACTGGGACGGGGGGGGGGCCCAGACTCCTACGGGAGGCA
"""
# these reads are the 4th and 14th lines from the reference seqs
#rtax_test_read1_fasta = """>splitRead1IdA ampliconId_34563456/1
#ACCAAGGCTTTGACGGGTAGCCGGCCTGAGAGGGTGACCGGCCACATTGGGACTGAGATACGGCCCAGACTCCTACGGGA
#>splitRead1IdB ampliconId_
#CCGACGATCTGTAGCTGGTCTGAGAGGATGATCAGCCACACTGGGACTGAGACACGGCCCAGACTCCTACGGGAGGCAGC
#>splitRead1IdC ampliconId_
#AGGCAACGATCTGTAGTTGGTCTGAGAGGACGACCAGCCACACTGGGACTGAGACACGGCCCAGACTCCTACGGGAGGCA
#"""
#
#rtax_test_read2_fasta = """>splitRead2IdA ampliconId_34563456/3
#GGGTTAAGTCCCGCAACGAGCGCAACCCTTGTTCACTGTTGCCAGCACGTTATGGTGGGGACTCAGTGGAGACCGCCGGG
#>splitRead2IdB ampliconId_
#TCGTGAGATGTTGGGTTAAGTCCCGCAACGAGCGCAACCCTTGCCATTAGTTGCTACGAAAGGGCACTCTAATGGGACTG
#>splitRead2IdC ampliconId_
#TGTCGTGAGATGTTGGGTTAAGTCCCGCAACGAGCGCAACCCTTGTCATTAGTTGCTACGCAAGAGCACTCTAATGAGAC
#"""
# these reads are the 4th and 14th lines from the reference seqs, with one nucleotide changed each
# except D and E, which are unique to one read or the other
# and F and G, which are just decoys
rtax_test_read1_fasta = """>splitRead1IdA ampliconId_34563456/1
ACCAAGGCTTTGACGGGTAGCCGGCCTGAGTGGGTGACCGGCCACATTGGGACTGAGATACGGCCCAGACTCCTACGGGA
>splitRead1IdB ampliconId_12341234/1
CCGACGATCTGTAGCTGGTCTGAGAGGATGTTCAGCCACACTGGGACTGAGACACGGCCCAGACTCCTACGGGAGGCAGC
>splitRead1IdC ampliconId_23452345/1
AGGCAACGATCTGTAGTTGGTCTGAGAGGAGGACCAGCCACACTGGGACTGAGACACGGCCCAGACTCCTACGGGAGGCA
>splitRead1IdD ampliconId_45674567/1
AGGCAACGATCTGTAGTTGGTCTGAGAGGAGGACCAAAAAAAAAAAGACTGAGACACGGCCCAGACTCCTACGGGAGGCA
>splitRead1IdF ampliconId_56785678/1
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
"""
rtax_test_read2_fasta = """>splitRead2IdA ampliconId_34563456/3
GGGTTAAGTCCCGCAACGAGCGCAACCCTTATTCACTGTTGCCAGCACGTTATGGTGGGGACTCAGTGGAGACCGCCGGG
>splitRead2IdB ampliconId_12341234/3
TCGTGAGATGTTGGGTTAAGTCCCGCAACGTGCGCAACCCTTGCCATTAGTTGCTACGAAAGGGCACTCTAATGGGACTG
>splitRead2IdC ampliconId_23452345/3
TGTCGTGAGATGTTGGGTTAAGTCCCGCAAAGAGCGCAACCCTTGTCATTAGTTGCTACGCAAGAGCACTCTAATGAGAC
>splitRead2IdE ampliconId_67896789/3
TGTCGTGAGATGTTGGGTTAAAAAAAAAAAAAAACGCAACCCTTGTCATTAGTTGCTACGCAAGAGCACTCTAATGAGAC
>splitRead2IdG ampliconId_78907890/3
TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT
"""
# Expected output for strict paired-end mode: cluster D has no coherent mate
# pair in the read-2 file, so it is flagged NOMATEPAIR rather than assigned.
rtax_expected_result_paired = {
    'clusterIdA splitRead1IdA': ('k__Bacteria; p__Actinobacteria; c__Actinobacteria; o__Actinomycetales; f__Propionibacteriaceae; g__Propionibacterium; s__Propionibacterium acnes', 1.0),
    'clusterIdB splitRead1IdB': ('k__Bacteria; p__Proteobacteria; c__Betaproteobacteria; o__Burkholderiales; f__Comamonadaceae; g__Diaphorobacter; s__', 1.0),
    'clusterIdC splitRead1IdC': ('k__Bacteria; p__Proteobacteria; c__Betaproteobacteria; o__Burkholderiales; f__; g__Aquabacterium; s__', 1.0),
    'clusterIdD splitRead1IdD': ('NOMATEPAIR', 1.0),
    }
# With single_ok=True the mate-pair-less cluster D falls back to a
# single-read assignment instead of NOMATEPAIR.
rtax_expected_result_paired_with_fallback = {
    'clusterIdA splitRead1IdA': ('k__Bacteria; p__Actinobacteria; c__Actinobacteria; o__Actinomycetales; f__Propionibacteriaceae; g__Propionibacterium; s__Propionibacterium acnes', 1.0),
    'clusterIdB splitRead1IdB': ('k__Bacteria; p__Proteobacteria; c__Betaproteobacteria; o__Burkholderiales; f__Comamonadaceae; g__Diaphorobacter; s__', 1.0),
    'clusterIdC splitRead1IdC': ('k__Bacteria; p__Proteobacteria; c__Betaproteobacteria; o__Burkholderiales; f__; g__Aquabacterium; s__', 1.0),
    'clusterIdD splitRead1IdD': ('k__Bacteria; p__Proteobacteria; c__Betaproteobacteria; o__Burkholderiales; f__; g__Aquabacterium; s__', 1.0),
    }
# Single-end mode (read 1 only) matches the fallback result here.
rtax_expected_result_single = {
    'clusterIdA splitRead1IdA': ('k__Bacteria; p__Actinobacteria; c__Actinobacteria; o__Actinomycetales; f__Propionibacteriaceae; g__Propionibacterium; s__Propionibacterium acnes', 1.0),
    'clusterIdB splitRead1IdB': ('k__Bacteria; p__Proteobacteria; c__Betaproteobacteria; o__Burkholderiales; f__Comamonadaceae; g__Diaphorobacter; s__', 1.0),
    'clusterIdC splitRead1IdC': ('k__Bacteria; p__Proteobacteria; c__Betaproteobacteria; o__Burkholderiales; f__; g__Aquabacterium; s__', 1.0),
    'clusterIdD splitRead1IdD': ('k__Bacteria; p__Proteobacteria; c__Betaproteobacteria; o__Burkholderiales; f__; g__Aquabacterium; s__', 1.0),
    }
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    main()
| |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import json
import os
import subprocess
from collections import defaultdict, namedtuple
from textwrap import dedent
from pants.base.address import SyntheticAddress
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.generator import Generator, TemplateData
from pants.base.source_root import SourceRoot
from pants.base.workunit import WorkUnit, WorkUnitLabel
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_mkdir, safe_open
from pants.util.memo import memoized_property
from pants.contrib.go.subsystems.fetchers import Fetchers
from pants.contrib.go.targets.go_binary import GoBinary
from pants.contrib.go.targets.go_library import GoLibrary
from pants.contrib.go.targets.go_local_source import GoLocalSource
from pants.contrib.go.targets.go_remote_library import GoRemoteLibrary
from pants.contrib.go.tasks.go_task import GoTask
class GoTargetGenerator(object):
  """Automatically generates a Go target graph given pre-existing target roots."""

  class GenerationError(Exception):
    """Raised to indicate an error auto-generating a Go target."""

  class WrongLocalSourceTargetTypeError(GenerationError):
    """Indicates a local source target was defined with the wrong type.

    For example, a Go main package was defined as a GoLibrary instead of a GoBinary.
    """

  class NewRemoteEncounteredButRemotesNotAllowedError(GenerationError):
    """Indicates a new remote library dependency was found but --remote was not enabled."""

  def __init__(self, workunit_factory, go_distribution, build_graph, local_root, fetchers,
               generate_remotes=False, remote_root=None):
    """
    :param workunit_factory: Context manager factory used to wrap `go list` invocations.
    :param go_distribution: Provides `create_go_cmd` for running the `go` tool.
    :param build_graph: The build graph to inject synthetic targets and dependencies into.
    :param string local_root: Source root for local Go packages.
    :param fetchers: Maps remote import paths to fetchers (presumably a `Fetchers`
                     subsystem instance -- confirm against callers).
    :param bool generate_remotes: Whether new remote libraries may be auto-generated.
    :param string remote_root: Source root for remote Go libraries (only needed when
                               remotes are generated).
    """
    self._workunit_factory = workunit_factory
    self._go_distribution = go_distribution
    self._build_graph = build_graph
    self._local_source_root = local_root
    self._fetchers = fetchers
    self._generate_remotes = generate_remotes
    self._remote_source_root = remote_root

  def generate(self, local_go_targets):
    """Automatically generates a Go target graph for the given local go targets.

    :param iter local_go_targets: The target roots to fill in a target graph for.
    :raises: :class:`GoTargetGenerator.GenerationError` if any missing targets cannot be generated.
    """
    visited = {l.import_path: l.address for l in local_go_targets}
    with temporary_dir() as gopath:
      for local_go_target in local_go_targets:
        name, import_paths = self._list_deps(gopath, local_go_target.address)
        self._generate_missing(gopath, local_go_target.address, name, import_paths, visited)
    return visited.items()

  def _generate_missing(self, gopath, local_address, name, import_paths, visited):
    """Recursively inject synthetic targets for every unvisited import of `local_address`.

    `visited` maps import path -> address and is mutated in place; it doubles as the
    cycle/duplicate guard.
    """
    # A Go `main` package must be a binary; anything else is a library.
    target_type = GoBinary if name == 'main' else GoLibrary
    existing = self._build_graph.get_target(local_address)
    if not existing:
      self._build_graph.inject_synthetic_target(address=local_address, target_type=target_type)
    elif existing and not isinstance(existing, target_type):
      raise self.WrongLocalSourceTargetTypeError('{} should be a {}'
                                                 .format(existing, target_type.__name__))

    for import_path in import_paths:
      if import_path not in self._go_stdlib:
        if import_path not in visited:
          # A fetcher match means this is a remote (3rdparty) import path.
          fetcher = self._fetchers.maybe_get_fetcher(import_path)
          if fetcher:
            remote_root = fetcher.root(import_path)
            remote_pkg_path = GoRemoteLibrary.remote_package_path(remote_root, import_path)
            # NOTE(review): `name` is rebound here, shadowing the parameter; harmless
            # since the parameter is only read above, but worth renaming someday.
            name = remote_pkg_path or os.path.basename(import_path)
            address = SyntheticAddress(os.path.join(self._remote_source_root, remote_root), name)
            found = self._build_graph.get_target(address)
            if not found:
              if not self._generate_remotes:
                raise self.NewRemoteEncounteredButRemotesNotAllowedError(
                  'Cannot generate dependency for remote import path {}'.format(import_path))
              self._build_graph.inject_synthetic_target(address=address,
                                                        target_type=GoRemoteLibrary,
                                                        pkg=remote_pkg_path)
          else:
            # Recurse on local targets.
            address = SyntheticAddress(os.path.join(self._local_source_root, import_path),
                                       os.path.basename(import_path))
            # NOTE(review): this rebinds `import_paths` while the for-loop is
            # iterating it; safe in Python (the loop holds the original
            # iterator) but confusing -- consider distinct local names.
            name, import_paths = self._list_deps(gopath, address)
            self._generate_missing(gopath, address, name, import_paths, visited)
          visited[import_path] = address

        dependency_address = visited[import_path]
        self._build_graph.inject_dependency(local_address, dependency_address)

  @memoized_property
  def _go_stdlib(self):
    # Computed once per generator: the set of standard-library import paths
    # reported by `go list std`.
    out = self._go_distribution.create_go_cmd('list', args=['std']).check_output()
    return frozenset(out.strip().split())

  def _list_deps(self, gopath, local_address):
    """Return (package name, imports) for the local package at `local_address`.

    Builds a symlink chroot for the package under `gopath` and runs
    `go list -json` against it; imports include test imports.
    """
    # TODO(John Sirois): Lift out a local go sources target chroot util - GoWorkspaceTask and
    # GoTargetGenerator both create these chroot symlink trees now.
    import_path = GoLocalSource.local_import_path(self._local_source_root, local_address)
    src_path = os.path.join(gopath, 'src', import_path)
    safe_mkdir(src_path)
    package_src_root = os.path.join(get_buildroot(), local_address.spec_path)
    for source_file in os.listdir(package_src_root):
      source_path = os.path.join(package_src_root, source_file)
      if GoLocalSource.is_go_source(source_path):
        dest_path = os.path.join(src_path, source_file)
        os.symlink(source_path, dest_path)

    # TODO(John Sirois): Lift up a small `go list utility` - GoFetch and GoTargetGenerator both use
    # this go command now as well as a version of the stdlib gathering done above in _go_stdlib.
    go_cmd = self._go_distribution.create_go_cmd('list', args=['-json', import_path], gopath=gopath)
    with self._workunit_factory(local_address.reference(),
                                cmd=str(go_cmd),
                                labels=[WorkUnitLabel.TOOL]) as workunit:
      # TODO(John Sirois): It would be nice to be able to tee the stdout to the workunit to we have
      # a capture of the json available for inspection in the server console.
      process = go_cmd.spawn(stdout=subprocess.PIPE, stderr=workunit.output('stderr'))
      out, _ = process.communicate()
      returncode = process.returncode
      workunit.set_outcome(WorkUnit.SUCCESS if returncode == 0 else WorkUnit.FAILURE)
      if returncode != 0:
        raise self.GenerationError('Problem listing imports for {}: {} failed with exit code {}'
                                   .format(local_address, go_cmd, returncode))
      data = json.loads(out)
      return data.get('Name'), data.get('Imports', []) + data.get('TestImports', [])
class GoBuilden(GoTask):
"""Automatically generates Go BUILD files."""
@classmethod
def global_subsystems(cls):
  # Declare the Fetchers subsystem (used to resolve remote import paths)
  # in addition to whatever the base task requires.
  return super(GoBuilden, cls).global_subsystems() + (Fetchers,)
@classmethod
def _default_template(cls):
  # Mustache template for a materialized BUILD file.  `{{#...}}`/`{{/...}}`
  # are conditional sections; the `?` suffixed keys guard list sections.
  # NOTE(review): the original template's internal indentation could not be
  # recovered from this copy -- confirm layout against a rendered BUILD file.
  return dedent("""\
  {{target.type}}(
  {{#target.name}}
  name='{{.}}',
  {{/target.name}}
  {{#target.deps?}}
  dependencies=[
  {{#target.deps}}
  '{{.}}',
  {{/target.deps}}
  ]
  {{/target.deps?}}
  {{#target.rev}}
  rev='{{.}}',
  {{/target.rev}}
  {{#target.pkgs?}}
  packages=[
  {{#target.pkgs}}
  '{{.}}',
  {{/target.pkgs}}
  ]
  {{/target.pkgs?}}
  )
  """)
@classmethod
def register_options(cls, register):
register('--remote', action='store_true', advanced=True, fingerprint=True,
help='Allow auto-generation of remote dependencies without pinned versions.')
register('--materialize', action='store_true', advanced=True, fingerprint=True,
help='Instead of just auto-generating missing go_binary and go_library targets in '
'memory, (re-)generate them on disk using the installed Go BUILD file template.')
# TODO(John Sirois): Support loading the template from disk and add docs for the template
# parameters.
# This disk loading will come for free when the options system supports argfile syntax, ie:
# --template=@argfile
register('--template', metavar='<template>',
default=cls._default_template(),
advanced=True, fingerprint=True,
help='A Go BUILD file mustache template to use with --materialize.')
register('--extension', default='', metavar='<ext>', advanced=True, fingerprint=True,
help='An optional extension for all materialized BUILD files (should include the .)')
def execute(self):
local_go_targets = self.context.targets(self.is_local_src)
if not local_go_targets:
return
generated = self.generate_targets(local_go_targets)
if not self.get_options().materialize:
msg = ('Auto generated the following Go targets: target (import path):\n\t{}'
.format('\n\t'.join(sorted('{} ({})'.format(addr.reference(), ip)
for ip, addr in generated))))
self.context.log.info(msg)
elif generated:
self._materialize()
class TemplateResult(namedtuple('TemplateResult', ['build_file_path', 'data', 'import_paths',
'needs_rev', 'rev'])):
def log(self, logger):
log = logger.warn if (self.needs_rev and not self.rev) else logger.info
log('\t{} ({}){}'.format(self.build_file_path,
' '.join(sorted(self.import_paths)),
' {}'.format(self.rev or 'FLOATING') if self.needs_rev else ''))
def _materialize(self):
self.context.log.info('Auto generated the following Go BUILD files: BUILD file '
'(import paths)')
for result in self.generate_build_files():
result.log(self.context.log)
class NoLocalRootsError(TaskError):
"""Indicates the Go local source owning targets' source roots are invalid."""
class InvalidLocalRootsError(TaskError):
"""Indicates the Go local source owning targets' source roots are invalid."""
class UnrootedLocalSourceError(TaskError):
"""Indicates there are Go local source owning targets that fall outside the source root."""
class InvalidRemoteRootsError(TaskError):
"""Indicates the Go remote library source roots are invalid."""
class GenerationError(TaskError):
"""Indicates an error generating Go targets."""
def __init__(self, cause):
super(GoBuilden.GenerationError, self).__init__(str(cause))
self.cause = cause
def generate_targets(self, local_go_targets):
# TODO(John Sirois): support multiple source roots like GOPATH does?
# The GOPATH's 1st element is read-write, the reast are read-only; ie: their sources build to
# the 1st element's pkg/ and bin/ dirs.
all_rooted_types = set()
for types in SourceRoot.all_roots().values():
all_rooted_types.update(types)
def safe_get_source_roots(target_type):
return set(SourceRoot.roots(target_type)) if target_type in all_rooted_types else set()
local_roots = safe_get_source_roots(GoBinary) | safe_get_source_roots(GoLibrary)
if not local_roots:
raise self.NoLocalRootsError('Can only BUILD gen if a Go local sources source root is'
'defined.')
if len(local_roots) > 1:
raise self.InvalidLocalRootsError('Can only BUILD gen for a single Go local sources source '
'root, found:\n\t{}'
.format('\n\t'.join(sorted(local_roots))))
local_root = local_roots.pop()
unrooted_locals = {t for t in local_go_targets if t.target_base != local_root}
if unrooted_locals:
raise self.UnrootedLocalSourceError('Cannot BUILD gen until the following targets are '
'relocated to the build root at {}:\n\t{}'
.format(local_root,
'\n\t'.join(sorted(t.address.reference()
for t in unrooted_locals))))
remote_roots = set(safe_get_source_roots(GoRemoteLibrary))
if len(remote_roots) > 1:
raise self.InvalidRemoteRootsError('Can only BUILD gen for a single Go remote library source '
'root, found:\n\t{}'
.format('\n\t'.join(sorted(remote_roots))))
remote_root = remote_roots.pop() if remote_roots else None
generator = GoTargetGenerator(self.context.new_workunit,
self.go_dist,
self.context.build_graph,
local_root,
Fetchers.global_instance(),
generate_remotes=self.get_options().remote,
remote_root=remote_root)
with self.context.new_workunit('go.buildgen', labels=[WorkUnitLabel.MULTITOOL]):
try:
return generator.generate(local_go_targets)
except generator.GenerationError as e:
raise self.GenerationError(e)
def generate_build_files(self):
goal_name = self.options_scope
flags = '--materialize'
if self.get_options().remote:
flags += ' --remote'
template_header = dedent("""\
# Auto-generated by pants!
# To re-generate run: `pants {goal_name} {flags}`
""").format(goal_name=goal_name, flags=flags)
template_text = template_header + self.get_options().template
build_file_basename = 'BUILD' + self.get_options().extension
targets_by_spec_path = defaultdict(set)
for target in self.context.targets(self.is_go):
targets_by_spec_path[target.address.spec_path].add(target)
for spec_path, targets in targets_by_spec_path.items():
rel_path = os.path.join(spec_path, build_file_basename)
result = self._create_template_data(rel_path, list(targets))
if result:
generator = Generator(template_text, target=result.data)
build_file_path = os.path.join(get_buildroot(), rel_path)
with safe_open(build_file_path, mode='w') as fp:
generator.write(stream=fp)
yield result
class NonUniformRemoteRevsError(TaskError):
"""Indicates packages with mis-matched versions are defined for a single remote root."""
def _create_template_data(self, build_file_path, targets):
if len(targets) == 1 and self.is_local_src(targets[0]):
local_target = targets[0]
data = self._data(target_type='go_binary' if self.is_binary(local_target) else 'go_library',
name=local_target.name,
deps=[d.address.reference() for d in local_target.dependencies])
return self.TemplateResult(build_file_path=build_file_path,
data=data,
import_paths=[local_target.import_path],
needs_rev=False,
rev=None)
elif self.get_options().remote:
if len(targets) == 1 and not targets[0].pkg:
remote_lib = targets[0]
data = self._data(target_type='go_remote_library',
name=remote_lib.name,
rev=remote_lib.rev)
return self.TemplateResult(build_file_path=build_file_path,
data=data,
import_paths=(remote_lib.import_path,),
needs_rev=True,
rev=remote_lib.rev)
else:
revs = {t.rev for t in targets if t.rev}
if len(revs) > 1:
msg = ('Cannot create BUILD file {} for the following packages at remote root {}, '
'they must all have the same version:\n\t{}'
.format(build_file_path, targets[0].remote_root,
'\n\t'.join('{} {}'.format(t.pkg, t.rev) for t in targets)))
raise self.NonUniformRemoteRevsError(msg)
rev = revs.pop() if revs else None
data = self._data(target_type='go_remote_libraries',
rev=rev,
pkgs=sorted({t.pkg for t in targets}))
return self.TemplateResult(build_file_path=build_file_path,
data=data,
import_paths=tuple(t.import_path for t in targets),
needs_rev=True,
rev=rev)
else:
return None
def _data(self, target_type, name=None, deps=None, rev=None, pkgs=None):
return TemplateData(type=target_type, name=name, deps=deps, rev=rev, pkgs=pkgs)
| |
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of an S3-like storage server based on local files.
Useful to test features that will eventually run on S3, or if you want to
run something locally that was once running on S3.
We don't support all the features of S3, but it does work with the
standard S3 client for the most basic semantics. To use the standard
S3 client with this module:
c = S3.AWSAuthConnection("", "", server="localhost", port=8888,
is_secure=False)
c.create_bucket("mybucket")
c.put("mybucket", "mykey", "a value")
print c.get("mybucket", "mykey").body
"""
import bisect
import datetime
import hashlib
import os
import os.path
import urllib

try:  # Python 3
    from urllib.parse import unquote
except ImportError:  # Python 2
    from urllib import unquote

from tornado import escape
from tornado import httpserver
from tornado import ioloop
from tornado import web
from tornado.options import options, define
from tornado.util import unicode_type
try:
long
except NameError:
long = int
define("port", default=9888, help="TCP port to listen on")
define("root_directory", default="/tmp/s3", help="Root storage directory")
define("bucket_depth", default=0, help="Bucket file system depth limit")
def start(port, root_directory, bucket_depth):
    """Starts the mock S3 server on the given port at the given path."""
    application = S3Application(root_directory, bucket_depth)
    server = httpserver.HTTPServer(application)
    server.listen(port)
    # Blocks until the IOLoop is stopped.
    ioloop.IOLoop.current().start()
class S3Application(web.Application):
    """Implementation of an S3-like storage server based on local files.
    If bucket depth is given, we break files up into multiple directories
    to prevent hitting file system limits for number of files in each
    directories. 1 means one level of directories, 2 means 2, etc.
    """
    def __init__(self, root_directory, bucket_depth=0):
        routes = [
            (r"/", RootHandler),
            (r"/([^/]+)/(.+)", ObjectHandler),
            (r"/([^/]+)/", BucketHandler),
        ]
        web.Application.__init__(self, routes)
        self.directory = os.path.abspath(root_directory)
        # Create the storage root on first use.
        if not os.path.exists(self.directory):
            os.makedirs(self.directory)
        self.bucket_depth = bucket_depth
class BaseRequestHandler(web.RequestHandler):
    """Shared helpers: S3-style XML rendering and object-key-to-path mapping."""

    SUPPORTED_METHODS = ("PUT", "GET", "DELETE")

    def render_xml(self, value):
        """Render ``value`` (a dict with exactly one root key) as an S3 XML body."""
        assert isinstance(value, dict) and len(value) == 1
        self.set_header("Content-Type", "application/xml; charset=UTF-8")
        name = list(value.keys())[0]
        parts = []
        parts.append('<' + name +
                     ' xmlns="http://doc.s3.amazonaws.com/2006-03-01">')
        self._render_parts(value[name], parts)
        parts.append('</' + name + '>')
        self.finish('<?xml version="1.0" encoding="UTF-8"?>\n' +
                    ''.join(parts))

    def _render_parts(self, value, parts=None):
        """Recursively append the XML rendering of ``value`` onto ``parts``.

        Fixed: ``parts`` previously used a mutable default argument (``[]``),
        which would leak rendered fragments between calls relying on the
        default. All call sites in this file pass ``parts`` explicitly.
        """
        if parts is None:
            parts = []
        if isinstance(value, (unicode_type, bytes)):
            parts.append(escape.xhtml_escape(value))
        elif isinstance(value, (int, long)):
            parts.append(str(value))
        elif isinstance(value, datetime.datetime):
            parts.append(value.strftime("%Y-%m-%dT%H:%M:%S.000Z"))
        elif isinstance(value, dict):
            for name, subvalue in value.items():
                if not isinstance(subvalue, list):
                    subvalue = [subvalue]
                for subsubvalue in subvalue:
                    parts.append('<' + name + '>')
                    self._render_parts(subsubvalue, parts)
                    parts.append('</' + name + '>')
        else:
            # Fixed: the message was passed as two Exception args, so %r was
            # never interpolated into the message.
            raise Exception("Unknown S3 value type %r" % (value,))

    def _object_path(self, bucket, object_name):
        """Return the absolute filesystem path for ``object_name`` in ``bucket``.

        With bucket_depth > 0, keys are sharded into nested directories named
        by md5(key) hex prefixes to limit the file count per directory.
        """
        if self.application.bucket_depth < 1:
            return os.path.abspath(os.path.join(
                self.application.directory, bucket, object_name))
        # escape.utf8 leaves byte strings untouched on Python 2 and encodes
        # text on Python 3, where hashlib requires bytes input.
        hash = hashlib.md5(escape.utf8(object_name)).hexdigest()
        path = os.path.abspath(os.path.join(
            self.application.directory, bucket))
        for i in range(self.application.bucket_depth):
            path = os.path.join(path, hash[:2 * (i + 1)])
        return os.path.join(path, object_name)
class RootHandler(BaseRequestHandler):
    """Handles GET / — lists all buckets with their creation times."""

    def get(self):
        base_dir = self.application.directory
        buckets = []
        for name in os.listdir(base_dir):
            info = os.stat(os.path.join(base_dir, name))
            created = datetime.datetime.utcfromtimestamp(info.st_ctime)
            buckets.append({
                "Name": name,
                "CreationDate": created,
            })
        self.render_xml({"ListAllMyBucketsResult": {
            "Buckets": {"Bucket": buckets},
        }})
class BucketHandler(BaseRequestHandler):
    """Bucket-level operations: list keys (GET), create (PUT), delete (DELETE)."""

    def get(self, bucket_name):
        # Emulates S3 ListObjects: supports prefix, marker, max-keys, and a
        # non-standard "terse" flag that omits per-object stat() metadata.
        prefix = self.get_argument("prefix", u"")
        marker = self.get_argument("marker", u"")
        max_keys = int(self.get_argument("max-keys", 50000))
        path = os.path.abspath(os.path.join(self.application.directory,
                                            bucket_name))
        terse = int(self.get_argument("terse", 0))
        # Reject paths escaping the storage root (e.g. "..") and missing buckets.
        if not path.startswith(self.application.directory) or \
           not os.path.isdir(path):
            raise web.HTTPError(404)
        object_names = []
        for root, dirs, files in os.walk(path):
            for file_name in files:
                object_names.append(os.path.join(root, file_name))
        # Strip the bucket directory prefix plus any hash-shard directories
        # (one per bucket_depth level, each 2*(i+1) hex chars + separator)
        # to recover the bare object keys.
        skip = len(path) + 1
        for i in range(self.application.bucket_depth):
            skip += 2 * (i + 1) + 1
        object_names = [n[skip:] for n in object_names]
        object_names.sort()
        contents = []

        start_pos = 0
        # marker is exclusive, prefix is inclusive; both are located by
        # binary search over the sorted key list.
        if marker:
            start_pos = bisect.bisect_right(object_names, marker, start_pos)
        if prefix:
            start_pos = bisect.bisect_left(object_names, prefix, start_pos)

        truncated = False
        for object_name in object_names[start_pos:]:
            if not object_name.startswith(prefix):
                break
            if len(contents) >= max_keys:
                truncated = True
                break
            object_path = self._object_path(bucket_name, object_name)
            c = {"Key": object_name}
            if not terse:
                # Full listings report mtime and size from the filesystem.
                info = os.stat(object_path)
                c.update({
                    "LastModified": datetime.datetime.utcfromtimestamp(
                        info.st_mtime),
                    "Size": info.st_size,
                })
            contents.append(c)
            # Track the last emitted key so the response's Marker reflects it.
            marker = object_name
        self.render_xml({"ListBucketResult": {
            "Name": bucket_name,
            "Prefix": prefix,
            "Marker": marker,
            "MaxKeys": max_keys,
            "IsTruncated": truncated,
            "Contents": contents,
        }})

    def put(self, bucket_name):
        """Create an empty bucket directory; 403 if it exists or escapes the root."""
        path = os.path.abspath(os.path.join(
            self.application.directory, bucket_name))
        if not path.startswith(self.application.directory) or \
           os.path.exists(path):
            raise web.HTTPError(403)
        os.makedirs(path)
        self.finish()

    def delete(self, bucket_name):
        """Delete a bucket; 404 if missing, 403 if it still contains objects."""
        path = os.path.abspath(os.path.join(
            self.application.directory, bucket_name))
        if not path.startswith(self.application.directory) or \
           not os.path.isdir(path):
            raise web.HTTPError(404)
        if len(os.listdir(path)) > 0:
            raise web.HTTPError(403)
        os.rmdir(path)
        self.set_status(204)
        self.finish()
class ObjectHandler(BaseRequestHandler):
    """Object-level operations: fetch (GET), store (PUT), remove (DELETE)."""

    def get(self, bucket, object_name):
        # Fixed: use the version-agnostic unquote; urllib.unquote only exists
        # on Python 2 (urllib.parse.unquote on Python 3).
        object_name = unquote(object_name)
        path = self._object_path(bucket, object_name)
        if not path.startswith(self.application.directory) or \
           not os.path.isfile(path):
            raise web.HTTPError(404)
        info = os.stat(path)
        self.set_header("Content-Type", "application/unknown")
        self.set_header("Last-Modified", datetime.datetime.utcfromtimestamp(
            info.st_mtime))
        with open(path, "rb") as object_file:
            self.finish(object_file.read())

    def put(self, bucket, object_name):
        object_name = unquote(object_name)
        bucket_dir = os.path.abspath(os.path.join(
            self.application.directory, bucket))
        if not bucket_dir.startswith(self.application.directory) or \
           not os.path.isdir(bucket_dir):
            raise web.HTTPError(404)
        path = self._object_path(bucket, object_name)
        if not path.startswith(bucket_dir) or os.path.isdir(path):
            raise web.HTTPError(403)
        directory = os.path.dirname(path)
        if not os.path.exists(directory):
            os.makedirs(directory)
        # Fixed: write in binary mode. The request body is bytes; text mode
        # fails outright on Python 3 and mangles binary payloads on Windows.
        with open(path, "wb") as object_file:
            object_file.write(self.request.body)
        self.finish()

    def delete(self, bucket, object_name):
        object_name = unquote(object_name)
        path = self._object_path(bucket, object_name)
        if not path.startswith(self.application.directory) or \
           not os.path.isfile(path):
            raise web.HTTPError(404)
        os.unlink(path)
        self.set_status(204)
        self.finish()
if __name__ == "__main__":
    # Parse --port/--root_directory/--bucket_depth and run the mock server.
    options.parse_command_line()
    start(options.port, options.root_directory, options.bucket_depth)
| |
# -*- coding: utf-8 -*-
'''
Connection library for VMWare
.. versionadded:: 2015.8.2
This is a base library used by a number of VMWare services such as VMWare
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import
import atexit
import logging
import time
# Import Salt Libs
from salt.exceptions import SaltSystemExit
import salt.modules.cmdmod
import salt.utils
# Import Third Party Libs
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.
    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param protocol: Connection protocol; defaults to https
    :param port: TCP port; defaults to 443
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :return: Dictionary
    '''
    esx_cmd = salt.utils.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False

    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'

    # NOTE(review): the command line below embeds the password in a single
    # shell string; a password containing a single quote would break the
    # quoting — confirm credentials are sanitized upstream.
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # Connecting through a vCenter; -h selects the managed ESXi host.
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)

    # output_loglevel='quiet' presumably keeps the credential-bearing command
    # line out of Salt's logs.
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')

    return ret
def get_service_instance(host, username, password, protocol=None, port=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.
    host
        The location of the vCenter server or ESX/ESXi host.
    username
        The username used to login to the vCenter server or ESX/ESXi host.
    password
        The password used to login to the vCenter server or ESX/ESXi host.
    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.
    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    # Reuse an existing pyVim session when it already points at host:port;
    # otherwise tear it down before opening a fresh connection.
    service_instance = GetSi()
    if service_instance:
        if service_instance._GetStub().host == ':'.join([host, str(port)]):
            return service_instance
        Disconnect(service_instance)

    try:
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port
        )
    except Exception as exc:
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            # 1st fallback: certificate verification failures are retried with
            # an unverified default HTTPS context (see pyVmomi 6.0 SSL issues
            # in the module docstring).
            if (isinstance(exc, vim.fault.HostConnectFault) and '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or '[SSL: CERTIFICATE_VERIFY_FAILED]' in str(exc):
                import ssl
                default_context = ssl._create_default_https_context
                ssl._create_default_https_context = ssl._create_unverified_context
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port
                )
                # Restore the original context so other HTTPS users still verify.
                ssl._create_default_https_context = default_context
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.debug(exc)
                raise SaltSystemExit(err_msg)
        except Exception as exc:
            # 2nd fallback: Pythons without _create_default_https_context —
            # retry with an explicit TLSv1 context that skips verification.
            if 'certificate verify failed' in str(exc):
                import ssl
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=context
                )
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.debug(exc)
                raise SaltSystemExit(err_msg)

    # Ensure the session is cleanly closed at interpreter exit.
    atexit.register(Disconnect, service_instance)

    return service_instance
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.
    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None when no switch matches
    '''
    if dvs_name not in list_dvs(service_instance):
        return None
    inventory = get_inventory(service_instance)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
    for candidate in container.view:
        if candidate.name == dvs_name:
            return candidate
    return None
def _get_pnics(host_reference):
    '''
    Helper function that returns a list of PhysicalNics and their information.
    '''
    network_config = host_reference.config.network
    return network_config.pnic
def _get_vnics(host_reference):
    '''
    Helper function that returns a list of VirtualNics and their information.
    '''
    network_config = host_reference.config.network
    return network_config.vnic
def _get_vnic_manager(host_reference):
    '''
    Helper function that returns the host's virtualNicManager reference
    and its information.
    '''
    config_manager = host_reference.configManager
    return config_manager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
    '''
    Return a portgroup object corresponding to the portgroup name on the dvs
    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object, or None when no portgroup matches
    '''
    matches = (pg for pg in dvs.portgroup if pg.name == portgroup_name)
    return next(matches, None)
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
    '''
    Return a portgroup object corresponding to the portgroup name on the dvs
    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object, or None when no portgroup matches
    '''
    # NOTE(review): despite the "uplink" name, this searches dvs.portgroup and
    # is identical to _get_dvs_portgroup — confirm whether it should instead
    # search the DVS's uplink portgroups.
    matches = (pg for pg in dvs.portgroup if pg.name == portgroup_name)
    return next(matches, None)
def get_inventory(service_instance):
    '''
    Return the inventory (retrieved service content) of a Service Instance Object.
    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    retrieve = service_instance.RetrieveContent
    return retrieve()
def get_content(service_instance, obj_type, property_list=None, container_ref=None):
    '''
    Returns the content of the specified type of object for a Service Instance.
    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html
    service_instance
        The Service Instance from which to obtain content.
    obj_type
        The type of content to obtain.
    property_list
        An optional list of object properties to used to return even more filtered content results.
    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = service_instance.content.rootFolder

    # Create an object view scoped to obj_type below container_ref (recursive)
    obj_view = service_instance.content.viewManager.CreateContainerView(
        container_ref, [obj_type], True)

    # Create traversal spec to determine the path for collection
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        name='traverseEntities',
        path='view',
        skip=False,
        type=vim.view.ContainerView
    )

    # Create property spec to determine properties to be retrieved;
    # when no property_list is given, all properties are fetched (all=True).
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )

    # Create object spec to navigate content; skip=True excludes the view
    # object itself from the results.
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_view,
        skip=True,
        selectSet=[traversal_spec]
    )

    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )

    # Retrieve the contents
    content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])

    # Destroy the object view to release server-side resources
    obj_view.Destroy()

    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference having the specified property value.
    service_instance
        The Service Instance from which to obtain managed object references.
    object_type
        The type of content for which to obtain managed object references.
    property_value
        The value of the property to match against.
    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.
    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    # Fetch only the single property we need to match on.
    object_list = get_mors_with_properties(service_instance, object_type,
                                           property_list=[property_name],
                                           container_ref=container_ref)
    return next((entry['object'] for entry in object_list
                 if entry[property_name] == property_value), None)
def get_mors_with_properties(service_instance, object_type, property_list=None, container_ref=None):
    '''
    Returns a list containing properties and managed object references for the managed object.
    service_instance
        The Service Instance from which to obtain managed object references.
    object_type
        The type of content for which to obtain managed object references.
    property_list
        An optional list of object properties used to return even more filtered managed object reference results.
    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    content = get_content(service_instance, object_type,
                          property_list=property_list,
                          container_ref=container_ref)

    results = []
    for obj in content:
        # Flatten each result's propSet into a plain dict, and keep the raw
        # managed object reference under the 'object' key.
        properties = {prop.name: prop.val for prop in obj.propSet}
        properties['object'] = obj.obj
        results.append(properties)
    return results
def get_network_adapter_type(adapter_type):
    '''
    Return the network adapter type device object.
    adapter_type
        The adapter type name: one of ``vmxnet``, ``vmxnet2``, ``vmxnet3``,
        ``e1000`` or ``e1000e``; any other value yields ``None``.
    '''
    # Lazy builders so only the matching vim device class is instantiated.
    builders = {
        "vmxnet": lambda: vim.vm.device.VirtualVmxnet(),
        "vmxnet2": lambda: vim.vm.device.VirtualVmxnet2(),
        "vmxnet3": lambda: vim.vm.device.VirtualVmxnet3(),
        "e1000": lambda: vim.vm.device.VirtualE1000(),
        "e1000e": lambda: vim.vm.device.VirtualE1000e(),
    }
    builder = builders.get(adapter_type)
    return builder() if builder is not None else None
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of objects from a given service instance.
    service_instance
        The Service Instance for which to obtain a list of objects.
    vim_object
        The type of content for which to obtain information.
    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']

    item_list = get_mors_with_properties(service_instance, vim_object, properties)
    return [item['name'] for item in item_list]
def list_datacenters(service_instance):
    '''
    Returns the names of the datacenters associated with a given service instance.
    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim_object=vim.Datacenter)
def list_clusters(service_instance):
    '''
    Returns the names of the clusters associated with a given service instance.
    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim_object=vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Returns the names of the datastore clusters associated with a given service instance.
    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim_object=vim.StoragePod)
def list_datastores(service_instance):
    '''
    Returns the names of the datastores associated with a given service instance.
    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim_object=vim.Datastore)
def list_hosts(service_instance):
    '''
    Returns the names of the hosts associated with a given service instance.
    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    return list_objects(service_instance, vim_object=vim.HostSystem)
def list_resourcepools(service_instance):
    '''
    Returns the names of the resource pools associated with a given service instance.
    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    return list_objects(service_instance, vim_object=vim.ResourcePool)
def list_networks(service_instance):
    '''
    Returns the names of the networks associated with a given service instance.
    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    return list_objects(service_instance, vim_object=vim.Network)
def list_vms(service_instance):
    '''
    Returns the names of the VMs associated with a given service instance.
    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    return list_objects(service_instance, vim_object=vim.VirtualMachine)
def list_folders(service_instance):
    '''
    Returns the names of the folders associated with a given service instance.
    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    return list_objects(service_instance, vim_object=vim.Folder)
def list_dvs(service_instance):
    '''
    Returns the names of the distributed virtual switches associated with a given service instance.
    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    return list_objects(service_instance, vim_object=vim.DistributedVirtualSwitch)
def list_vapps(service_instance):
    '''
    Returns the names of the vApps associated with a given service instance.
    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    return list_objects(service_instance, vim_object=vim.VirtualApp)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.

    task
        The task to wait for.
    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that the task is being run on.
    task_type
        The type of task being performed. Useful information for debugging purposes.
    sleep_seconds
        The number of seconds to wait before querying the task again. Defaults to ``1`` second.

        Note: the task state is actually polled roughly once per second
        regardless of this value; ``sleep_seconds`` only controls how often
        a progress message is logged.
    log_level
        The level at which to log task information. Default is ``debug``, but ``info`` is also supported.

    Raises an ``Exception`` carrying ``task.info.error`` if the task ends in
    any state other than ``'success'``.
    '''
    time_counter = 0
    start_time = time.time()
    # NOTE(review): task.info.state is presumably refreshed on each access by
    # the underlying API object; the loop relies on it eventually leaving the
    # 'running'/'queued' states. Also, sleep_seconds=0 would raise
    # ZeroDivisionError in the modulo below -- confirm callers never pass 0.
    while task.info.state == 'running' or task.info.state == 'queued':
        # Only log every `sleep_seconds` iterations to avoid log spam.
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep up to the next whole-second boundary measured from start_time,
        # so time_counter approximates elapsed seconds.
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
    if task.info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
    else:
        # Any terminal state other than 'success' is treated as a failure.
        raise Exception(task.info.error)
| |
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
def _check_forward(e1, e2, f, y_expect):
    # Wrap the raw arrays in Variables, apply the link, and compare the
    # output against the precomputed expectation.
    v1 = chainer.Variable(e1)
    v2 = chainer.Variable(e2)
    out = f(v1, v2)
    testing.assert_allclose(y_expect, out.data)
def _check_backward(e1, e2, y_grad, link, bias):
    # Numerically verify gradients w.r.t. W, plus b when the link has a bias.
    params = [link.W, link.b] if bias else [link.W]
    gradient_check.check_backward(
        link, (e1, e2), y_grad, params, eps=1e-2, rtol=1e-3)
def _batch_to_gpu(*xs):
    # Transfer every array in the batch to the GPU.
    return tuple(map(cuda.to_gpu, xs))
def _uniform(*shape):
    # Sample float32 values uniformly from [-1, 1) with the given shape.
    sample = numpy.random.uniform(-1, 1, shape)
    return sample.astype(numpy.float32)
def _as_mat(x):
    # Flatten all trailing axes so each sample becomes one row.
    return x.reshape(x.shape[0], -1)
class TestBilinear(unittest.TestCase):
    """Forward/backward tests for links.Bilinear with a bias term."""
    # Input feature sizes for the two operands, output size, and batch size.
    in_shape = (3, 4)
    out_size = 4
    batch_size = 10
    def setUp(self):
        self.f = links.Bilinear(
            self.in_shape[0], self.in_shape[1], self.out_size)
        # Randomize all parameters, then keep copies so the expected output
        # below is computed from exactly the values the link holds.
        self.f.W.data[...] = _uniform(*self.f.W.data.shape)
        self.f.V1.data[...] = _uniform(*self.f.V1.data.shape)
        self.f.V2.data[...] = _uniform(*self.f.V2.data.shape)
        self.f.b.data[...] = _uniform(*self.f.b.data.shape)
        self.f.cleargrads()
        self.W = self.f.W.data.copy()
        self.V1 = self.f.V1.data.copy()
        self.V2 = self.f.V2.data.copy()
        self.b = self.f.b.data.copy()
        self.e1 = _uniform(self.batch_size, self.in_shape[0])
        self.e2 = _uniform(self.batch_size, self.in_shape[1])
        self.gy = _uniform(self.batch_size, self.out_size)
        # Reference output: bilinear term plus both linear terms and the bias.
        self.y = (
            numpy.einsum('ij,ik,jkl->il', self.e1, self.e2, self.W) +
            self.e1.dot(self.V1) + self.e2.dot(self.V2) + self.b)
    @condition.retry(3)
    def test_forward_cpu(self):
        _check_forward(self.e1, self.e2, self.f, self.y)
    @attr.gpu
    @condition.retry(3)
    def test_forward_gpu(self):
        with testing.assert_warns(DeprecationWarning):
            self.f.to_gpu()
        _check_forward(cuda.to_gpu(self.e1),
                       cuda.to_gpu(self.e2),
                       self.f, self.y)
    @condition.retry(3)
    def test_backward_cpu(self):
        _check_backward(self.e1, self.e2, self.gy, self.f, True)
    @attr.gpu
    @condition.retry(3)
    def test_backward_gpu(self):
        with testing.assert_warns(DeprecationWarning):
            self.f.to_gpu()
        _check_backward(cuda.to_gpu(self.e1),
                        cuda.to_gpu(self.e2),
                        cuda.to_gpu(self.gy),
                        self.f, True)
class TestBilinear2(TestBilinear):
    """Same checks with higher-rank inputs that the link must flatten."""
    def setUp(self):
        super(TestBilinear2, self).setUp()
        assert self.in_shape[1] % 2 == 0
        # Reshape the inputs to rank 3; the expected output is computed on
        # their flattened (matrix) forms.
        self.e1 = _uniform(self.batch_size, 1, self.in_shape[0])
        self.e2 = _uniform(self.batch_size, self.in_shape[1] // 2, 2)
        self.gy = _uniform(self.batch_size, self.out_size)
        e1 = _as_mat(self.e1)
        e2 = _as_mat(self.e2)
        self.y = (
            numpy.einsum('ij,ik,jkl->il', e1, e2, self.W) +
            e1.dot(self.V1) + e2.dot(self.V2) + self.b)
class TestBilinear3(TestBilinear):
    """Repeats the TestBilinear checks with a single output unit."""
    out_size = 1
class TestBilinear4(TestBilinear):
    """Repeats the TestBilinear checks with minimal first input size."""
    in_shape = (1, 2)
class TestBilinear5(TestBilinear):
    """Repeats the TestBilinear checks with minimal second input size."""
    in_shape = (2, 1)
class TestBilinear6(TestBilinear):
    """Repeats the TestBilinear checks with both input sizes of 1."""
    in_shape = (1, 1)
class TestBilinear7(TestBilinear):
    """Minimal first input size combined with a single output unit."""
    in_shape = (1, 2)
    out_size = 1
class TestBilinear8(TestBilinear):
    """Minimal second input size combined with a single output unit."""
    in_shape = (2, 1)
    out_size = 1
class TestBilinear9(TestBilinear):
    """Smallest possible configuration: all sizes equal to 1."""
    in_shape = (1, 1)
    out_size = 1
class TestBilinearWOBias(TestBilinear):
    """Backward tests for links.Bilinear constructed without bias terms."""
    def setUp(self):
        # NOTE(review): the fourth positional argument is presumably
        # nobias=True -- confirm against the links.Bilinear signature.
        self.f = links.Bilinear(
            self.in_shape[0], self.in_shape[1], self.out_size, True)
        W = self.f.W.data
        W[...] = numpy.random.uniform(-1, 1, W.shape)
        self.f.cleargrads()
        self.W = W.copy()
        self.e1 = _uniform(self.batch_size, self.in_shape[0])
        self.e2 = _uniform(self.batch_size, self.in_shape[1])
        self.gy = _uniform(self.batch_size, self.out_size)
        # Expected output contains only the bilinear term (no V1/V2/b).
        self.y = numpy.einsum('ij,ik,jkl->il', self.e1, self.e2, self.W)
    @condition.retry(3)
    def test_backward_cpu(self):
        _check_backward(self.e1, self.e2, self.gy, self.f, False)
    @attr.gpu
    @condition.retry(3)
    def test_backward_gpu(self):
        with testing.assert_warns(DeprecationWarning):
            self.f.to_gpu()
        _check_backward(cuda.to_gpu(self.e1), cuda.to_gpu(self.e2),
                        cuda.to_gpu(self.gy), self.f, False)
class TestBilinearWOBias2(TestBilinearWOBias):
    """Same checks with higher-rank inputs that the link must flatten."""
    def setUp(self):
        super(TestBilinearWOBias2, self).setUp()
        assert self.in_shape[1] % 2 == 0
        self.e1 = _uniform(self.batch_size, 1, self.in_shape[0])
        self.e2 = _uniform(self.batch_size, 2, self.in_shape[1] // 2)
        self.gy = _uniform(self.batch_size, self.out_size)
        # Expected output uses the flattened (matrix) forms of the inputs.
        self.y = numpy.einsum(
            'ij,ik,jkl->il', _as_mat(self.e1), _as_mat(self.e2), self.W)
class TestBilinearWOBias3(TestBilinearWOBias):
    """No-bias checks with a single output unit."""
    out_size = 1
class TestBilinearWOBias4(TestBilinearWOBias):
    """No-bias checks with minimal first input size."""
    in_shape = (1, 2)
class TestBilinearWOBias5(TestBilinearWOBias):
    """No-bias checks with minimal second input size."""
    in_shape = (2, 1)
class TestBilinearWOBias6(TestBilinearWOBias):
    """No-bias checks with both input sizes of 1."""
    in_shape = (1, 1)
class TestBilinearWOBias7(TestBilinearWOBias):
    """No-bias checks: minimal first input size, single output unit."""
    in_shape = (1, 2)
    out_size = 1
class TestBilinearWOBias8(TestBilinearWOBias):
    """No-bias checks: minimal second input size, single output unit."""
    in_shape = (2, 1)
    out_size = 1
class TestBilinearWOBias9(TestBilinearWOBias):
    """No-bias checks with the smallest possible configuration."""
    in_shape = (1, 1)
    out_size = 1
class InitByInitialParameter(unittest.TestCase):
    """Fixture providing parameter arrays for initializer-based tests."""
    in_shape = (2, 3)
    out_size = 4
    batch_size = 10
    def setUp(self):
        # Correctly-shaped initial parameters for a Bilinear link.
        self.W = _uniform(self.in_shape[0], self.in_shape[1], self.out_size)
        self.V1 = _uniform(self.in_shape[0], self.out_size)
        self.V2 = _uniform(self.in_shape[1], self.out_size)
        self.b = _uniform(self.out_size,)
class NormalInitialParameter(InitByInitialParameter):
    """Constructing Bilinear from well-shaped initial parameters succeeds."""
    def check_normal(self, initialW, initial_bias, nobias):
        # Construction itself is the check: it must not raise.
        links.Bilinear(
            self.in_shape[0], self.in_shape[1], self.out_size, nobias,
            initialW, initial_bias)
    def test_normal_cpu_bias(self):
        self.check_normal(self.W, (self.V1, self.V2, self.b), False)
class InvalidInitialParameter(InitByInitialParameter):
    """Constructing Bilinear with mis-shaped initial parameters must fail."""
    def setUp(self):
        super(InvalidInitialParameter, self).setUp()
        # Each invalid array has one dimension off by one.
        self.invalidW = _uniform(self.in_shape[0] + 1, self.in_shape[1],
                                 self.out_size)
        self.invalidV1 = _uniform(self.in_shape[0] + 1, self.out_size)
        self.invalidV2 = _uniform(self.in_shape[1] + 1, self.out_size)
        self.invalidb = _uniform(self.out_size + 1,)
    def check_invalid(self, initialW, initial_bias, nobias):
        # The link is expected to reject mismatched shapes at construction.
        with self.assertRaises(AssertionError):
            links.Bilinear(
                self.in_shape[0], self.in_shape[1], self.out_size, nobias,
                initialW, initial_bias)
    def test_invalidW_cpu(self):
        self.check_invalid(self.invalidW, (self.V1, self.V2, self.b), False)
    def test_invalidW_cpu2(self):
        self.check_invalid(self.invalidW, None, True)
    def test_invalidV1_cpu(self):
        self.check_invalid(self.W, (self.invalidV1, self.V2, self.b), False)
    def test_invalidV2_cpu(self):
        self.check_invalid(self.W, (self.V1, self.invalidV2, self.b), False)
    def test_invalidb_cpu(self):
        self.check_invalid(self.W, (self.V1, self.V2, self.invalidb), False)
testing.run_module(__name__, __file__)
| |
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models for Oppia suggestions."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import datetime
from core.platform import models
import feconf
from google.appengine.ext import ndb
(base_models, user_models) = models.Registry.import_models(
[models.NAMES.base_model, models.NAMES.user])
# Constants defining types of entities to which suggestions can be created.
TARGET_TYPE_EXPLORATION = 'exploration'
TARGET_TYPE_QUESTION = 'question'
TARGET_TYPE_SKILL = 'skill'
TARGET_TYPE_TOPIC = 'topic'
TARGET_TYPE_CHOICES = [
    TARGET_TYPE_EXPLORATION,
    TARGET_TYPE_QUESTION,
    TARGET_TYPE_SKILL,
    TARGET_TYPE_TOPIC
]
# Constants defining the different possible statuses of a suggestion.
STATUS_ACCEPTED = 'accepted'
STATUS_IN_REVIEW = 'review'
STATUS_REJECTED = 'rejected'
STATUS_CHOICES = [
    STATUS_ACCEPTED,
    STATUS_IN_REVIEW,
    STATUS_REJECTED
]
# Constants defining various suggestion types.
SUGGESTION_TYPE_EDIT_STATE_CONTENT = 'edit_exploration_state_content'
SUGGESTION_TYPE_TRANSLATE_CONTENT = 'translate_content'
SUGGESTION_TYPE_ADD_QUESTION = 'add_question'
SUGGESTION_TYPE_CHOICES = [
    SUGGESTION_TYPE_EDIT_STATE_CONTENT,
    SUGGESTION_TYPE_TRANSLATE_CONTENT,
    SUGGESTION_TYPE_ADD_QUESTION
]
# Daily emails are sent to reviewers to notify them of suggestions on the
# Contributor Dashboard to review. The constants below define the number of
# question and translation suggestions to fetch to come up with these daily
# suggestion recommendations.
MAX_QUESTION_SUGGESTIONS_TO_FETCH_FOR_REVIEWER_EMAILS = 30
MAX_TRANSLATION_SUGGESTIONS_TO_FETCH_FOR_REVIEWER_EMAILS = 30
# Defines the minimum role required to review suggestions
# of a particular type.
SUGGESTION_MINIMUM_ROLE_FOR_REVIEW = {
    SUGGESTION_TYPE_EDIT_STATE_CONTENT: feconf.ROLE_ID_EXPLORATION_EDITOR
}
# Constants defining various contribution types.
SCORE_TYPE_CONTENT = 'content'
SCORE_TYPE_TRANSLATION = 'translation'
SCORE_TYPE_QUESTION = 'question'
SCORE_TYPE_CHOICES = [
    SCORE_TYPE_CONTENT,
    SCORE_TYPE_TRANSLATION,
    SCORE_TYPE_QUESTION
]
# The delimiter to be used in score category field.
SCORE_CATEGORY_DELIMITER = '.'
# Fields of GeneralSuggestionModel that external callers are allowed to
# filter on in query_suggestions().
ALLOWED_QUERY_FIELDS = ['suggestion_type', 'target_type', 'target_id',
                        'status', 'author_id', 'final_reviewer_id',
                        'score_category', 'language_code']
# Threshold number of days after which suggestion will be accepted.
THRESHOLD_DAYS_BEFORE_ACCEPT = 7
# Threshold time after which suggestion is considered stale and auto-accepted.
THRESHOLD_TIME_BEFORE_ACCEPT_IN_MSECS = (
    THRESHOLD_DAYS_BEFORE_ACCEPT * 24 * 60 * 60 * 1000)
# The default message to be shown when accepting stale suggestions.
DEFAULT_SUGGESTION_ACCEPT_MESSAGE = (
    'Automatically accepting suggestion after'
    ' %d days' % THRESHOLD_DAYS_BEFORE_ACCEPT)
# The message to be shown when rejecting a suggestion with a target ID of a
# deleted skill.
DELETED_SKILL_REJECT_MESSAGE = 'The associated skill no longer exists.'
# The message to be shown when rejecting a translation suggestion that is
# associated with an exploration that no longer corresponds to the story.
# The story could have been deleted or the exploration could have been removed
# from the story.
INVALID_STORY_REJECT_TRANSLATION_SUGGESTIONS_MSG = (
    'This text snippet has been removed from the story, and no longer needs '
    'translation. Sorry about that!'
)
# The amount to increase the score of the author by after successfully getting
# an accepted suggestion.
INCREMENT_SCORE_OF_AUTHOR_BY = 1
# Action types for incoming requests to the suggestion action handlers.
ACTION_TYPE_ACCEPT = 'accept'
ACTION_TYPE_REJECT = 'reject'
class GeneralSuggestionModel(base_models.BaseModel):
    """Model to store suggestions made by Oppia users.

    The ID of the suggestions created is the same as the ID of the thread
    linked to the suggestion.
    """
    # The type of suggestion. One of SUGGESTION_TYPE_CHOICES.
    suggestion_type = ndb.StringProperty(
        required=True, indexed=True, choices=SUGGESTION_TYPE_CHOICES)
    # The type of the target entity which the suggestion is linked to.
    target_type = ndb.StringProperty(
        required=True, indexed=True, choices=TARGET_TYPE_CHOICES)
    # The ID of the target entity being suggested to.
    target_id = ndb.StringProperty(required=True, indexed=True)
    # The version number of the target entity at the time of creation of the
    # suggestion.
    target_version_at_submission = ndb.IntegerProperty(
        required=True, indexed=True)
    # Status of the suggestion. One of STATUS_CHOICES.
    status = ndb.StringProperty(
        required=True, indexed=True, choices=STATUS_CHOICES)
    # The ID of the author of the suggestion.
    author_id = ndb.StringProperty(required=True, indexed=True)
    # The ID of the reviewer who accepted/rejected the suggestion.
    final_reviewer_id = ndb.StringProperty(indexed=True)
    # The change command linked to the suggestion. Contains the details of the
    # change.
    change_cmd = ndb.JsonProperty(required=True)
    # The category to score the suggestor in. This field will contain 2 values
    # separated by a ., the first will be a value from SCORE_TYPE_CHOICES and
    # the second will be the subcategory of the suggestion.
    score_category = ndb.StringProperty(required=True, indexed=True)
    # The ISO 639-1 code used to query suggestions by language, or None if the
    # suggestion type is not queryable by language.
    language_code = ndb.StringProperty(indexed=True)
    @staticmethod
    def get_deletion_policy():
        """General suggestion needs to be pseudonymized for the user."""
        return base_models.DELETION_POLICY.LOCALLY_PSEUDONYMIZE
    @classmethod
    def get_export_policy(cls):
        """Model contains user data.

        Returns:
            dict. Export policy for each field, merged over the base model's.
        """
        return dict(super(cls, cls).get_export_policy(), **{
            'suggestion_type': base_models.EXPORT_POLICY.EXPORTED,
            'target_type': base_models.EXPORT_POLICY.EXPORTED,
            'target_id': base_models.EXPORT_POLICY.EXPORTED,
            'target_version_at_submission':
                base_models.EXPORT_POLICY.EXPORTED,
            'status': base_models.EXPORT_POLICY.EXPORTED,
            'author_id': base_models.EXPORT_POLICY.EXPORTED,
            'final_reviewer_id': base_models.EXPORT_POLICY.EXPORTED,
            'change_cmd': base_models.EXPORT_POLICY.EXPORTED,
            'score_category': base_models.EXPORT_POLICY.EXPORTED,
            'language_code': base_models.EXPORT_POLICY.EXPORTED
        })
    @classmethod
    def has_reference_to_user_id(cls, user_id):
        """Check whether GeneralSuggestionModel exists for the user.

        Args:
            user_id: str. The ID of the user whose data should be checked.

        Returns:
            bool. Whether any models refer to the given user ID.
        """
        # keys_only avoids fetching full entities; existence is all we need.
        return cls.query(
            ndb.OR(cls.author_id == user_id, cls.final_reviewer_id == user_id)
        ).get(keys_only=True) is not None
    @classmethod
    def create(
            cls, suggestion_type, target_type, target_id,
            target_version_at_submission, status, author_id, final_reviewer_id,
            change_cmd, score_category, thread_id, language_code):
        """Creates a new SuggestionModel entry.

        Args:
            suggestion_type: str. The type of the suggestion.
            target_type: str. The type of target entity being edited.
            target_id: str. The ID of the target entity being edited.
            target_version_at_submission: int. The version number of the target
                entity at the time of creation of the suggestion.
            status: str. The status of the suggestion.
            author_id: str. The ID of the user who submitted the suggestion.
            final_reviewer_id: str. The ID of the reviewer who has
                accepted/rejected the suggestion.
            change_cmd: dict. The actual content of the suggestion.
            score_category: str. The scoring category for the suggestion.
            thread_id: str. The ID of the feedback thread linked to the
                suggestion. Used directly as the new model's ID.
            language_code: str|None. The ISO 639-1 code used to query
                suggestions by language, or None if the suggestion type is not
                queryable by language.

        Raises:
            Exception. There is already a suggestion with the given id.
        """
        instance_id = thread_id
        if cls.get_by_id(instance_id):
            raise Exception(
                'There is already a suggestion with the given'
                ' id: %s' % instance_id)
        cls(
            id=instance_id, suggestion_type=suggestion_type,
            target_type=target_type, target_id=target_id,
            target_version_at_submission=target_version_at_submission,
            status=status, author_id=author_id,
            final_reviewer_id=final_reviewer_id, change_cmd=change_cmd,
            score_category=score_category, language_code=language_code).put()
    @classmethod
    def query_suggestions(cls, query_fields_and_values):
        """Queries for suggestions.

        Args:
            query_fields_and_values: list(tuple(str, str)). A list of queries.
                The first element in each tuple is the field to be queried, and
                the second element is the corresponding value to query for.

        Returns:
            list(SuggestionModel). A list of suggestions that match the given
            query values, up to a maximum of feconf.DEFAULT_QUERY_LIMIT
            suggestions.

        Raises:
            Exception. A queried field is not in ALLOWED_QUERY_FIELDS.
        """
        query = cls.query()
        for (field, value) in query_fields_and_values:
            if field not in ALLOWED_QUERY_FIELDS:
                raise Exception('Not allowed to query on field %s' % field)
            query = query.filter(getattr(cls, field) == value)
        return query.fetch(feconf.DEFAULT_QUERY_LIMIT)
    @classmethod
    def get_translation_suggestion_ids_with_exp_ids(cls, exp_ids):
        """Gets the ids of translation suggestions corresponding to
        explorations with the given exploration ids.

        Args:
            exp_ids: list(str). List of exploration ids to query for.

        Returns:
            list(str). A list of translation suggestion ids that
            correspond to the given exploration ids. Note: it is not
            guaranteed that the suggestion ids returned are ordered by the
            exploration ids in exp_ids.
        """
        # Ordering by key gives a deterministic sequence for the
        # cursor-based paging below.
        query = (
            cls.get_all()
            .order(cls.key)
            .filter(cls.suggestion_type == SUGGESTION_TYPE_TRANSLATE_CONTENT)
            .filter(cls.target_id.IN(exp_ids))
        )
        suggestion_models = []
        cursor, more = (None, True)
        # Page through all matches rather than capping at a single fetch.
        while more:
            results, cursor, more = query.fetch_page(
                feconf.DEFAULT_QUERY_LIMIT, start_cursor=cursor)
            suggestion_models.extend(results)
        return [suggestion_model.id for suggestion_model in suggestion_models]
    @classmethod
    def get_all_stale_suggestion_ids(cls):
        """Gets the ids of the suggestions which were last updated before the
        threshold time.

        Returns:
            list(str). A list of the ids of the suggestions that are stale.
        """
        # timedelta(days, seconds, microseconds, milliseconds): the fourth
        # positional argument carries the threshold expressed in msecs.
        threshold_time = (
            datetime.datetime.utcnow() - datetime.timedelta(
                0, 0, 0, THRESHOLD_TIME_BEFORE_ACCEPT_IN_MSECS))
        suggestion_models = cls.get_all().filter(
            cls.status == STATUS_IN_REVIEW).filter(
                cls.last_updated < threshold_time).fetch()
        return [suggestion_model.id for suggestion_model in suggestion_models]
    @classmethod
    def get_in_review_suggestions_in_score_categories(
            cls, score_categories, user_id):
        """Gets all suggestions which are in review in the given
        score_categories.

        Args:
            score_categories: list(str). List of score categories to query for.
            user_id: str. The id of the user trying to make this query.
                As a user cannot review their own suggestions, suggestions
                authored by the user will be excluded.

        Returns:
            list(SuggestionModel). A list of suggestions that are in the given
            score categories, which are in review, but not created by the
            given user.

        Raises:
            Exception. The given list of score categories is empty.
        """
        if len(score_categories) == 0:
            raise Exception('Received empty list of score categories')
        # The author_id inequality filter excludes the user's own suggestions.
        return cls.get_all().filter(cls.status == STATUS_IN_REVIEW).filter(
            cls.score_category.IN(score_categories)).filter(
                cls.author_id != user_id).fetch(
                    feconf.DEFAULT_QUERY_LIMIT)
    @classmethod
    def get_in_review_suggestions_of_suggestion_type(
            cls, suggestion_type, user_id):
        """Gets all suggestions of suggestion_type which are in review.

        Args:
            suggestion_type: str. The type of suggestion to query for.
            user_id: str. The id of the user trying to make this query.
                As a user cannot review their own suggestions, suggestions
                authored by the user will be excluded.

        Returns:
            list(SuggestionModel). A list of suggestions that are of the given
            type, which are in review, but not created by the given user.
        """
        return cls.get_all().filter(cls.status == STATUS_IN_REVIEW).filter(
            cls.suggestion_type == suggestion_type).filter(
                cls.author_id != user_id).fetch(feconf.DEFAULT_QUERY_LIMIT)
    @classmethod
    def get_question_suggestions_waiting_longest_for_review(cls):
        """Returns MAX_QUESTION_SUGGESTIONS_TO_FETCH_FOR_REVIEWER_EMAILS number
        of question suggestions, sorted in descending order by review wait
        time.

        Returns:
            list(GeneralSuggestionModel). A list of question suggestions,
            sorted in descending order based on how long the suggestions have
            been waiting for review.
        """
        # Ascending last_updated order == longest-waiting first.
        return (
            cls.get_all()
            .filter(cls.status == STATUS_IN_REVIEW)
            .filter(cls.suggestion_type == SUGGESTION_TYPE_ADD_QUESTION)
            .order(cls.last_updated)
            .fetch(MAX_QUESTION_SUGGESTIONS_TO_FETCH_FOR_REVIEWER_EMAILS)
        )
    @classmethod
    def get_translation_suggestions_waiting_longest_for_review_per_lang(
            cls, language_code):
        """Returns MAX_TRANSLATION_SUGGESTIONS_TO_FETCH_FOR_REVIEWER_EMAILS
        number of translation suggestions in the specified language code,
        sorted in descending order by review wait time.

        Args:
            language_code: str. The ISO 639-1 language code of the translation
                suggestions.

        Returns:
            list(GeneralSuggestionModel). A list of translation suggestions,
            sorted in descending order based on how long the suggestions have
            been waiting for review.
        """
        return (
            cls.get_all()
            .filter(cls.status == STATUS_IN_REVIEW)
            .filter(cls.suggestion_type == SUGGESTION_TYPE_TRANSLATE_CONTENT)
            .filter(cls.language_code == language_code)
            .order(cls.last_updated)
            .fetch(MAX_TRANSLATION_SUGGESTIONS_TO_FETCH_FOR_REVIEWER_EMAILS)
        )
    @classmethod
    def get_user_created_suggestions_of_suggestion_type(
            cls, suggestion_type, user_id):
        """Gets all suggestions of suggestion_type which the user has created.

        Args:
            suggestion_type: str. The type of suggestion to query for.
            user_id: str. The id of the user trying to make this query.

        Returns:
            list(SuggestionModel). A list of suggestions that are of the given
            type, which the given user has created, newest first.
        """
        return cls.get_all().filter(
            cls.suggestion_type == suggestion_type).filter(
                cls.author_id == user_id).order(-cls.created_on).fetch(
                    feconf.DEFAULT_QUERY_LIMIT)
    @classmethod
    def get_all_score_categories(cls):
        """Gets all the score categories for which suggestions have been
        created.

        Returns:
            list(str). A list of all the score categories.
        """
        # Distinct projection query returns each category only once.
        query_set = cls.query(projection=['score_category'], distinct=True)
        return [data.score_category for data in query_set]
    @classmethod
    def export_data(cls, user_id):
        """Exports the data from GeneralSuggestionModel
        into dict format for Takeout.

        Args:
            user_id: str. The ID of the user whose data should be exported.

        Returns:
            dict. Dictionary of the data from GeneralSuggestionModel, keyed
            by suggestion ID.
        """
        user_data = dict()
        suggestion_models = (
            cls.get_all()
            .filter(cls.author_id == user_id).fetch())
        for suggestion_model in suggestion_models:
            user_data[suggestion_model.id] = {
                'suggestion_type': suggestion_model.suggestion_type,
                'target_type': suggestion_model.target_type,
                'target_id': suggestion_model.target_id,
                'target_version_at_submission': (
                    suggestion_model
                    .target_version_at_submission),
                'status': suggestion_model.status,
                'change_cmd': suggestion_model.change_cmd
            }
        return user_data
class GeneralVoiceoverApplicationModel(base_models.BaseModel):
    """A general model for voiceover application of an entity.

    The ID of the voiceover application will be a random hashed value.
    """
    # The type of entity to which the user will be assigned as a voice artist
    # once the application is approved.
    target_type = ndb.StringProperty(required=True, indexed=True)
    # The ID of the entity to which the application belongs.
    target_id = ndb.StringProperty(required=True, indexed=True)
    # The language code for the voiceover audio.
    language_code = ndb.StringProperty(required=True, indexed=True)
    # The status of the application. One of: accepted, rejected, in-review.
    status = ndb.StringProperty(
        required=True, indexed=True, choices=STATUS_CHOICES)
    # The HTML content written in the given language_code.
    # This will typically be a snapshot of the content of the initial card of
    # the target.
    content = ndb.TextProperty(required=True)
    # The filename of the voiceover audio. The filename will have
    # datetime-randomId(length 6)-language_code.mp3 pattern.
    filename = ndb.StringProperty(required=True, indexed=True)
    # The ID of the author of the voiceover application.
    author_id = ndb.StringProperty(required=True, indexed=True)
    # The ID of the reviewer who accepted/rejected the voiceover application.
    final_reviewer_id = ndb.StringProperty(indexed=True)
    # The plain text message submitted by the reviewer while rejecting the
    # application.
    rejection_message = ndb.TextProperty()
    @staticmethod
    def get_deletion_policy():
        """General voiceover application needs to be pseudonymized for the
        user.
        """
        return base_models.DELETION_POLICY.LOCALLY_PSEUDONYMIZE
    @classmethod
    def has_reference_to_user_id(cls, user_id):
        """Check whether GeneralVoiceoverApplicationModel exists for the user.

        Args:
            user_id: str. The ID of the user whose data should be checked.

        Returns:
            bool. Whether any models refer to the given user ID.
        """
        # keys_only avoids fetching full entities; existence is all we need.
        return cls.query(
            ndb.OR(cls.author_id == user_id, cls.final_reviewer_id == user_id)
        ).get(keys_only=True) is not None
    @classmethod
    def get_user_voiceover_applications(cls, author_id, status=None):
        """Returns a list of voiceover applications submitted by the given
        user.

        Args:
            author_id: str. The id of the user who created the voiceover
                application.
            status: str|None. The status of the voiceover application.
                If the status is None, the query will fetch all the
                voiceover applications.

        Returns:
            list(GeneralVoiceoverApplicationModel). The list of voiceover
            applications submitted by the given user.
        """
        # Any value outside STATUS_CHOICES (including None) means
        # "no status filter".
        if status in STATUS_CHOICES:
            return cls.query(ndb.AND(
                cls.author_id == author_id, cls.status == status)).fetch()
        else:
            return cls.query(cls.author_id == author_id).fetch()
    @classmethod
    def get_reviewable_voiceover_applications(cls, user_id):
        """Returns a list of voiceover applications which a given user can
        review.

        Args:
            user_id: str. The id of the user trying to make this query.
                As a user cannot review their own voiceover applications,
                those created by the user will be excluded.

        Returns:
            list(GeneralVoiceoverApplicationModel). The list of voiceover
            applications which the given user can review.
        """
        return cls.query(ndb.AND(
            cls.author_id != user_id,
            cls.status == STATUS_IN_REVIEW)).fetch()
    @classmethod
    def get_voiceover_applications(cls, target_type, target_id, language_code):
        """Returns a list of voiceover applications submitted for a given
        entity in a given language.

        Args:
            target_type: str. The type of entity.
            target_id: str. The ID of the targeted entity.
            language_code: str. The code of the language in which the voiceover
                application is submitted.

        Returns:
            list(GeneralVoiceoverApplicationModel). The list of voiceover
            applications submitted for the given entity in the given language.
        """
        return cls.query(ndb.AND(
            cls.target_type == target_type, cls.target_id == target_id,
            cls.language_code == language_code)).fetch()
    @classmethod
    def get_export_policy(cls):
        """Model contains user data.

        Returns:
            dict. Export policy for each field, merged over the base model's.
        """
        return dict(super(cls, cls).get_export_policy(), **{
            'target_type': base_models.EXPORT_POLICY.EXPORTED,
            'target_id': base_models.EXPORT_POLICY.EXPORTED,
            'language_code': base_models.EXPORT_POLICY.EXPORTED,
            'status': base_models.EXPORT_POLICY.EXPORTED,
            'content': base_models.EXPORT_POLICY.EXPORTED,
            'filename': base_models.EXPORT_POLICY.EXPORTED,
            'author_id': base_models.EXPORT_POLICY.EXPORTED,
            'final_reviewer_id': base_models.EXPORT_POLICY.EXPORTED,
            'rejection_message': base_models.EXPORT_POLICY.EXPORTED
        })
    @classmethod
    def export_data(cls, user_id):
        """(Takeout) Exports the data from GeneralVoiceoverApplicationModel
        into dict format.

        Args:
            user_id: str. The ID of the user whose data should be exported.

        Returns:
            dict. Dictionary of the data from GeneralVoiceoverApplicationModel,
            keyed by application ID.
        """
        user_data = dict()
        voiceover_models = (
            cls.query(cls.author_id == user_id).fetch())
        for voiceover_model in voiceover_models:
            user_data[voiceover_model.id] = {
                'target_type': voiceover_model.target_type,
                'target_id': voiceover_model.target_id,
                'language_code': voiceover_model.language_code,
                'status': voiceover_model.status,
                'content': voiceover_model.content,
                'filename': voiceover_model.filename,
                'rejection_message': voiceover_model.rejection_message
            }
        return user_data
| |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import pytest
import numpy as np
from ....tests.helper import assert_quantity_allclose, catch_warnings, remote_data
from .. import iers
from .... import units as u
from ....table import QTable
from ....time import Time
from ....extern.six.moves import urllib
# Python 2 has no FileNotFoundError builtin; fall back to IOError there.
FILE_NOT_FOUND_ERROR = getattr(__builtins__, 'FileNotFoundError', IOError)
try:
    iers.IERS_A.open()  # check if IERS_A is available
except IOError:
    HAS_IERS_A = False
else:
    HAS_IERS_A = True
# Path to a bundled excerpt of an IERS-A table used by the tests below.
IERS_A_EXCERPT = os.path.join(os.path.dirname(__file__), 'iers_a_excerpt')
class TestBasic():
    """Basic tests that IERS_B returns correct values"""
    def test_simple(self):
        # close() drops the cached table so open() below reloads it.
        iers.IERS.close()
        assert iers.IERS.iers_table is None
        iers_tab = iers.IERS.open()
        assert iers.IERS.iers_table is not None
        assert isinstance(iers.IERS.iers_table, QTable)
        assert iers_tab['UT1_UTC'].unit is u.second
        assert iers_tab['PM_x'].unit is u.arcsecond
        assert iers_tab['PM_y'].unit is u.arcsecond
        # Two-part Julian dates passed as separate integer/fraction arrays.
        jd1 = np.array([2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5])
        jd2 = np.array([0.49999421, 0.99997685, 0.99998843, 0., 0.5])
        ut1_utc = iers_tab.ut1_utc(jd1, jd2)
        assert isinstance(ut1_utc, u.Quantity)
        assert ut1_utc.unit is u.second
        assert_quantity_allclose(ut1_utc, [-0.5868211, -0.5868184, -0.5868184,
                                           0.4131816, 0.41328895] * u.s,
                                 atol=1.*u.ns)
        # should be future-proof; surely we've moved to another planet by then
        with pytest.raises(IndexError):
            ut1_utc2, status2 = iers_tab.ut1_utc(1e11, 0.)
        # also check it returns the right status
        ut1_utc2, status2 = iers_tab.ut1_utc(jd1, jd2, return_status=True)
        assert np.all(status2 == iers.FROM_IERS_B)
        # With return_status=True an out-of-range time reports a status code
        # instead of raising.
        ut1_utc4, status4 = iers_tab.ut1_utc(1e11, 0., return_status=True)
        assert status4 == iers.TIME_BEYOND_IERS_RANGE
        # check it works via Time too
        t = Time(jd1, jd2, format='jd', scale='utc')
        ut1_utc3 = iers_tab.ut1_utc(t)
        assert_quantity_allclose(ut1_utc3, [-0.5868211, -0.5868184, -0.5868184,
                                            0.4131816, 0.41328895] *u.s,
                                 atol=1.*u.ns)
        # Table behaves properly as a table (e.g. can be sliced)
        assert len(iers_tab[:2]) == 2
    def test_open_filename(self):
        iers.IERS.close()
        iers.IERS.open(iers.IERS_B_FILE)
        assert iers.IERS.iers_table is not None
        assert isinstance(iers.IERS.iers_table, QTable)
        iers.IERS.close()
        with pytest.raises(FILE_NOT_FOUND_ERROR):
            iers.IERS.open('surely this does not exist')
    def test_open_network_url(self):
        iers.IERS_A.close()
        # Exercise the URL code path using a local file:// URL (no network).
        iers.IERS_A.open("file:" + urllib.request.pathname2url(IERS_A_EXCERPT))
        assert iers.IERS_A.iers_table is not None
        assert isinstance(iers.IERS_A.iers_table, QTable)
        iers.IERS_A.close()
class TestIERS_AExcerpt():
    """Tests of the IERS_A reader using the bundled table excerpt."""
    def test_simple(self):
        # Test the IERS A reader. It is also a regression test that ensures
        # values do not get overridden by IERS B; see #4933.
        iers_tab = iers.IERS_A.open(IERS_A_EXCERPT)
        assert iers_tab['UT1_UTC'].unit is u.second
        # The excerpt mixes all three flag sources: I(ERS A), P(rediction),
        # and B (IERS B).
        assert 'P' in iers_tab['UT1Flag']
        assert 'I' in iers_tab['UT1Flag']
        assert 'B' in iers_tab['UT1Flag']
        assert np.all((iers_tab['UT1Flag'] == 'I') |
                      (iers_tab['UT1Flag'] == 'P') |
                      (iers_tab['UT1Flag'] == 'B'))
        assert iers_tab['PM_x'].unit is u.arcsecond
        assert iers_tab['PM_y'].unit is u.arcsecond
        assert 'P' in iers_tab['PolPMFlag']
        assert 'I' in iers_tab['PolPMFlag']
        assert 'B' in iers_tab['PolPMFlag']
        assert np.all((iers_tab['PolPMFlag'] == 'P') |
                      (iers_tab['PolPMFlag'] == 'I') |
                      (iers_tab['PolPMFlag'] == 'B'))
        t = Time([57053., 57054., 57055.], format='mjd')
        ut1_utc, status = iers_tab.ut1_utc(t, return_status=True)
        # First epoch comes from IERS B, the rest from IERS A.
        assert status[0] == iers.FROM_IERS_B
        assert np.all(status[1:] == iers.FROM_IERS_A)
        # These values are *exactly* as given in the table, so they should
        # match to double precision accuracy.
        assert_quantity_allclose(ut1_utc,
                                 [-0.4916557, -0.4925323, -0.4934373] * u.s,
                                 atol=1.*u.ns)
        pm_x, pm_y, status = iers_tab.pm_xy(t, return_status=True)
        assert status[0] == iers.FROM_IERS_B
        assert np.all(status[1:] == iers.FROM_IERS_A)
        assert_quantity_allclose(pm_x,
                                 [0.003734, 0.004581, 0.004623] * u.arcsec,
                                 atol=1.*u.narcsec)
        assert_quantity_allclose(pm_y,
                                 [0.310824, 0.313150, 0.315517] * u.arcsec,
                                 atol=1.*u.narcsec)
        # Table behaves properly as a table (e.g. can be sliced)
        assert len(iers_tab[:2]) == 2
@pytest.mark.skipif(str('not HAS_IERS_A'))
class TestIERS_A():
    def test_simple(self):
        # Interpolate UT1-UTC from the locally installed full IERS-A table.
        iers_tab = iers.IERS_A.open()
        jd1 = np.array([2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5])
        jd2 = np.array([0.49999421, 0.99997685, 0.99998843, 0., 0.5])
        ut1_utc, status = iers_tab.ut1_utc(jd1, jd2, return_status=True)
        # These mid-2012 epochs lie in the finalized IERS-B part of the table.
        assert np.all(status == iers.FROM_IERS_B)
        assert_quantity_allclose(ut1_utc, [-0.5868211, -0.5868184, -0.5868184,
                                           0.4131816, 0.41328895] * u.s,
                                 atol=1.*u.ns)
        # A time far beyond the table range must be flagged, not extrapolated.
        ut1_utc2, status2 = iers_tab.ut1_utc(1e11, 0., return_status=True)
        assert status2 == iers.TIME_BEYOND_IERS_RANGE

        # "Now" should fall in the predictive part of a current IERS-A table
        # and yield a non-trivial correction.
        tnow = Time.now()
        ut1_utc3, status3 = iers_tab.ut1_utc(tnow, return_status=True)
        assert status3 == iers.FROM_IERS_A_PREDICTION
        assert ut1_utc3 != 0.
class TestIERS_Auto():

    @remote_data
    def test_no_auto_download(self):
        # With auto_download disabled, IERS_Auto must fall back to the
        # bundled IERS-B table instead of fetching IERS-A.
        with iers.conf.set_temp('auto_download', False):
            t = iers.IERS_Auto.open()
        assert type(t) is iers.IERS_B

    @remote_data
    def test_simple(self):
        # Two excerpt files with identical values but start dates 60 days
        # apart, served via file:// URLs so no real network is needed.
        iers_a_file_1 = os.path.join(os.path.dirname(__file__), 'finals2000A-2016-02-30-test')
        iers_a_file_2 = os.path.join(os.path.dirname(__file__), 'finals2000A-2016-04-30-test')
        iers_a_url_1 = 'file://' + os.path.abspath(iers_a_file_1)
        iers_a_url_2 = 'file://' + os.path.abspath(iers_a_file_2)

        with iers.conf.set_temp('iers_auto_url', iers_a_url_1):

            dat = iers.IERS_Auto.open()
            assert dat['MJD'][0] == 57359.0 * u.d
            assert dat['MJD'][-1] == 57539.0 * u.d

            # Pretend we are accessing at a time 7 days after start of predictive data
            predictive_mjd = dat.meta['predictive_mjd']
            dat._time_now = Time(predictive_mjd, format='mjd') + 7 * u.d

            # Look at times before and after the test file begins.  0.1292905 is
            # the IERS-B value from MJD=57359.  The value in
            # finals2000A-2016-02-30-test has been replaced at this point.
            assert np.allclose(dat.ut1_utc(Time(50000, format='mjd').jd).value, 0.1292905)
            assert np.allclose(dat.ut1_utc(Time(60000, format='mjd').jd).value, -0.2246227)

            # Now pretend we are accessing at time 60 days after start of predictive data.
            # There will be a warning when downloading the file doesn't give new data
            # and an exception when extrapolating into the future with insufficient data.
            dat._time_now = Time(predictive_mjd, format='mjd') + 60 * u.d
            assert np.allclose(dat.ut1_utc(Time(50000, format='mjd').jd).value, 0.1292905)
            with catch_warnings(iers.IERSStaleWarning) as warns:
                with pytest.raises(ValueError) as err:
                    dat.ut1_utc(Time(60000, format='mjd').jd)
            assert 'interpolating from IERS_Auto using predictive values' in str(err)
            assert len(warns) == 1
            assert 'IERS_Auto predictive values are older' in str(warns[0].message)

            # Warning only if we are getting return status
            with catch_warnings(iers.IERSStaleWarning) as warns:
                dat.ut1_utc(Time(60000, format='mjd').jd, return_status=True)
            assert len(warns) == 1
            assert 'IERS_Auto predictive values are older' in str(warns[0].message)

            # Now set auto_max_age = None which says that we don't care how old the
            # available IERS-A file is.  There should be no warnings or exceptions.
            with iers.conf.set_temp('auto_max_age', None):
                with catch_warnings(iers.IERSStaleWarning) as warns:
                    dat.ut1_utc(Time(60000, format='mjd').jd)
                assert not warns

        # Now point to a later file with same values but MJD increased by
        # 60 days and see that things work.  dat._time_now is still the same value
        # as before, i.e. right around the start of predictive values for the new file.
        # (In other words this is like downloading the latest file online right now).
        with iers.conf.set_temp('iers_auto_url', iers_a_url_2):

            # Look at times before and after the test file begins.  This forces a new download.
            assert np.allclose(dat.ut1_utc(Time(50000, format='mjd').jd).value, 0.1292905)
            assert np.allclose(dat.ut1_utc(Time(60000, format='mjd').jd).value, -0.3)

            # Now the time range should be different.
            assert dat['MJD'][0] == 57359.0 * u.d
            assert dat['MJD'][-1] == (57539.0 + 60) * u.d
| |
import smbus
import logging
import glob
import re
logger = logging.getLogger(__name__)
class DeviceException(Exception):
    """Raised for PCA9685 driver errors (bus autodetection failure,
    out-of-range register/LED/frequency values)."""
    pass
class Registers(object):
    """PCA9685 register addresses used by this driver."""
    MODE_1 = 0x00
    MODE_2 = 0x01
    LED_STRIP_START = 0x06  # LED0 ON Low Byte
    PRE_SCALE = 0xFE  # PWM frequency prescaler (written while asleep, see set_pwm_frequency)
class Mode1(object):
    """Bit positions of the flags in the MODE1 register."""
    RESTART = 7   # restart logic state
    EXTCLK = 6    # use external clock input
    AI = 5        # register auto-increment
    SLEEP = 4     # low-power mode; set/cleared by Device.sleep()/wake()
    SUB1 = 3      # respond to I2C subaddress 1
    SUB2 = 2      # respond to I2C subaddress 2
    SUB3 = 1      # respond to I2C subaddress 3
    ALLCALL = 0   # respond to the LED all-call address
class Mode2(object):
    """Bit positions of the flags in the MODE2 register."""
    INVRT = 4      # invert output logic state
    OCH = 3        # outputs change on ACK vs. on STOP
    OUTDRV = 2     # totem-pole vs. open-drain output structure
    OUTNE_1 = 1    # output-not-enabled behaviour, high bit (was 'OUTNE_1 =1' — PEP 8 spacing fix)
    OUTNE_0 = 0    # output-not-enabled behaviour, low bit
def value_low(val):
    """Return the least-significant byte (bits 0-7) of *val*."""
    low_byte = val & 0xFF
    return low_byte
def value_high(val):
    """Return the second byte (bits 8-15) of *val*."""
    high_byte = (val & 0xFF00) >> 8
    return high_byte
class Device(object):
    """Interface to the NXP PCA9685 16-channel, 12-bit PWM controller.

    All traffic goes over I2C via SMBus single-byte transfers.
    """

    # Inclusive (min, max) bounds for values accepted by this driver.
    ranges = dict(
        pwm_frequency=(24, 1526),   # Hz, per the PCA9685 datasheet
        led_number=(0, 15),         # 16 LED outputs
        led_value=(0, 4095),        # 12-bit PWM counter
        register_value=(0, 255),    # one byte
    )

    def __init__(self, address, bus_number=None, bus_interface_factory=smbus.SMBus, glober=glob.glob):
        """Creates an interface to PCA9685 device

        :param address: the I2C address of the device. Check the addresses with `i2cdetect -y 1`
        :param bus_number: the number of the I2C bus in the linux machine. See /dev/i2c-*
        :param bus_interface_factory: bus class factory, used for unit tests
        :param glober: for search in file system, used for unit tests
        :raises DeviceException: if *bus_number* is None and no I2C bus is found
        """
        if bus_number is None:
            # Autodetect: use the first /dev/i2c-* bus present on the system.
            bus_list = Device.get_i2c_bus_numbers(glober)
            if len(bus_list) < 1:
                raise DeviceException("Cannot determine I2C bus number")
            bus_number = bus_list[0]
        self.__address = address
        self.__bus = bus_interface_factory(bus_number)
        self.__oscillator_clock = 25000000  # internal oscillator frequency, Hz

    @staticmethod
    def get_i2c_bus_numbers(glober=glob.glob):
        """Search all the available I2C devices in the system.

        :param glober: glob-style callable, injectable for unit tests
        :returns: list of bus numbers parsed from /dev/i2c-<n> device nodes
        """
        res = []
        for device in glober("/dev/i2c-*"):
            # Bug fix: the quantifier must be inside the capture group.  The
            # old pattern "([\d]){1,2}" captured only the LAST digit, so
            # /dev/i2c-10 was reported as bus 0.  Also generalized to any
            # number of digits and guarded against non-matching names.
            match = re.match(r"/dev/i2c-(\d+)$", device)
            if match is not None:
                res.append(int(match.group(1)))
        return res

    @property
    def mode_1(self):
        """Returns the Mode 1 register value"""
        return self.read(Registers.MODE_1)

    @property
    def bus(self):
        """Returns the bus instance"""
        return self.__bus

    def get_led_register_from_name(self, name):
        """Parse an attribute name such as ``led_7`` into a register address.

        :param name: attribute name, like: led_1
        :raises AttributeError: if the name is not ``led_<0..15>``
        """
        res = re.match('^led_([0-9]{1,2})$', name)
        if res is None:
            raise AttributeError("Unknown attribute: '%s'" % name)
        led_num = int(res.group(1))
        if led_num < 0 or led_num > 15:
            raise AttributeError("Unknown attribute: '%s'" % name)
        return self.calc_led_register(led_num)

    def calc_led_register(self, led_num):
        """Calculate register number for LED pin.

        Each LED occupies four consecutive registers (ON_L, ON_H, OFF_L,
        OFF_H); the returned address is the OFF low byte, which is the pair
        :meth:`set_pwm` writes.

        :param led_num: the led number, typically 0-15
        """
        start = Registers.LED_STRIP_START + 2  # skip ON_L/ON_H -> OFF_L
        return start + (led_num * 4)

    def __check_range(self, kind, value):
        """Validate *value* against ``ranges[kind]``.

        :raises DeviceException: if the value is out of range
        """
        # Renamed parameters/locals so the builtins 'type' and 'range'
        # are no longer shadowed (messages are unchanged).
        bounds = self.ranges[kind]
        if value < bounds[0]:
            raise DeviceException("%s must be greater than %s, got %s" % (kind, bounds[0], value))
        if value > bounds[1]:
            raise DeviceException("%s must be less than %s, got %s" % (kind, bounds[1], value))

    def set_pwm(self, led_num, value):
        """Set PWM value for the specified LED
        :param led_num: LED number (0-15)
        :param value: the 12 bit value (0-4095)
        """
        self.__check_range('led_number', led_num)
        self.__check_range('led_value', value)
        register_low = self.calc_led_register(led_num)
        # The 12-bit value is split across the OFF_L / OFF_H register pair.
        self.write(register_low, value_low(value))
        self.write(register_low + 1, value_high(value))

    def __get_led_value(self, register_low):
        # Recombine the low/high byte pair into the 12-bit PWM value.
        low = self.read(register_low)
        high = self.read(register_low + 1)
        return low + (high * 256)

    def get_pwm(self, led_num):
        """Generic getter for all LED PWM value"""
        self.__check_range('led_number', led_num)
        register_low = self.calc_led_register(led_num)
        return self.__get_led_value(register_low)

    def __getattr__(self, name):
        """Generic getter property handler for all LED PWM value.

        Enables ``device.led_3``-style reads; any other missing attribute
        raises AttributeError via get_led_register_from_name.
        """
        register_low = self.get_led_register_from_name(name)
        return self.__get_led_value(register_low)

    def sleep(self):
        """Send the controller to sleep"""
        logger.debug("Sleep the controller")
        self.write(Registers.MODE_1, self.mode_1 | (1 << Mode1.SLEEP))

    def wake(self):
        """Wake up the controller"""
        logger.debug("Wake up the controller")
        self.write(Registers.MODE_1, self.mode_1 & (255 - (1 << Mode1.SLEEP)))

    def write(self, reg, value):
        """Write raw byte value to the specified register
        :param reg: the register number (0-69, 250-255)
        :param value: byte value
        """
        # TODO: check reg: 0-69, 250-255
        self.__check_range('register_value', value)
        logger.debug("Write '%s' to register '%s'" % (value, reg))
        self.__bus.write_byte_data(self.__address, reg, value)

    def read(self, reg):
        """Read data from register
        :param reg: the register number (0-69, 250-255)
        """
        return self.__bus.read_byte_data(self.__address, reg)

    def calc_pre_scale(self, frequency):
        """Calculate the controller's PRE_SCALE value, specified by the PCA9685 datasheet
        :param frequency: source frequency value
        """
        return int(round(self.__oscillator_clock / (4096.0 * frequency)) - 1)

    def set_pwm_frequency(self, value):
        """Set the frequency for all PWM output
        :param value: the frequency in Hz
        """
        self.__check_range('pwm_frequency', value)
        reg_val = self.calc_pre_scale(value)
        logger.debug("Calculated prescale value is %s" % reg_val)
        # PRE_SCALE is only writable while the oscillator is asleep.
        self.sleep()
        self.write(Registers.PRE_SCALE, reg_val)
        self.wake()

    def calc_frequency(self, prescale):
        """Calculate the frequency by the controller's prescale, specified by the PCA9685 datasheet
        :param prescale: the prescale value of the controller
        """
        return int(round(self.__oscillator_clock / ((prescale + 1) * 4096.0)))

    def get_pwm_frequency(self):
        """Gets the frequency for all PWM output"""
        return self.calc_frequency(self.read(Registers.PRE_SCALE))
| |
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Filters for Google Cloud Bigtable Row classes."""
from gcloud._helpers import _microseconds_from_datetime
from gcloud._helpers import _to_bytes
from gcloud.bigtable._generated import (
data_pb2 as data_v2_pb2)
class RowFilter(object):
    """Basic filter to apply to cells in a row.

    These values can be combined via :class:`RowFilterChain`,
    :class:`RowFilterUnion` and :class:`ConditionalRowFilter`.

    .. note::

        This class is a do-nothing base class for all row filters.
    """

    def __ne__(self, other):
        """Inequality defined in terms of ``__eq__``.

        Bug fix: when ``__eq__`` returns :data:`NotImplemented` (the
        default for unrelated types on this base class), propagate it so
        Python falls back to its standard comparison machinery —
        ``not NotImplemented`` raises :class:`TypeError` on Python 3.12+
        and was never a meaningful boolean.
        """
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
class _BoolFilter(RowFilter):
    """Shared base for row filters driven by a single boolean flag.

    :type flag: bool
    :param flag: An indicator if a setting is turned on or off.
    """

    def __init__(self, flag):
        self.flag = flag

    def __eq__(self, other):
        # Same-type instances compare by flag; everything else is unequal.
        return isinstance(other, self.__class__) and other.flag == self.flag
class SinkFilter(_BoolFilter):
    """Advanced row filter that bypasses parent filters.

    :type flag: bool
    :param flag: ADVANCED USE ONLY. Hook for introspection into the row
                 filter. Outputs all cells directly to the output of the
                 read rather than to any parent filter. Cannot be used
                 within the ``predicate_filter``, ``true_filter``, or
                 ``false_filter`` of a :class:`ConditionalRowFilter`.
    """

    def to_pb(self):
        """Serialize this filter as a ``RowFilter`` protobuf.

        :rtype: :class:`.data_v2_pb2.RowFilter`
        :returns: Protobuf with the ``sink`` field set from ``self.flag``.
        """
        sink_pb = data_v2_pb2.RowFilter(sink=self.flag)
        return sink_pb
class PassAllFilter(_BoolFilter):
    """Row filter that matches everything (i.e. filters nothing out).

    :type flag: bool
    :param flag: Matches all cells, regardless of input. Functionally
                 equivalent to leaving ``filter`` unset, but included for
                 completeness.
    """

    def to_pb(self):
        """Serialize this filter as a ``RowFilter`` protobuf.

        :rtype: :class:`.data_v2_pb2.RowFilter`
        :returns: Protobuf with ``pass_all_filter`` set from ``self.flag``.
        """
        pass_all_pb = data_v2_pb2.RowFilter(pass_all_filter=self.flag)
        return pass_all_pb
class BlockAllFilter(_BoolFilter):
    """Row filter that matches nothing at all.

    :type flag: bool
    :param flag: Does not match any cells, regardless of input. Useful for
                 temporarily disabling just part of a filter.
    """

    def to_pb(self):
        """Serialize this filter as a ``RowFilter`` protobuf.

        :rtype: :class:`.data_v2_pb2.RowFilter`
        :returns: Protobuf with ``block_all_filter`` set from ``self.flag``.
        """
        block_all_pb = data_v2_pb2.RowFilter(block_all_filter=self.flag)
        return block_all_pb
class _RegexFilter(RowFilter):
    """Shared base for row filters driven by an RE2 regular expression.

    The ``regex`` must be a valid RE2 pattern. See Google's
    `RE2 reference`_ for the accepted syntax.

    .. _RE2 reference: https://github.com/google/re2/wiki/Syntax

    :type regex: bytes or str
    :param regex: A regular expression (RE2) for some row filter.
    """

    def __init__(self, regex):
        # Normalized to bytes so equality and serialization are stable.
        self.regex = _to_bytes(regex)

    def __eq__(self, other):
        return isinstance(other, self.__class__) and other.regex == self.regex
class RowKeyRegexFilter(_RegexFilter):
    """Matches cells from rows whose key satisfies a regular expression.

    The ``regex`` must be a valid RE2 pattern. See Google's
    `RE2 reference`_ for the accepted syntax.

    .. _RE2 reference: https://github.com/google/re2/wiki/Syntax

    .. note::

        Take special care with the expression used. Since each of these
        properties can contain arbitrary bytes, the ``\\C`` escape
        sequence must be used if a true wildcard is desired. The ``.``
        character will not match the new line character ``\\n``, which
        may be present in a binary value.

    :type regex: bytes
    :param regex: A regular expression (RE2) to match cells from rows with
                  row keys that satisfy this regex. For a
                  ``CheckAndMutateRowRequest``, this filter is unnecessary
                  since the row key is already specified.
    """

    def to_pb(self):
        """Serialize this filter as a ``RowFilter`` protobuf.

        :rtype: :class:`.data_v2_pb2.RowFilter`
        :returns: Protobuf with ``row_key_regex_filter`` set.
        """
        regex_pb = data_v2_pb2.RowFilter(row_key_regex_filter=self.regex)
        return regex_pb
class RowSampleFilter(RowFilter):
    """Matches every cell of a row with probability ``sample``.

    :type sample: float
    :param sample: The probability of matching a cell (must be in the
                   interval ``[0, 1]``).
    """

    def __init__(self, sample):
        self.sample = sample

    def __eq__(self, other):
        return isinstance(other, self.__class__) and other.sample == self.sample

    def to_pb(self):
        """Serialize this filter as a ``RowFilter`` protobuf.

        :rtype: :class:`.data_v2_pb2.RowFilter`
        :returns: Protobuf with ``row_sample_filter`` set.
        """
        sample_pb = data_v2_pb2.RowFilter(row_sample_filter=self.sample)
        return sample_pb
class FamilyNameRegexFilter(_RegexFilter):
    """Matches cells from column families satisfying a regular expression.

    The ``regex`` must be a valid RE2 pattern. See Google's
    `RE2 reference`_ for the accepted syntax.

    .. _RE2 reference: https://github.com/google/re2/wiki/Syntax

    :type regex: str
    :param regex: A regular expression (RE2) to match cells from columns in
                  a given column family. For technical reasons, the regex
                  must not contain the ``':'`` character, even if it is not
                  being used as a literal.
    """

    def to_pb(self):
        """Serialize this filter as a ``RowFilter`` protobuf.

        :rtype: :class:`.data_v2_pb2.RowFilter`
        :returns: Protobuf with ``family_name_regex_filter`` set.
        """
        regex_pb = data_v2_pb2.RowFilter(family_name_regex_filter=self.regex)
        return regex_pb
class ColumnQualifierRegexFilter(_RegexFilter):
    """Matches cells whose column qualifier satisfies a regular expression.

    The ``regex`` must be a valid RE2 pattern. See Google's
    `RE2 reference`_ for the accepted syntax.

    .. _RE2 reference: https://github.com/google/re2/wiki/Syntax

    .. note::

        Take special care with the expression used. Since each of these
        properties can contain arbitrary bytes, the ``\\C`` escape
        sequence must be used if a true wildcard is desired. The ``.``
        character will not match the new line character ``\\n``, which
        may be present in a binary value.

    :type regex: bytes
    :param regex: A regular expression (RE2) to match cells from column
                  that match this regex (irrespective of column family).
    """

    def to_pb(self):
        """Serialize this filter as a ``RowFilter`` protobuf.

        :rtype: :class:`.data_v2_pb2.RowFilter`
        :returns: Protobuf with ``column_qualifier_regex_filter`` set.
        """
        regex_pb = data_v2_pb2.RowFilter(
            column_qualifier_regex_filter=self.regex)
        return regex_pb
class TimestampRange(object):
    """Time interval with an inclusive lower and exclusive upper bound.

    :type start: :class:`datetime.datetime`
    :param start: (Optional) The (inclusive) lower bound of the timestamp
                  range. If omitted, defaults to Unix epoch.

    :type end: :class:`datetime.datetime`
    :param end: (Optional) The (exclusive) upper bound of the timestamp
                range. If omitted, no upper bound is used.
    """

    def __init__(self, start=None, end=None):
        self.start = start
        self.end = end

    def __eq__(self, other):
        return (isinstance(other, self.__class__) and
                other.start == self.start and
                other.end == self.end)

    def __ne__(self, other):
        return not self.__eq__(other)

    def to_pb(self):
        """Serialize this range as a ``TimestampRange`` protobuf.

        :rtype: :class:`.data_v2_pb2.TimestampRange`
        :returns: The converted current object.
        """
        # Only the bounds that were supplied appear in the protobuf.
        pb_kwargs = {}
        if self.start is not None:
            pb_kwargs['start_timestamp_micros'] = (
                _microseconds_from_datetime(self.start))
        if self.end is not None:
            pb_kwargs['end_timestamp_micros'] = (
                _microseconds_from_datetime(self.end))
        return data_v2_pb2.TimestampRange(**pb_kwargs)
class TimestampRangeFilter(RowFilter):
    """Limits cells to those whose timestamp falls within a range.

    :type range_: :class:`TimestampRange`
    :param range_: Range of time that cells should match against.
    """

    def __init__(self, range_):
        self.range_ = range_

    def __eq__(self, other):
        return isinstance(other, self.__class__) and other.range_ == self.range_

    def to_pb(self):
        """Serialize this filter as a ``RowFilter`` protobuf.

        Converts ``range_`` to its protobuf form and embeds it in the
        ``timestamp_range_filter`` field.

        :rtype: :class:`.data_v2_pb2.RowFilter`
        :returns: The converted current object.
        """
        range_pb = self.range_.to_pb()
        return data_v2_pb2.RowFilter(timestamp_range_filter=range_pb)
class ColumnRangeFilter(RowFilter):
    """A row filter to restrict to a range of columns.

    Both the start and end column can be included or excluded in the range.
    By default, we include them both, but this can be changed with optional
    flags.

    :type column_family_id: str
    :param column_family_id: The column family that contains the columns. Must
                             be of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.

    :type start_column: bytes
    :param start_column: The start of the range of columns. If no value is
                         used, the backend applies no lower bound to the
                         values.

    :type end_column: bytes
    :param end_column: The end of the range of columns. If no value is used,
                       the backend applies no upper bound to the values.

    :type inclusive_start: bool
    :param inclusive_start: Boolean indicating if the start column should be
                            included in the range (or excluded). Defaults
                            to :data:`True` if ``start_column`` is passed and
                            no ``inclusive_start`` was given.

    :type inclusive_end: bool
    :param inclusive_end: Boolean indicating if the end column should be
                          included in the range (or excluded). Defaults
                          to :data:`True` if ``end_column`` is passed and
                          no ``inclusive_end`` was given.

    :raises: :class:`ValueError <exceptions.ValueError>` if ``inclusive_start``
             is set but no ``start_column`` is given or if ``inclusive_end``
             is set but no ``end_column`` is given
    """

    def __init__(self, column_family_id, start_column=None, end_column=None,
                 inclusive_start=None, inclusive_end=None):
        self.column_family_id = column_family_id

        # An explicit inclusive flag without its matching bound is an error;
        # an omitted flag defaults to an inclusive bound.
        if inclusive_start is None:
            inclusive_start = True
        elif start_column is None:
            raise ValueError('Inclusive start was specified but no '
                             'start column was given.')
        self.start_column = start_column
        self.inclusive_start = inclusive_start

        if inclusive_end is None:
            inclusive_end = True
        elif end_column is None:
            raise ValueError('Inclusive end was specified but no '
                             'end column was given.')
        self.end_column = end_column
        self.inclusive_end = inclusive_end

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return (other.column_family_id == self.column_family_id and
                other.start_column == self.start_column and
                other.end_column == self.end_column and
                other.inclusive_start == self.inclusive_start and
                other.inclusive_end == self.inclusive_end)

    def to_pb(self):
        """Converts the row filter to a protobuf.

        First converts to a :class:`.data_v2_pb2.ColumnRange` and then uses it
        in the ``column_range_filter`` field.

        :rtype: :class:`.data_v2_pb2.RowFilter`
        :returns: The converted current object.
        """
        column_range_kwargs = {'family_name': self.column_family_id}
        if self.start_column is not None:
            # The closed/open protobuf field name encodes inclusivity.
            if self.inclusive_start:
                key = 'start_qualifier_closed'
            else:
                key = 'start_qualifier_open'
            column_range_kwargs[key] = _to_bytes(self.start_column)
        if self.end_column is not None:
            if self.inclusive_end:
                key = 'end_qualifier_closed'
            else:
                key = 'end_qualifier_open'
            column_range_kwargs[key] = _to_bytes(self.end_column)

        column_range = data_v2_pb2.ColumnRange(**column_range_kwargs)
        return data_v2_pb2.RowFilter(column_range_filter=column_range)
class ValueRegexFilter(_RegexFilter):
    """Matches cells whose value satisfies a regular expression.

    The ``regex`` must be a valid RE2 pattern. See Google's
    `RE2 reference`_ for the accepted syntax.

    .. _RE2 reference: https://github.com/google/re2/wiki/Syntax

    .. note::

        Take special care with the expression used. Since each of these
        properties can contain arbitrary bytes, the ``\\C`` escape
        sequence must be used if a true wildcard is desired. The ``.``
        character will not match the new line character ``\\n``, which
        may be present in a binary value.

    :type regex: bytes
    :param regex: A regular expression (RE2) to match cells with values
                  that match this regex.
    """

    def to_pb(self):
        """Serialize this filter as a ``RowFilter`` protobuf.

        :rtype: :class:`.data_v2_pb2.RowFilter`
        :returns: Protobuf with ``value_regex_filter`` set.
        """
        regex_pb = data_v2_pb2.RowFilter(value_regex_filter=self.regex)
        return regex_pb
class ValueRangeFilter(RowFilter):
    """A range of values to restrict to in a row filter.

    Will only match cells that have values in this range.

    Both the start and end value can be included or excluded in the range.
    By default, we include them both, but this can be changed with optional
    flags.

    :type start_value: bytes
    :param start_value: The start of the range of values. If no value is used,
                        the backend applies no lower bound to the values.

    :type end_value: bytes
    :param end_value: The end of the range of values. If no value is used,
                      the backend applies no upper bound to the values.

    :type inclusive_start: bool
    :param inclusive_start: Boolean indicating if the start value should be
                            included in the range (or excluded). Defaults
                            to :data:`True` if ``start_value`` is passed and
                            no ``inclusive_start`` was given.

    :type inclusive_end: bool
    :param inclusive_end: Boolean indicating if the end value should be
                          included in the range (or excluded). Defaults
                          to :data:`True` if ``end_value`` is passed and
                          no ``inclusive_end`` was given.

    :raises: :class:`ValueError <exceptions.ValueError>` if ``inclusive_start``
             is set but no ``start_value`` is given or if ``inclusive_end``
             is set but no ``end_value`` is given
    """

    def __init__(self, start_value=None, end_value=None,
                 inclusive_start=None, inclusive_end=None):
        # An explicit inclusive flag without its matching bound is an error;
        # an omitted flag defaults to an inclusive bound.
        if inclusive_start is None:
            inclusive_start = True
        elif start_value is None:
            raise ValueError('Inclusive start was specified but no '
                             'start value was given.')
        self.start_value = start_value
        self.inclusive_start = inclusive_start

        if inclusive_end is None:
            inclusive_end = True
        elif end_value is None:
            raise ValueError('Inclusive end was specified but no '
                             'end value was given.')
        self.end_value = end_value
        self.inclusive_end = inclusive_end

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return (other.start_value == self.start_value and
                other.end_value == self.end_value and
                other.inclusive_start == self.inclusive_start and
                other.inclusive_end == self.inclusive_end)

    def to_pb(self):
        """Converts the row filter to a protobuf.

        First converts to a :class:`.data_v2_pb2.ValueRange` and then uses
        it to create a row filter protobuf.

        :rtype: :class:`.data_v2_pb2.RowFilter`
        :returns: The converted current object.
        """
        value_range_kwargs = {}
        if self.start_value is not None:
            # The closed/open protobuf field name encodes inclusivity.
            if self.inclusive_start:
                key = 'start_value_closed'
            else:
                key = 'start_value_open'
            value_range_kwargs[key] = _to_bytes(self.start_value)
        if self.end_value is not None:
            if self.inclusive_end:
                key = 'end_value_closed'
            else:
                key = 'end_value_open'
            value_range_kwargs[key] = _to_bytes(self.end_value)

        value_range = data_v2_pb2.ValueRange(**value_range_kwargs)
        return data_v2_pb2.RowFilter(value_range_filter=value_range)
class _CellCountFilter(RowFilter):
    """Shared base for row filters parameterized by an integer cell count.

    The count is used as an offset or a limit on the number of results
    returned.

    :type num_cells: int
    :param num_cells: An integer count / offset / limit.
    """

    def __init__(self, num_cells):
        self.num_cells = num_cells

    def __eq__(self, other):
        return (isinstance(other, self.__class__) and
                other.num_cells == self.num_cells)
class CellsRowOffsetFilter(_CellCountFilter):
    """Skips the first N cells of each row.

    :type num_cells: int
    :param num_cells: Skips the first N cells of the row.
    """

    def to_pb(self):
        """Serialize this filter as a ``RowFilter`` protobuf.

        :rtype: :class:`.data_v2_pb2.RowFilter`
        :returns: Protobuf with ``cells_per_row_offset_filter`` set.
        """
        offset_pb = data_v2_pb2.RowFilter(
            cells_per_row_offset_filter=self.num_cells)
        return offset_pb
class CellsRowLimitFilter(_CellCountFilter):
    """Keeps only the first N cells of each row.

    :type num_cells: int
    :param num_cells: Matches only the first N cells of the row.
    """

    def to_pb(self):
        """Serialize this filter as a ``RowFilter`` protobuf.

        :rtype: :class:`.data_v2_pb2.RowFilter`
        :returns: Protobuf with ``cells_per_row_limit_filter`` set.
        """
        limit_pb = data_v2_pb2.RowFilter(
            cells_per_row_limit_filter=self.num_cells)
        return limit_pb
class CellsColumnLimitFilter(_CellCountFilter):
    """Keeps only the most recent N cells within each column.

    :type num_cells: int
    :param num_cells: Matches only the most recent N cells within each
                      column. This filters a (family name, column) pair,
                      based on timestamps of each cell.
    """

    def to_pb(self):
        """Serialize this filter as a ``RowFilter`` protobuf.

        :rtype: :class:`.data_v2_pb2.RowFilter`
        :returns: Protobuf with ``cells_per_column_limit_filter`` set.
        """
        limit_pb = data_v2_pb2.RowFilter(
            cells_per_column_limit_filter=self.num_cells)
        return limit_pb
class StripValueTransformerFilter(_BoolFilter):
    """Transformer that replaces each cell value with the empty string.

    :type flag: bool
    :param flag: If :data:`True`, replaces each cell's value with the empty
                 string (0 bytes). As the name indicates, this is more
                 useful as a transformer than a generic query / filter.
    """

    def to_pb(self):
        """Serialize this filter as a ``RowFilter`` protobuf.

        :rtype: :class:`.data_v2_pb2.RowFilter`
        :returns: Protobuf with ``strip_value_transformer`` set.
        """
        strip_pb = data_v2_pb2.RowFilter(strip_value_transformer=self.flag)
        return strip_pb
class ApplyLabelFilter(RowFilter):
    """Tags each output cell with a label.

    Intended as an intermediate filter on a pre-existing filtered result
    set: when two sets are combined, the label tells which part of the
    filter each cell came from, so the client can attribute results.

    .. note::

        Due to a technical limitation of the backend, it is not currently
        possible to apply multiple labels to a cell.

    :type label: str
    :param label: Label to apply to cells in the output row. Values must be
                  at most 15 characters long, and match the pattern
                  ``[a-z0-9\\-]+``.
    """

    def __init__(self, label):
        self.label = label

    def __eq__(self, other):
        return isinstance(other, self.__class__) and other.label == self.label

    def to_pb(self):
        """Serialize this filter as a ``RowFilter`` protobuf.

        :rtype: :class:`.data_v2_pb2.RowFilter`
        :returns: Protobuf with ``apply_label_transformer`` set.
        """
        label_pb = data_v2_pb2.RowFilter(apply_label_transformer=self.label)
        return label_pb
class _FilterCombination(RowFilter):
    """Base class for filters that combine a list of other row filters.

    Holds the shared ``filters`` list and equality behavior used by
    :class:`RowFilterChain` and :class:`RowFilterUnion`.  (The previous
    docstring was copied verbatim from :class:`RowFilterChain`; this class
    itself imposes no ordering semantics.)

    :type filters: list
    :param filters: List of :class:`RowFilter`
    """

    def __init__(self, filters=None):
        # Use None as the default to avoid a shared mutable default list.
        if filters is None:
            filters = []
        self.filters = filters

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return other.filters == self.filters
class RowFilterChain(_FilterCombination):
    """Applies several row filters in sequence.

    Each filter processes the output of the previous one, starting from
    the unfiltered row.

    :type filters: list
    :param filters: List of :class:`RowFilter`
    """

    def to_pb(self):
        """Serialize this filter as a ``RowFilter`` protobuf.

        :rtype: :class:`.data_v2_pb2.RowFilter`
        :returns: Protobuf whose ``chain`` field holds the sub-filters.
        """
        sub_pbs = [sub_filter.to_pb() for sub_filter in self.filters]
        return data_v2_pb2.RowFilter(
            chain=data_v2_pb2.RowFilter.Chain(filters=sub_pbs))
class RowFilterUnion(_FilterCombination):
    """Runs several row filters in parallel and interleaves their output.

    If multiple cells are produced with the same column and timestamp,
    they will all appear in the output row in an unspecified mutual order.

    :type filters: list
    :param filters: List of :class:`RowFilter`
    """

    def to_pb(self):
        """Serialize this filter as a ``RowFilter`` protobuf.

        :rtype: :class:`.data_v2_pb2.RowFilter`
        :returns: Protobuf whose ``interleave`` field holds the sub-filters.
        """
        sub_pbs = [sub_filter.to_pb() for sub_filter in self.filters]
        return data_v2_pb2.RowFilter(
            interleave=data_v2_pb2.RowFilter.Interleave(filters=sub_pbs))
class ConditionalRowFilter(RowFilter):
    """Ternary row filter: runs one of two filters based on a predicate.

    If ``base_filter`` returns any cells in the row, ``true_filter`` is
    executed; otherwise ``false_filter`` is.

    .. note::

        The ``base_filter`` does not execute atomically with the true and
        false filters, which may lead to inconsistent or unexpected
        results. Additionally, executing a :class:`ConditionalRowFilter`
        has poor performance on the server, especially when
        ``false_filter`` is set.

    :type base_filter: :class:`RowFilter`
    :param base_filter: The filter to condition on before executing the
                        true/false filters.

    :type true_filter: :class:`RowFilter`
    :param true_filter: (Optional) The filter to execute if there are any
                        cells matching ``base_filter``. If not provided, no
                        results will be returned in the true case.

    :type false_filter: :class:`RowFilter`
    :param false_filter: (Optional) The filter to execute if there are no
                         cells matching ``base_filter``. If not provided,
                         no results will be returned in the false case.
    """

    def __init__(self, base_filter, true_filter=None, false_filter=None):
        self.base_filter = base_filter
        self.true_filter = true_filter
        self.false_filter = false_filter

    def __eq__(self, other):
        return (isinstance(other, self.__class__) and
                other.base_filter == self.base_filter and
                other.true_filter == self.true_filter and
                other.false_filter == self.false_filter)

    def to_pb(self):
        """Serialize this filter as a ``RowFilter`` protobuf.

        :rtype: :class:`.data_v2_pb2.RowFilter`
        :returns: Protobuf whose ``condition`` field holds the predicate
                  plus whichever true/false branches are present.
        """
        kwargs = {'predicate_filter': self.base_filter.to_pb()}
        if self.true_filter is not None:
            kwargs['true_filter'] = self.true_filter.to_pb()
        if self.false_filter is not None:
            kwargs['false_filter'] = self.false_filter.to_pb()
        return data_v2_pb2.RowFilter(
            condition=data_v2_pb2.RowFilter.Condition(**kwargs))
| |
#!/usr/bin/env python
"""Copyright (c) 2016, Dilithium Power Systems LLC All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of Dilithium Power Systems LLC.
"""
import logging
from Tkinter import *
import ttk
import time
import sys
sys.path.append('./util')
sys.path.append('../util')
import can_ethernet
import can_msg_pb2
import eeprom
import mppt
from multiprocessing import freeze_support
class GUI(Frame):
    """Tkinter front-end for Photon MPPTs on a CAN-to-Ethernet bridge.

    Shows live telemetry (input voltage/current/power, output voltage,
    temperature) for up to 16 tracker channels and opens a per-channel
    Toplevel window for EEPROM configuration.

    NOTE(review): GUI subclasses Frame but never calls Frame.__init__;
    all widgets are gridded directly into the top-level window -- confirm
    the Frame base class is actually needed.
    """
    def writeEEPROM(self):
        """Write the values entered in the config window into the selected
        tracker's EEPROM, reset the tracker, then refresh the displayed
        values from the hardware."""
        # see if it's possible to configure
        #try:
        if True:
            self.configChannel = self.selectedChannel.get()
            ee = self.tracker[self.configChannel].ee
        #except:
        #    return
        # NOTE(review): self.eepromNewValueVar is never created anywhere in
        # this class (configureMPPT only builds eepromValueVar) -- confirm
        # where the "new value" Entry variables are supposed to come from.
        nEEPROMValues = len(ee.data)
        for i in range(nEEPROMValues):
            eeName = ee.data[i][0]
            valueToWrite = self.eepromNewValueVar[i].get()
            print 'name= {0:s} val= {1:g}'.format(eeName, valueToWrite)
            ee.writeValue(eeName, valueToWrite)
        # reset the mppt so the new EEPROM contents take effect
        self.tracker[self.configChannel].reset()
        self.discoverMPPTs()
        ee = self.tracker[self.configChannel].ee
        # re-read every parameter and push it back into the config window
        for i in range(len(ee.data)):
            eeType = ee.data[i][1]
            eeValue = ee.data[i][2]
            if(eeType == 'int32'):
                self.eepromValueVar[i].set('{:d}'.format(eeValue))
            else:
                self.eepromValueVar[i].set('{:g}'.format(eeValue))
    def loadConfigFromFile(self):
        """Load 'configuration.csv' into the selected tracker's EEPROM for
        the serial number typed in the config window, reset the tracker,
        and refresh the displayed values."""
        # see if it's possible to configure
        if True:
        #try:
            self.configChannel = self.selectedChannel.get()
            ee = self.tracker[self.configChannel].ee
        #except:
        #    return
        filename = 'configuration.csv'
        sn = int(self.newSN.get())
        ee.loadConfigurationFromFile(filename, sn)
        # reset the mppt
        self.tracker[self.configChannel].reset()
        self.discoverMPPTs()
        # load the new values from the mppt
        ee = self.tracker[self.configChannel].ee
        # load the values into the gui
        for i in range(len(ee.data)):
            eeType = ee.data[i][1]
            eeValue = ee.data[i][2]
            if(eeType == 'int32'):
                self.eepromValueVar[i].set('{:d}'.format(eeValue))
            else:
                self.eepromValueVar[i].set('{:g}'.format(eeValue))
    def configureMPPT(self):
        """Open a Toplevel configuration window listing every EEPROM
        parameter (name, type, current value) of the selected tracker."""
        # see if it's possible to configure
        if True:
        #try:
            self.configChannel = self.selectedChannel.get()
            ee = self.tracker[self.configChannel].ee
        #except:
        #    return
        # disable the button so only one config window can be opened
        self.configureButton.configure(state=DISABLED)
        cw = Toplevel(self.w)
        self.cw = cw
        # intercept the destroy action to run the close function first
        cw.protocol('WM_DELETE_WINDOW', self.CloseConfigWindow)
        # title the window
        cw.wm_title(
            'MPPT Channel {0:d} Configuration'.format(
                self.configChannel))
        px = 2
        py = 2
        # set up column widths
        cw.columnconfigure(0, minsize=100, weight=10)
        cw.columnconfigure(1, minsize=100, weight=10)
        cw.columnconfigure(2, minsize=100, weight=10)
        cw.columnconfigure(3, minsize=100, weight=10)
        # column labels
        Label(cw, text='Parameter').grid(row=1, column=0, padx=px, pady=py)
        Label(cw, text='Type').grid(row=1, column=1, padx=px, pady=py)
        Label(cw, text='Current Value').grid(row=1, column=2, padx=px, pady=py)
        # initialize the individual channel objects
        nEEPROMValues = len(ee.data)
        self.eepromName = {}
        self.eepromType = {}
        self.eepromValue = {}
        self.eepromValueVar = {}
        self.loadConfigButton = Button(
            cw,
            text='Load New Config',
            command=self.loadConfigFromFile)
        self.loadConfigButton.grid(row=0, column=0, sticky=W, padx=px, pady=py)
        Label(cw, text='SN to Write').grid(row=0, column = 1, padx = px, pady = py)
        self.newSN = StringVar()
        self.newSN.set('{0:d}'.format(ee.data[0][2]))
        # key-press validator restricting the SN entry to 0..32000 decimal
        vcmd = cw.register(self.validateCANSNEntry)
        self.newSNEntry = Entry(cw, textvariable=self.newSN, validate='key',
                                validatecommand=(vcmd, '%P'))
        self.newSNEntry.grid(row=(0), column=2, sticky = W)
        # one row per EEPROM parameter: name, type, current value
        for i in range(nEEPROMValues):
            rw = i + 2
            eeName = ee.data[i][0]
            eeType = ee.data[i][1]
            # eeprom parameter name
            self.eepromName[i] = Label(cw, text='{0:s}'.format(eeName))
            self.eepromName[i].grid(
                row=rw,
                column=0,
                sticky=W,
                padx=px,
                pady=py)
            # eeprom parameter type
            self.eepromType[i] = Label(cw, text='{0:s}'.format(eeType))
            self.eepromType[i].grid(
                row=rw,
                column=1,
                sticky=W,
                padx=px,
                pady=py)
            #try:
            if True:
                eeValue = ee.readValue(eeName)
            #except:
            #    eeValue = -1
            # eeprom parameter value
            self.eepromValueVar[i] = StringVar()
            if(eeType == 'int32'):
                self.eepromValueVar[i].set('{:d}'.format(eeValue))
            else:
                self.eepromValueVar[i].set('{:g}'.format(eeValue))
            self.eepromValue[i] = Label(
                cw,
                textvariable=self.eepromValueVar[i])
            self.eepromValue[i].grid(
                row=rw,
                column=2,
                sticky=W,
                padx=px,
                pady=py)
    def CloseConfigWindow(self):
        """Re-enable the Config button and tear down the config window."""
        self.configureButton.configure(state=NORMAL)
        self.cw.destroy()
    def validateCANSNEntry(self, P):
        """Tk 'key' validator for the SN entry: accept empty text or a
        base-10 integer in [0, 32000]."""
        try:
            if P != '':
                v = int(P, 10)
                if v > 32000 or v < 0:
                    raise ValueError
            return True
        except ValueError:
            return False
    def validateCANAddressEntry(self, P):
        """Tk 'key' validator for the CAN base address: accept a hex value
        no larger than 0x7ff (an 11-bit standard CAN identifier).

        NOTE(review): an empty entry is rejected here (int('') raises),
        unlike the SN validator -- confirm that is intended."""
        try:
            v = int(P, 16)
            if v > 0x7ff:
                raise ValueError
            return True
        except ValueError:
            return False
    def configCAN(self):
        """Open (or reopen) the CAN-over-Ethernet bridge at the selected
        bitrate and enable the Discover button."""
        self.killThread()
        self.guiStatus.config(text='Status: Configuring CAN Bus')
        bitrate = int(self.bitrateStr.get())
        if True:
            # close any previous connection before reconnecting
            if hasattr(self, 'can'):
                self.can.Close()
            self.can = can_ethernet.canEthernet()
            self.can.Connect(bitrate)
            self.guiStatus.config(text='Status: Configuring CAN Bus Succeeded')
            self.bridge.config(text='CAN Bridge: %x' % (self.can.bridgeIP))
            self.discoverButton.config(state=NORMAL)
        #except:
        #    self.guiStatus.config(text='Status: Configuring CAN Bus Failed')
    def setConfigState(self, index, state):
        """Enable or disable every value cell and the radio button of one
        channel row. `state` is the *string* 'NORMAL' or 'DISABLED', not
        the Tk constants themselves."""
        assert state == 'NORMAL' or state == 'DISABLED'
        if(state == 'NORMAL'):
            self.inputVoltage[index].config(state=NORMAL)
            self.inputCurrent[index].config(state=NORMAL)
            self.inputPower[index].config(state=NORMAL)
            self.outputVoltage[index].config(state=NORMAL)
            self.temperature[index].config(state=NORMAL)
            self.channelSelected[index].config(state=NORMAL)
        else:
            self.inputVoltage[index].config(state=DISABLED)
            self.inputCurrent[index].config(state=DISABLED)
            self.inputPower[index].config(state=DISABLED)
            self.outputVoltage[index].config(state=DISABLED)
            self.temperature[index].config(state=DISABLED)
            self.channelSelected[index].config(state=DISABLED)
        return
    def setValues(self, index, error=False, errorString=''):
        """Refresh one channel's value cells from its tracker object, or
        write `errorString` into every cell when error is True."""
        assert error == True or error == False
        if error == False:
            # if there's no error, update the values
            self.inputVoltageStr[index].set(
                '{0:g}'.format(
                    self.tracker[index].vin))
            self.inputCurrentStr[index].set(
                '{0:g}'.format(
                    self.tracker[index].iin))
            # input power is derived: vin * iin
            self.inputPowerStr[index].set(
                '{0:g}'.format(
                    self.tracker[index].vin *
                    self.tracker[index].iin))
            self.outputVoltageStr[index].set(
                '{0:g}'.format(
                    self.tracker[index].vout))
            self.temperatureStr[index].set(
                '{0:g}'.format(
                    self.tracker[index].temp))
        else:
            # if there's an error, print the error string
            self.inputVoltageStr[index].set(errorString)
            self.inputCurrentStr[index].set(errorString)
            self.inputPowerStr[index].set(errorString)
            self.outputVoltageStr[index].set(errorString)
            self.temperatureStr[index].set(errorString)
        return
    def killThread(self):
        """Cancel the pending after() callback driving the periodic status
        update, if one is scheduled."""
        if self.updateJob is not None:
            self.w.after_cancel(self.updateJob)
    def discoverMPPTs(self):
        """Probe all 16 CAN addresses for trackers, update each row's
        status text, and start the periodic update loop if any respond."""
        logging.info('Discovering MPPTs')
        first = True
        # NOTE(review): `first` is never used below -- confirm leftover.
        baseAddr = int(self.baseAddr.get(), 16)
        for i in range(16):
            try:
                self.tracker[i] = mppt.mppt(i, baseAddr, self.can)
                try:
                    sn = self.tracker[i].ee.readValue('serialNumber')
                    swRev = self.tracker[i].ee.readValue('SWVersion')
                    self.mpptStatus[i].config(
                        text='SN ' +
                        str(sn) +
                        ' SW ' +
                        str(swRev))
                except eeprom.MpptEepromError:
                    self.mpptStatus[i].config(text='Error Reading SN')
                self.setConfigState(i, 'NORMAL')
                self.trackerFound[i] = 1
            except mppt.MpptNotPresent:
                self.mpptStatus[i].config(text='Not Found')
                self.inputVoltage[i].config
                # NOTE(review): the line above is a bare attribute access
                # (never called) and has no effect -- confirm intent.
                self.trackerFound[i] = 0
                self.setConfigState(i, 'DISABLED')
            # update the gui
            self.w.update_idletasks()
        self.guiStatus.config(
            text='Status: {0:g} MPPTs found'.format(sum(self.trackerFound)))
        # remember the first responding channel so it can be pre-selected
        firstChannel = -1
        for i in range(16):
            if self.trackerFound[i] == 1:
                firstChannel = i
                break
        # if trackers are present then start the update thread
        if sum(self.trackerFound) > 0:
            self.configureButton.config(state=NORMAL)
            self.selectedChannel.set(firstChannel)
            self.updateMPPTStatus()
        else:
            self.configureButton.config(state=DISABLED)
        return
    def updateMPPTStatus(self):
        """Periodic poll: fetch state data from every discovered tracker,
        refresh the display, and reschedule itself via after()."""
        # ensure there's only one update callback pending
        self.killThread()
        # reschedule first so the loop keeps running at updateSpeed Hz
        # (after() takes integer milliseconds; Python 2 integer division)
        self.updateJob = self.w.after(
            1000 /
            self.updateSpeed,
            self.updateMPPTStatus)
        # cycle through the mppts and, if one showed up on the bus, get its
        # data and display it
        for i in range(16):
            if(self.trackerFound[i] == 1):
                # try:
                self.tracker[i].getStateData()
                self.setValues(i)
                # except:
                #     self.setValues(i, True, 'error')
            else:
                self.setValues(i, True, '0')
        # log the measured refresh rate
        self.updateRate = 1 / (time.time() - self.lastUpdateTime)
        self.lastUpdateTime = time.time()
        updateRateFail = self.updateRate < self.updateSpeed / 5
        if updateRateFail:
            logging.error('Rate too slow, Update Rate = %0.1fhz' % self.updateRate)
        else:
            logging.info('New Update, Update Rate = %0.1fhz' % self.updateRate)
        self.updateNumber += 1
        return
    def Close(self):
        """Shut down the CAN connection (if one was opened) and quit Tk."""
        if hasattr(self, 'can'):
            self.can.Close()
        self.w.quit()
    def __init__(self, w):
        """Build the full widget grid inside top-level window `w` and wire
        up the CAN / discovery / configuration controls."""
        logging.basicConfig(level=logging.INFO)
        # make a reference back to the top window
        self.w = w
        logging.info('Initialized GUI')
        # handle closing the window gracefully
        w.protocol('WM_DELETE_WINDOW', self.Close)
        # setting up tkinter environment
        color = '#%02x%02x%02x' % (158, 158, 158)
        # NOTE(review): `color` is computed but never used -- confirm.
        w.title('Photon MPPT Status')
        # set up column widths
        w.columnconfigure(0, minsize=50, weight=10)
        w.columnconfigure(1, minsize=50, weight=10)
        w.columnconfigure(2, minsize=50, weight=6)
        w.columnconfigure(3, minsize=50, weight=6)
        w.columnconfigure(4, minsize=50, weight=6)
        w.columnconfigure(5, minsize=50, weight=6)
        w.columnconfigure(6, minsize=50, weight=6)
        # classvars: per-channel widget handles (16 channels each)
        self.lastUpdateTime = time.time()
        self.channelSelected = [None] * 16
        self.tracker = [None] * 16
        self.mpptStatus = [None] * 16
        self.inputVoltage = [None] * 16
        self.inputVoltageStr = [None] * 16
        self.inputCurrent = [None] * 16
        self.inputCurrentStr = [None] * 16
        self.inputPower = [None] * 16
        self.inputPowerStr = [None] * 16
        self.outputVoltage = [None] * 16
        self.trackerFound = [None] * 16
        self.outputVoltageStr = [None] * 16
        self.temperature = [None] * 16
        self.temperatureStr = [None] * 16
        self.updateNumber = 0
        self.updateJob = None
        self.updateSpeed = 2  # Hz (status poll rate)
        self.configWindowOpen = 0
        px = 2
        py = 2
        # tkinter object vars
        self.heartbeat = IntVar()
        self.baseAddr = StringVar()
        self.baseAddr.set('0x600')
        self.bitrateStr = StringVar()
        self.bitrateStr.set('125000')
        self.selectedChannel = IntVar()
        self.selectedChannel.set(-1)
        # setting up menubar
        self.menubar = Menu(w)
        w.configure(menu=self.menubar)
        #self.fileMenu = Menu(self.menubar)
        #self.menubar.add_cascade(label="File", menu=self.fileMenu)
        # column labels
        Label(w, text='Channel').grid(row=0, column=0, padx=px, pady=py)
        Label(w, text='Status').grid(row=0, column=1, padx=px, pady=py)
        Label(w, text='Input Voltage').grid(row=0, column=2, padx=px, pady=py)
        Label(w, text='Input Current').grid(row=0, column=3, padx=px, pady=py)
        Label(w, text='Input Power').grid(row=0, column=4, padx=px, pady=py)
        Label(w, text='Output Voltage').grid(row=0, column=5, padx=px, pady=py)
        Label(w, text='MPPT Temp').grid(row=0, column=6, padx=px, pady=py)
        # initialize the individual channel objects
        for i in range(16):
            rw = i + 1
            # radio button selecting this channel for configuration
            txt = '{0:g}'.format(i)
            self.channelSelected[i] = Radiobutton(w, text=txt, variable=self.selectedChannel,
                                                  value=i, state=DISABLED)
            self.channelSelected[i].grid(
                row=rw,
                column=0,
                sticky=W,
                padx=px,
                pady=py)
            # status box for writing the serial number and software revision
            self.mpptStatus[i] = Label(w, text='')
            self.mpptStatus[i].grid(
                row=rw,
                column=1,
                sticky=W,
                padx=px,
                pady=py)
            # make handles for the objects for writing to the value cells
            self.inputVoltageStr[i] = StringVar()
            self.inputCurrentStr[i] = StringVar()
            self.inputPowerStr[i] = StringVar()
            self.outputVoltageStr[i] = StringVar()
            self.temperatureStr[i] = StringVar()
            self.inputVoltage[i] = Entry(
                w,
                state=DISABLED,
                textvariable=self.inputVoltageStr[i])
            self.inputVoltage[i].grid(
                row=rw,
                column=2,
                sticky=W,
                padx=px,
                pady=py)
            self.inputCurrent[i] = Entry(
                w,
                state=DISABLED,
                textvariable=self.inputCurrentStr[i])
            self.inputCurrent[i].grid(
                row=rw,
                column=3,
                sticky=W,
                padx=px,
                pady=py)
            self.inputPower[i] = Entry(
                w,
                state=DISABLED,
                textvariable=self.inputPowerStr[i])
            self.inputPower[i].grid(
                row=rw,
                column=4,
                sticky=W,
                padx=px,
                pady=py)
            self.outputVoltage[i] = Entry(
                w,
                state=DISABLED,
                textvariable=self.outputVoltageStr[i])
            self.outputVoltage[i].grid(
                row=rw,
                column=5,
                sticky=W,
                padx=px,
                pady=py)
            self.temperature[i] = Entry(
                w,
                state=DISABLED,
                textvariable=self.temperatureStr[i])
            self.temperature[i].grid(
                row=rw,
                column=6,
                sticky=W,
                padx=px,
                pady=py)
        # footer status labels
        self.guiStatus = Label(w, text='Status: Init')
        self.guiStatus.grid(
            row=18,
            column=0,
            columnspan=2,
            sticky=W,
            padx=px,
            pady=py)
        # status for the can bridge
        self.bridge = Label(w, text='CAN Bridge:')
        self.bridge.grid(
            row=19,
            column=0,
            columnspan=2,
            sticky=W,
            padx=px,
            pady=py)
        # label for the can address
        self.canAddrLabel = Label(w, text='CAN Base Addr:')
        self.canAddrLabel.grid(row=20, column=0, sticky=W, padx=px, pady=py)
        # button to initialize the can bus
        self.canButton = ttk.Button(w, text='Init CAN', command=self.configCAN)
        self.canButton.grid(row=17, column=0)
        # button to discover the mppts (enabled once CAN is up)
        self.discoverButton = ttk.Button(w, state=DISABLED, text='Discover MPPTs',
                                         command=self.discoverMPPTs)
        self.discoverButton.grid(row=17, column=1)
        # button to configure the mppt (enabled once trackers are found)
        self.configureButton = ttk.Button(
            w,
            text='Config MPPT',
            command=self.configureMPPT)
        self.configureButton.grid(row=17, column=2)
        self.configureButton.configure(state=DISABLED)
        # entry for the can address - this uses a validator
        vcmd = w.register(self.validateCANAddressEntry)
        self.canAddrEntry = Entry(
            w,
            textvariable=self.baseAddr,
            validate='key',
            validatecommand=(
                vcmd,
                '%P'))
        self.canAddrEntry.grid(row=20, column=1, sticky=W)
        self.bitrateLabel = Label(w, text='CAN Bitrate:')
        self.bitrateLabel.grid(row=21, column=0, sticky=W, padx=px, pady=py)
        # read-only dropdown of supported CAN bitrates (bits/sec)
        self.bitrateSelect = ttk.Combobox(
            w,
            textvariable=self.bitrateStr,
            state='readonly')
        self.bitrateSelect.grid(row=21, column=1, sticky=W, padx=px, pady=py)
        self.bitrateSelect['values'] = (
            '10000',
            '20000',
            '50000',
            '125000',
            '250000',
            '500000',
            '1000000')
        logging.info('All GUI Elements Initialized')
        #self.heartbeatIndicator = Checkbutton(w, variable=self.heartbeat, onvalue=1, offvalue=0, image=None, bitmap=None, indicatoron=FALSE, text="Heartbeat")
        #self.heartbeatIndicator.grid(row=17, column = 2)
        # self.configCAN()
        # time.sleep(1)
        # self.discoverMPPTs()
if __name__ == '__main__':
    # Required for frozen (py2exe/pyinstaller) builds using multiprocessing.
    freeze_support()
    # Build the Tk root window, attach the application and enter the loop.
    top = Tk()
    g = GUI(top)
    top.mainloop()
| |
"""
HTML Widget classes
"""
from __future__ import unicode_literals
import copy
import datetime
import re
from itertools import chain
from django.conf import settings
from django.forms.utils import to_current_timezone
from django.templatetags.static import static
from django.utils import datetime_safe, formats, six
from django.utils.dates import MONTHS
from django.utils.deprecation import (
RemovedInDjango20Warning, RenameMethodsBase,
)
from django.utils.encoding import (
force_str, force_text, python_2_unicode_compatible,
)
from django.utils.formats import get_format
from django.utils.html import format_html, html_safe
from django.utils.safestring import mark_safe
from django.utils.six.moves import range
from django.utils.translation import ugettext_lazy
from .renderers import get_default_renderer
# Public API of this module.
__all__ = (
    'Media', 'MediaDefiningClass', 'Widget', 'TextInput', 'NumberInput',
    'EmailInput', 'URLInput', 'PasswordInput', 'HiddenInput',
    'MultipleHiddenInput', 'FileInput', 'ClearableFileInput', 'Textarea',
    'DateInput', 'DateTimeInput', 'TimeInput', 'CheckboxInput', 'Select',
    'NullBooleanSelect', 'SelectMultiple', 'RadioSelect',
    'CheckboxSelectMultiple', 'MultiWidget', 'SplitDateTimeWidget',
    'SplitHiddenDateTimeWidget', 'SelectDateWidget',
)
# Asset kinds a Media object manages; each has add_<type>/render_<type>.
MEDIA_TYPES = ('css', 'js')
@html_safe
@python_2_unicode_compatible
class Media(object):
    """Container for the CSS/JS assets attached to a form or widget.

    CSS paths are stored per medium in ``_css``; JS paths are an ordered,
    de-duplicated list in ``_js``. Rendering emits <link>/<script> tags.
    """
    def __init__(self, media=None, **kwargs):
        # Either copy the attribute dict of an inner ``Media`` class or take
        # the css/js definitions straight from keyword arguments.
        media_attrs = media.__dict__ if media else kwargs
        self._css = {}
        self._js = []
        for media_type in MEDIA_TYPES:
            getattr(self, 'add_' + media_type)(media_attrs.get(media_type))
    def __str__(self):
        return self.render()
    def render(self):
        rendered = []
        for media_type in MEDIA_TYPES:
            rendered.extend(getattr(self, 'render_' + media_type)())
        return mark_safe('\n'.join(rendered))
    def render_js(self):
        tags = []
        for path in self._js:
            tags.append(format_html(
                '<script type="text/javascript" src="{}"></script>',
                self.absolute_path(path)))
        return tags
    def render_css(self):
        # Sort the medium keys so the rendering order stays deterministic.
        return (
            format_html(
                '<link href="{}" type="text/css" media="{}" rel="stylesheet" />',
                self.absolute_path(path), medium
            )
            for medium in sorted(self._css.keys())
            for path in self._css[medium]
        )
    def absolute_path(self, path):
        """Return *path* unchanged when it is already absolute; otherwise
        resolve it through django.templatetags.static.static()."""
        if path.startswith(('http://', 'https://', '/')):
            return path
        return static(path)
    def __getitem__(self, name):
        """Return a Media object holding only the given media type."""
        if name not in MEDIA_TYPES:
            raise KeyError('Unknown media type "%s"' % name)
        return Media(**{str(name): getattr(self, '_' + name)})
    def add_js(self, data):
        # Append new script paths, keeping order and skipping duplicates.
        for path in data or []:
            if path not in self._js:
                self._js.append(path)
    def add_css(self, data):
        if not data:
            return
        for medium, paths in data.items():
            for path in paths:
                # Skip paths already registered for this medium.
                if not self._css.get(medium) or path not in self._css[medium]:
                    self._css.setdefault(medium, []).append(path)
    def __add__(self, other):
        combined = Media()
        for media_type in MEDIA_TYPES:
            getattr(combined, 'add_' + media_type)(getattr(self, '_' + media_type, None))
            getattr(combined, 'add_' + media_type)(getattr(other, '_' + media_type, None))
        return combined
def media_property(cls):
    """Build a ``media`` property for *cls* that merges the media of its
    superclass with the inner ``Media`` definition on *cls* itself."""
    def _media(self):
        # Start from the superclass media, if the superclass defines any.
        try:
            base = super(cls, self).media
        except AttributeError:
            base = Media()
        definition = getattr(cls, 'Media', None)
        if not definition:
            # No inner Media class: inherit the base media unchanged.
            return base
        extend = getattr(definition, 'extend', True)
        if not extend:
            # extend=False: this class's definition replaces the base.
            return Media(definition)
        if extend is True:
            merged = base
        else:
            # ``extend`` lists the media types to inherit from the base.
            merged = Media()
            for medium in extend:
                merged = merged + base[medium]
        return merged + Media(definition)
    return property(_media)
class MediaDefiningClass(type):
    """Metaclass that adds a computed ``media`` property to classes whose
    body does not define one explicitly."""
    def __new__(mcs, name, bases, attrs):
        created = super(MediaDefiningClass, mcs).__new__(mcs, name, bases, attrs)
        # Only synthesize ``media`` when the class body didn't provide it.
        if 'media' not in attrs:
            created.media = media_property(created)
        return created
class RenameWidgetMethods(MediaDefiningClass, RenameMethodsBase):
    # Transitional metaclass: transparently maps the deprecated
    # ``_format_value`` hook onto ``format_value`` until Django 2.0.
    renamed_methods = (
        ('_format_value', 'format_value', RemovedInDjango20Warning),
    )
class Widget(six.with_metaclass(RenameWidgetMethods)):
    """Base class for all form widgets."""
    needs_multipart_form = False  # True when enctype="multipart/form-data" is required
    is_localized = False
    is_required = False
    supports_microseconds = True
    def __init__(self, attrs=None):
        # Copy so later mutation of the caller's dict can't leak in.
        self.attrs = {} if attrs is None else attrs.copy()
    def __deepcopy__(self, memo):
        clone = copy.copy(self)
        clone.attrs = self.attrs.copy()
        memo[id(self)] = clone
        return clone
    @property
    def is_hidden(self):
        """True for widgets rendered as <input type="hidden">."""
        return getattr(self, 'input_type', None) == 'hidden'
    def subwidgets(self, name, value, attrs=None):
        context = self.get_context(name, value, attrs)
        yield context['widget']
    def format_value(self, value):
        """
        Return a value as it should appear when rendered in a template.
        """
        if value == '' or value is None:
            return None
        if self.is_localized:
            return formats.localize_input(value)
        return force_text(value)
    def get_context(self, name, value, attrs):
        return {
            'widget': {
                'name': name,
                'is_hidden': self.is_hidden,
                'required': self.is_required,
                'value': self.format_value(value),
                'attrs': self.build_attrs(self.attrs, attrs),
                'template_name': self.template_name,
            },
        }
    def render(self, name, value, attrs=None, renderer=None):
        """
        Return this Widget rendered as HTML, as a Unicode string.
        """
        context = self.get_context(name, value, attrs)
        return self._render(self.template_name, context, renderer)
    def _render(self, template_name, context, renderer=None):
        if renderer is None:
            renderer = get_default_renderer()
        return mark_safe(renderer.render(template_name, context))
    def build_attrs(self, base_attrs, extra_attrs=None):
        """Merge *extra_attrs* over a copy of *base_attrs*."""
        merged = base_attrs.copy()
        if extra_attrs is not None:
            merged.update(extra_attrs)
        return merged
    def value_from_datadict(self, data, files, name):
        """
        Given a dictionary of data and this widget's name, return the value
        of this widget, or None if it's not provided.
        """
        return data.get(name)
    def value_omitted_from_data(self, data, files, name):
        return name not in data
    def id_for_label(self, id_):
        """
        Return the HTML ID attribute of this Widget for use by a <label>,
        given the ID of the field, or None if no ID is available.

        Widgets with multiple HTML elements (hence multiple IDs) override
        this to return the ID corresponding to the first element.
        """
        return id_
    def use_required_attribute(self, initial):
        return not self.is_hidden
class Input(Widget):
    """
    Base class for all <input> widgets.
    """
    input_type = None  # Subclasses must define this.
    template_name = 'django/forms/widgets/input.html'
    def __init__(self, attrs=None):
        if attrs is not None:
            attrs = attrs.copy()
            # attrs={'type': ...} overrides the class-level input_type.
            self.input_type = attrs.pop('type', self.input_type)
        super(Input, self).__init__(attrs)
    def get_context(self, name, value, attrs):
        ctx = super(Input, self).get_context(name, value, attrs)
        ctx['widget']['type'] = self.input_type
        return ctx
class TextInput(Input):
    # <input type="text">
    input_type = 'text'
    template_name = 'django/forms/widgets/text.html'
class NumberInput(Input):
    # <input type="number">
    input_type = 'number'
    template_name = 'django/forms/widgets/number.html'
class EmailInput(Input):
    # <input type="email">
    input_type = 'email'
    template_name = 'django/forms/widgets/email.html'
class URLInput(Input):
    # <input type="url">
    input_type = 'url'
    template_name = 'django/forms/widgets/url.html'
class PasswordInput(Input):
    """<input type="password"> that blanks its value on render by default."""
    input_type = 'password'
    template_name = 'django/forms/widgets/password.html'
    def __init__(self, attrs=None, render_value=False):
        super(PasswordInput, self).__init__(attrs)
        # When False (the default) any existing value is suppressed so the
        # password is not echoed back into the HTML.
        self.render_value = render_value
    def get_context(self, name, value, attrs):
        value = value if self.render_value else None
        return super(PasswordInput, self).get_context(name, value, attrs)
class HiddenInput(Input):
    # <input type="hidden">; Widget.is_hidden becomes True for this type.
    input_type = 'hidden'
    template_name = 'django/forms/widgets/hidden.html'
class MultipleHiddenInput(HiddenInput):
    """
    Handle <input type="hidden"> for fields that carry a list of values.
    """
    template_name = 'django/forms/widgets/multiple_hidden.html'
    def get_context(self, name, value, attrs):
        context = super(MultipleHiddenInput, self).get_context(name, value, attrs)
        final_attrs = context['widget']['attrs']
        id_ = final_attrs.get('id')
        subwidgets = []
        for index, item in enumerate(context['widget']['value']):
            widget_attrs = final_attrs.copy()
            if id_:
                # Suffix each input's ID with its index so the repeated
                # hidden inputs don't all share one ID.
                widget_attrs['id'] = '%s_%s' % (id_, index)
            hidden = HiddenInput()
            hidden.is_required = self.is_required
            subwidgets.append(hidden.get_context(name, item, widget_attrs)['widget'])
        context['widget']['subwidgets'] = subwidgets
        return context
    def value_from_datadict(self, data, files, name):
        # QueryDicts expose getlist(); plain dicts fall back to get().
        if hasattr(data, 'getlist'):
            return data.getlist(name)
        return data.get(name)
    def format_value(self, value):
        return value if value is not None else []
class FileInput(Input):
    """<input type="file">; reads from FILES and never echoes a value."""
    input_type = 'file'
    needs_multipart_form = True
    template_name = 'django/forms/widgets/file.html'
    def format_value(self, value):
        """File inputs never render a value."""
        return None
    def value_from_datadict(self, data, files, name):
        """File widgets take their data from FILES, not POST."""
        return files.get(name)
    def value_omitted_from_data(self, data, files, name):
        return name not in files
# Sentinel returned by ClearableFileInput when the user both uploads a new
# file AND checks the "clear" checkbox; FileField turns it into a
# ValidationError.
FILE_INPUT_CONTRADICTION = object()
class ClearableFileInput(FileInput):
    """File input with an extra checkbox allowing the current value to be
    cleared when the field is not required."""
    clear_checkbox_label = ugettext_lazy('Clear')
    initial_text = ugettext_lazy('Currently')
    input_text = ugettext_lazy('Change')
    template_name = 'django/forms/widgets/clearable_file_input.html'
    def clear_checkbox_name(self, name):
        """Name of the clear checkbox, derived from the file input's name."""
        return name + '-clear'
    def clear_checkbox_id(self, name):
        """HTML id for the clear checkbox, derived from its name."""
        return name + '_id'
    def is_initial(self, value):
        """A value counts as "initial" when it is a file with a URL."""
        return bool(value and getattr(value, 'url', False))
    def format_value(self, value):
        """Return the file object only when it has a defined url attribute."""
        if self.is_initial(value):
            return value
        return None
    def get_context(self, name, value, attrs):
        context = super(ClearableFileInput, self).get_context(name, value, attrs)
        checkbox_name = self.clear_checkbox_name(name)
        context['widget'].update({
            'checkbox_name': checkbox_name,
            'checkbox_id': self.clear_checkbox_id(checkbox_name),
            'is_initial': self.is_initial(value),
            'input_text': self.input_text,
            'initial_text': self.initial_text,
            'clear_checkbox_label': self.clear_checkbox_label,
        })
        return context
    def value_from_datadict(self, data, files, name):
        upload = super(ClearableFileInput, self).value_from_datadict(data, files, name)
        clear_requested = not self.is_required and CheckboxInput().value_from_datadict(
            data, files, self.clear_checkbox_name(name))
        if not clear_requested:
            return upload
        if upload:
            # Contradictory input (a new upload AND "clear" checked): return
            # a marker object that FileField turns into a ValidationError.
            return FILE_INPUT_CONTRADICTION
        # False signals "clear the existing value", as opposed to None which
        # would mean "no change".
        return False
    def use_required_attribute(self, initial):
        # Don't require re-upload when a file is already stored.
        return super(ClearableFileInput, self).use_required_attribute(initial) and not initial
    def value_omitted_from_data(self, data, files, name):
        return (
            super(ClearableFileInput, self).value_omitted_from_data(data, files, name) and
            self.clear_checkbox_name(name) not in data
        )
class Textarea(Widget):
    """<textarea> widget with sensible default dimensions."""
    template_name = 'django/forms/widgets/textarea.html'
    def __init__(self, attrs=None):
        # Slightly better defaults than HTML's native 20x2 box; caller
        # attrs override them.
        default_attrs = {'cols': '40', 'rows': '10'}
        default_attrs.update(attrs or {})
        super(Textarea, self).__init__(default_attrs)
class DateTimeBaseInput(TextInput):
    """Common base for date/time text inputs; subclasses set format_key."""
    format_key = ''
    supports_microseconds = False
    def __init__(self, attrs=None, format=None):
        super(DateTimeBaseInput, self).__init__(attrs)
        # Normalize falsy formats to None so format_value falls back to
        # the configured input formats.
        self.format = format or None
    def format_value(self, value):
        if value is None:
            return None
        fmt = self.format or formats.get_format(self.format_key)[0]
        # localize_input() returns str on Python 2, hence force_text().
        return force_text(formats.localize_input(value, fmt))
class DateInput(DateTimeBaseInput):
    # Formats with the project's DATE_INPUT_FORMATS setting.
    format_key = 'DATE_INPUT_FORMATS'
    template_name = 'django/forms/widgets/date.html'
class DateTimeInput(DateTimeBaseInput):
    # Formats with the project's DATETIME_INPUT_FORMATS setting.
    format_key = 'DATETIME_INPUT_FORMATS'
    template_name = 'django/forms/widgets/datetime.html'
class TimeInput(DateTimeBaseInput):
    # Formats with the project's TIME_INPUT_FORMATS setting.
    format_key = 'TIME_INPUT_FORMATS'
    template_name = 'django/forms/widgets/time.html'
# Defined at module level so that CheckboxInput is picklable (#17976)
def boolean_check(v):
    """Default check_test: anything except False, None, and '' is checked."""
    is_unchecked = v is False or v is None or v == ''
    return not is_unchecked
class CheckboxInput(Input):
    """<input type="checkbox">; check state is decided by ``check_test``."""
    input_type = 'checkbox'
    template_name = 'django/forms/widgets/checkbox.html'
    def __init__(self, attrs=None, check_test=None):
        super(CheckboxInput, self).__init__(attrs)
        # check_test(value) -> bool decides whether the box renders checked.
        self.check_test = check_test if check_test is not None else boolean_check
    def format_value(self, value):
        """Only return the 'value' attribute if value isn't empty."""
        if value is True or value is False or value is None or value == '':
            return None
        return force_text(value)
    def get_context(self, name, value, attrs):
        if self.check_test(value):
            if attrs is None:
                attrs = {}
            attrs['checked'] = True
        return super(CheckboxInput, self).get_context(name, value, attrs)
    def value_from_datadict(self, data, files, name):
        if name not in data:
            # Unchecked checkboxes are simply absent from HTML form posts.
            return False
        value = data.get(name)
        if isinstance(value, six.string_types):
            # Map 'true'/'false' strings (any case) to real booleans.
            value = {'true': True, 'false': False}.get(value.lower(), value)
        return bool(value)
    def value_omitted_from_data(self, data, files, name):
        # Unchecked boxes never appear in POST data, so omission can't be
        # detected reliably.
        return False
class ChoiceWidget(Widget):
allow_multiple_selected = False
input_type = None
template_name = None
option_template_name = None
add_id_index = True
checked_attribute = {'checked': True}
option_inherits_attrs = True
def __init__(self, attrs=None, choices=()):
super(ChoiceWidget, self).__init__(attrs)
# choices can be any iterable, but we may need to render this widget
# multiple times. Thus, collapse it into a list so it can be consumed
# more than once.
self.choices = list(choices)
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.attrs = self.attrs.copy()
obj.choices = copy.copy(self.choices)
memo[id(self)] = obj
return obj
def subwidgets(self, name, value, attrs=None):
"""
Yield all "subwidgets" of this widget. Used to enable iterating
options from a BoundField for choice widgets.
"""
value = self.format_value(value)
for option in self.options(name, value, attrs):
yield option
def options(self, name, value, attrs=None):
"""Yield a flat list of options for this widgets."""
for group in self.optgroups(name, value, attrs):
for option in group[1]:
yield option
def optgroups(self, name, value, attrs=None):
"""Return a list of optgroups for this widget."""
groups = []
has_selected = False
for index, (option_value, option_label) in enumerate(chain(self.choices)):
if option_value is None:
option_value = ''
subgroup = []
if isinstance(option_label, (list, tuple)):
group_name = option_value
subindex = 0
choices = option_label
else:
group_name = None
subindex = None
choices = [(option_value, option_label)]
groups.append((group_name, subgroup, index))
for subvalue, sublabel in choices:
selected = (
force_text(subvalue) in value and
(has_selected is False or self.allow_multiple_selected)
)
if selected is True and has_selected is False:
has_selected = True
subgroup.append(self.create_option(
name, subvalue, sublabel, selected, index,
subindex=subindex, attrs=attrs,
))
if subindex is not None:
subindex += 1
return groups
def create_option(self, name, value, label, selected, index, subindex=None, attrs=None):
index = str(index) if subindex is None else "%s_%s" % (index, subindex)
if attrs is None:
attrs = {}
option_attrs = self.build_attrs(self.attrs, attrs) if self.option_inherits_attrs else {}
if selected:
option_attrs.update(self.checked_attribute)
if 'id' in option_attrs:
option_attrs['id'] = self.id_for_label(option_attrs['id'], index)
return {
'name': name,
'value': value,
'label': label,
'selected': selected,
'index': index,
'attrs': option_attrs,
'type': self.input_type,
'template_name': self.option_template_name,
}
def get_context(self, name, value, attrs):
context = super(ChoiceWidget, self).get_context(name, value, attrs)
context['widget']['optgroups'] = self.optgroups(name, context['widget']['value'], attrs)
context['wrap_label'] = True
return context
def id_for_label(self, id_, index='0'):
"""
Use an incremented id for each option where the main widget
references the zero index.
"""
if id_ and self.add_id_index:
id_ = '%s_%s' % (id_, index)
return id_
def value_from_datadict(self, data, files, name):
getter = data.get
if self.allow_multiple_selected:
try:
getter = data.getlist
except AttributeError:
pass
return getter(name)
def format_value(self, value):
    """Return selected values as a list of strings ('' for None)."""
    values = value if isinstance(value, (tuple, list)) else [value]
    return ['' if v is None else force_text(v) for v in values]
class Select(ChoiceWidget):
    """Render the choices as a single <select> element."""
    input_type = 'select'
    template_name = 'django/forms/widgets/select.html'
    option_template_name = 'django/forms/widgets/select_option.html'
    add_id_index = False
    checked_attribute = {'selected': True}
    option_inherits_attrs = False

    def get_context(self, name, value, attrs):
        """Add the ``multiple`` attribute when several values may be selected."""
        ctx = super(Select, self).get_context(name, value, attrs)
        if self.allow_multiple_selected:
            ctx['widget']['attrs']['multiple'] = 'multiple'
        return ctx

    @staticmethod
    def _choice_has_empty_value(choice):
        """Return True if the choice's value is empty string or None."""
        value, _label = choice
        if value is None:
            return True
        return isinstance(value, six.string_types) and not value

    def use_required_attribute(self, initial):
        """
        Don't render 'required' if the first <option> has a value, as that's
        invalid HTML.
        """
        required = super(Select, self).use_required_attribute(initial)
        # 'required' is always okay for <select multiple>.
        if self.allow_multiple_selected:
            return required
        if not required:
            return False
        first_choice = next(iter(self.choices), None)
        return first_choice is not None and self._choice_has_empty_value(first_choice)
class NullBooleanSelect(Select):
    """
    A Select Widget intended to be used with NullBooleanField.
    """

    def __init__(self, attrs=None):
        # '1'/'2'/'3' encode Unknown/Yes/No in the rendered form data.
        super(NullBooleanSelect, self).__init__(attrs, (
            ('1', ugettext_lazy('Unknown')),
            ('2', ugettext_lazy('Yes')),
            ('3', ugettext_lazy('No')),
        ))

    def format_value(self, value):
        """Map a Python value onto the '1'/'2'/'3' wire encoding ('1' = Unknown)."""
        # Dict lookup (not identity tests) so that 1/0 behave like True/False,
        # matching the original mapping semantics.
        mapping = {True: '2', False: '3', '2': '2', '3': '3'}
        return mapping.get(value, '1')

    def value_from_datadict(self, data, files, name):
        """Decode the posted value back to True/False/None."""
        lookup = {
            '2': True,
            True: True,
            'True': True,
            '3': False,
            'False': False,
            False: False,
        }
        return lookup.get(data.get(name))
class SelectMultiple(Select):
    """A <select multiple> widget; accepts and returns lists of values."""
    allow_multiple_selected = True

    def value_from_datadict(self, data, files, name):
        """Return the list of submitted values (getlist() when available)."""
        getter = getattr(data, 'getlist', data.get)
        return getter(name)

    def value_omitted_from_data(self, data, files, name):
        # An unselected <select multiple> doesn't appear in POST data, so it's
        # never known if the value is actually omitted.
        return False
class RadioSelect(ChoiceWidget):
    """Render the choices as a list of <input type="radio"> elements."""
    input_type = 'radio'
    template_name = 'django/forms/widgets/radio.html'
    option_template_name = 'django/forms/widgets/radio_option.html'
class CheckboxSelectMultiple(ChoiceWidget):
    """Render the choices as a list of <input type="checkbox"> elements."""
    allow_multiple_selected = True
    input_type = 'checkbox'
    template_name = 'django/forms/widgets/checkbox_select.html'
    option_template_name = 'django/forms/widgets/checkbox_option.html'

    def use_required_attribute(self, initial):
        # Don't use the 'required' attribute because browser validation would
        # require all checkboxes to be checked instead of at least one.
        return False

    def value_omitted_from_data(self, data, files, name):
        # HTML checkboxes don't appear in POST data if not checked, so it's
        # never known if the value is actually omitted.
        return False

    def id_for_label(self, id_, index=None):
        """
        Don't include for="field_0" in <label> because clicking such a label
        would toggle the first checkbox.
        """
        if index is None:
            return ''
        return super(CheckboxSelectMultiple, self).id_for_label(id_, index)
class MultiWidget(Widget):
    """
    A widget that is composed of multiple widgets.

    In addition to the values added by Widget.get_context(), this widget
    adds a list of subwidgets to the context as widget['subwidgets'].
    These can be looped over and rendered like normal widgets.

    You'll probably want to use this class with MultiValueField.
    """
    template_name = 'django/forms/widgets/multiwidget.html'

    def __init__(self, widgets, attrs=None):
        # Accept widget classes as well as widget instances.
        self.widgets = [w() if isinstance(w, type) else w for w in widgets]
        super(MultiWidget, self).__init__(attrs)

    @property
    def is_hidden(self):
        # The composite widget is hidden only if every subwidget is hidden.
        return all(w.is_hidden for w in self.widgets)

    def get_context(self, name, value, attrs):
        """Build the context, producing one subwidget context per sub-value."""
        context = super(MultiWidget, self).get_context(name, value, attrs)
        if self.is_localized:
            for widget in self.widgets:
                widget.is_localized = self.is_localized
        # value is a list of values, each corresponding to a widget
        # in self.widgets.
        if not isinstance(value, list):
            value = self.decompress(value)
        final_attrs = context['widget']['attrs']
        # A widget-level 'type' attr overrides each subwidget's input_type.
        input_type = final_attrs.pop('type', None)
        id_ = final_attrs.get('id')
        subwidgets = []
        for i, widget in enumerate(self.widgets):
            if input_type is not None:
                widget.input_type = input_type
            widget_name = '%s_%s' % (name, i)
            try:
                widget_value = value[i]
            except IndexError:
                # Fewer decompressed values than widgets: render the rest empty.
                widget_value = None
            if id_:
                widget_attrs = final_attrs.copy()
                widget_attrs['id'] = '%s_%s' % (id_, i)
            else:
                # No id to override, so sharing the same dict is safe here.
                widget_attrs = final_attrs
            subwidgets.append(widget.get_context(widget_name, widget_value, widget_attrs)['widget'])
        context['widget']['subwidgets'] = subwidgets
        return context

    def id_for_label(self, id_):
        # The <label> points at the first subwidget ("<id>_0").
        if id_:
            id_ += '_0'
        return id_

    def value_from_datadict(self, data, files, name):
        # Collect each subwidget's value from its "<name>_<i>" key.
        return [widget.value_from_datadict(data, files, name + '_%s' % i) for i, widget in enumerate(self.widgets)]

    def value_omitted_from_data(self, data, files, name):
        # Omitted only if every subwidget's value is omitted.
        return all(
            widget.value_omitted_from_data(data, files, name + '_%s' % i)
            for i, widget in enumerate(self.widgets)
        )

    def decompress(self, value):
        """
        Returns a list of decompressed values for the given compressed value.
        The given value can be assumed to be valid, but not necessarily
        non-empty.
        """
        raise NotImplementedError('Subclasses must implement this method.')

    def _get_media(self):
        "Media for a multiwidget is the combination of all media of the subwidgets"
        media = Media()
        for w in self.widgets:
            media = media + w.media
        return media
    media = property(_get_media)

    def __deepcopy__(self, memo):
        # Subwidgets hold per-render state (e.g. input_type), so deep-copy them.
        obj = super(MultiWidget, self).__deepcopy__(memo)
        obj.widgets = copy.deepcopy(self.widgets)
        return obj

    @property
    def needs_multipart_form(self):
        # True if any subwidget needs multipart/form-data (file uploads).
        return any(w.needs_multipart_form for w in self.widgets)
class SplitDateTimeWidget(MultiWidget):
    """
    A Widget that splits datetime input into two <input type="text"> boxes.
    """
    supports_microseconds = False
    template_name = 'django/forms/widgets/splitdatetime.html'

    def __init__(self, attrs=None, date_format=None, time_format=None):
        subwidgets = (
            DateInput(attrs=attrs, format=date_format),
            TimeInput(attrs=attrs, format=time_format),
        )
        super(SplitDateTimeWidget, self).__init__(subwidgets, attrs)

    def decompress(self, value):
        """Split a datetime into [date, time]; [None, None] when empty."""
        if not value:
            return [None, None]
        localized = to_current_timezone(value)
        # Microseconds are dropped: this widget cannot represent them.
        return [localized.date(), localized.time().replace(microsecond=0)]
class SplitHiddenDateTimeWidget(SplitDateTimeWidget):
    """
    A Widget that splits datetime input into two <input type="hidden"> inputs.
    """
    template_name = 'django/forms/widgets/splithiddendatetime.html'

    def __init__(self, attrs=None, date_format=None, time_format=None):
        super(SplitHiddenDateTimeWidget, self).__init__(attrs, date_format, time_format)
        # Reuse the date/time subwidgets but render both as hidden inputs.
        for subwidget in self.widgets:
            subwidget.input_type = 'hidden'
class SelectDateWidget(Widget):
    """
    A Widget that splits date input into three <select> boxes.

    This also serves as an example of a Widget that has more than one HTML
    element and hence implements value_from_datadict.
    """
    # Default (value, label) pair used for the empty choice.
    none_value = (0, '---')
    month_field = '%s_month'
    day_field = '%s_day'
    year_field = '%s_year'
    template_name = 'django/forms/widgets/select_date.html'
    input_type = 'select'
    select_widget = Select
    date_re = re.compile(r'(\d{4})-(\d\d?)-(\d\d?)$')

    def __init__(self, attrs=None, years=None, months=None, empty_label=None):
        self.attrs = attrs or {}
        # Optional list or tuple of years to use in the "year" select box.
        if years:
            self.years = years
        else:
            this_year = datetime.date.today().year
            self.years = range(this_year, this_year + 10)
        # Optional dict of months to use in the "month" select box.
        if months:
            self.months = months
        else:
            self.months = MONTHS
        # Optional string, list, or tuple to use as empty_label.
        # A 3-tuple gives a distinct label per (year, month, day) select.
        if isinstance(empty_label, (list, tuple)):
            if not len(empty_label) == 3:
                raise ValueError('empty_label list/tuple must have 3 elements.')
            self.year_none_value = (0, empty_label[0])
            self.month_none_value = (0, empty_label[1])
            self.day_none_value = (0, empty_label[2])
        else:
            if empty_label is not None:
                self.none_value = (0, empty_label)
            self.year_none_value = self.none_value
            self.month_none_value = self.none_value
            self.day_none_value = self.none_value

    def get_context(self, name, value, attrs):
        """Build one Select sub-context per date component, ordered per DATE_FORMAT."""
        context = super(SelectDateWidget, self).get_context(name, value, attrs)
        date_context = {}
        year_choices = [(i, force_text(i)) for i in self.years]
        # Optional fields get the empty choice prepended.
        if self.is_required is False:
            year_choices.insert(0, self.year_none_value)
        year_attrs = context['widget']['attrs'].copy()
        year_name = self.year_field % name
        year_attrs['id'] = 'id_%s' % year_name
        date_context['year'] = self.select_widget(attrs, choices=year_choices).get_context(
            name=year_name,
            value=context['widget']['value']['year'],
            attrs=year_attrs,
        )
        month_choices = list(self.months.items())
        if self.is_required is False:
            month_choices.insert(0, self.month_none_value)
        month_attrs = context['widget']['attrs'].copy()
        month_name = self.month_field % name
        month_attrs['id'] = 'id_%s' % month_name
        date_context['month'] = self.select_widget(attrs, choices=month_choices).get_context(
            name=month_name,
            value=context['widget']['value']['month'],
            attrs=month_attrs,
        )
        day_choices = [(i, i) for i in range(1, 32)]
        if self.is_required is False:
            day_choices.insert(0, self.day_none_value)
        day_attrs = context['widget']['attrs'].copy()
        day_name = self.day_field % name
        day_attrs['id'] = 'id_%s' % day_name
        date_context['day'] = self.select_widget(attrs, choices=day_choices,).get_context(
            name=day_name,
            value=context['widget']['value']['day'],
            attrs=day_attrs,
        )
        # Order the three selects following the localized date format.
        subwidgets = []
        for field in self._parse_date_fmt():
            subwidgets.append(date_context[field]['widget'])
        context['widget']['subwidgets'] = subwidgets
        return context

    def format_value(self, value):
        """
        Return a dict containing the year, month, and day of the current value.
        Use dict instead of a datetime to allow invalid dates such as February
        31 to display correctly.
        """
        year, month, day = None, None, None
        if isinstance(value, (datetime.date, datetime.datetime)):
            year, month, day = value.year, value.month, value.day
        elif isinstance(value, six.string_types):
            if settings.USE_L10N:
                # Try the localized input format first.
                try:
                    input_format = get_format('DATE_INPUT_FORMATS')[0]
                    d = datetime.datetime.strptime(force_str(value), input_format)
                    year, month, day = d.year, d.month, d.day
                except ValueError:
                    pass
            # Fall back to ISO-like "YYYY-M-D" strings.
            match = self.date_re.match(value)
            if match:
                year, month, day = [int(val) for val in match.groups()]
        return {'year': year, 'month': month, 'day': day}

    @staticmethod
    def _parse_date_fmt():
        """Yield 'year'/'month'/'day' in the order given by DATE_FORMAT."""
        fmt = get_format('DATE_FORMAT')
        escaped = False
        for char in fmt:
            if escaped:
                # Skip a backslash-escaped literal character.
                escaped = False
            elif char == '\\':
                escaped = True
            elif char in 'Yy':
                yield 'year'
            elif char in 'bEFMmNn':
                yield 'month'
            elif char in 'dj':
                yield 'day'

    def id_for_label(self, id_):
        # Point the label at whichever component the date format puts first.
        for first_select in self._parse_date_fmt():
            return '%s_%s' % (id_, first_select)
        else:
            return '%s_month' % id_

    def value_from_datadict(self, data, files, name):
        """Recombine the three posted selects into a date string (or date)."""
        y = data.get(self.year_field % name)
        m = data.get(self.month_field % name)
        d = data.get(self.day_field % name)
        if y == m == d == "0":
            # All three set to the empty choice: no date entered.
            return None
        if y and m and d:
            if settings.USE_L10N:
                input_format = get_format('DATE_INPUT_FORMATS')[0]
                try:
                    date_value = datetime.date(int(y), int(m), int(d))
                except ValueError:
                    # Invalid date (e.g. Feb 31): return the raw string so the
                    # field's validation can report the error.
                    return '%s-%s-%s' % (y, m, d)
                else:
                    date_value = datetime_safe.new_date(date_value)
                    return date_value.strftime(input_format)
            else:
                return '%s-%s-%s' % (y, m, d)
        return data.get(name)

    def value_omitted_from_data(self, data, files, name):
        # Present if any of the three component keys was posted.
        return not any(
            ('{}_{}'.format(name, interval) in data)
            for interval in ('year', 'month', 'day')
        )
| |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""Tools for the submission of Tasks."""
from __future__ import unicode_literals, division, print_function
import os
import time
import collections
import yaml
from six.moves import cStringIO
from datetime import timedelta
from monty.io import get_open_fds
from monty.string import boxed, is_string
from monty.os.path import which
from monty.collections import AttrDict
from .utils import as_bool
try:
    import apscheduler
    # NOTE: string comparison of version numbers is fragile (e.g. "10" < "3"),
    # but it matches the historical 2.x/3.x split this flag encodes.
    has_sched_v3 = apscheduler.version >= "3.0.0"
except ImportError:
    # Bug fix: the bare ``pass`` left ``has_sched_v3`` undefined, producing a
    # NameError later in PyFlowScheduler.__init__. Default to False instead.
    has_sched_v3 = False
import logging
logger = logging.getLogger(__name__)
__all__ = [
"ScriptEditor",
"PyLauncher",
"PyFlowScheduler",
]
def straceback():
    """Returns a string with the traceback of the current exception."""
    from traceback import format_exc
    return format_exc()
def ask_yesno(prompt, default=True):
    """
    Ask a yes/no question on stdin and return the answer as a bool.

    Args:
        prompt: Question shown to the user.
        default: Value returned when the user just presses Enter or stdin
            is closed (EOF). Callers use "[Y/n]"-style prompts, so the
            default answer is yes.

    Returns:
        False only for an explicit "n"/"no" answer (case-insensitive),
        True for anything else.

    Bug fix: the previous implementation returned ``answer in ["n", "no"]``,
    i.e. answering "no" returned True and pressing Enter returned False --
    the exact inverse of the prompt's "[Y/n]" contract and inconsistent with
    the EOF branch that returned ``default``.
    """
    # Python 2 compatibility without depending on six.
    try:
        my_input = raw_input  # noqa: F821 (Python 2 builtin)
    except NameError:
        my_input = input
    try:
        answer = my_input(prompt)
    except EOFError:
        # Non-interactive session: fall back to the declared default.
        return default
    answer = answer.lower().strip()
    if not answer:
        return default
    return answer not in ("n", "no")
class ScriptEditor(object):
    """Simple editor that simplifies the writing of shell scripts"""
    # Interpreter used in the shebang line.
    _shell = '/bin/bash'

    def __init__(self):
        self._lines = []

    @property
    def shell(self):
        """Path of the shell interpreter used in the shebang line."""
        return self._shell

    def _add(self, text, pre=""):
        # ``text`` may be a single string or an iterable of strings;
        # ``pre`` is prepended to every added line.
        if is_string(text):
            self._lines.append(pre + text)
        else:
            self._lines.extend([pre + t for t in text])

    def reset(self):
        """Reset the editor, discarding all lines added so far."""
        # Bug fix: this used to ``del self._lines`` (guarded by AttributeError),
        # which left the instance without the attribute so any subsequent
        # add_line/shebang call -- including after get_script_str(reset=True) --
        # raised AttributeError. Re-initialize to an empty list instead.
        self._lines = []

    def shebang(self):
        """Adds the shebang line."""
        self._lines.append('#!' + self.shell)

    def declare_var(self, key, val):
        """Declare a env variable. If val is None the variable is unset."""
        if val is not None:
            line = "export " + key + '=' + str(val)
        else:
            line = "unset " + key
        self._add(line)

    def declare_vars(self, d):
        """Declare the variables defined in the dictionary d."""
        for k, v in d.items():
            self.declare_var(k, v)

    def export_envar(self, key, val):
        """Export an environment variable."""
        line = "export " + key + "=" + str(val)
        self._add(line)

    def export_envars(self, env):
        """Export the environment variables contained in the dict env."""
        for k, v in env.items():
            self.export_envar(k, v)

    def add_emptyline(self):
        """Add an empty line."""
        self._add("", pre="")

    def add_comment(self, comment):
        """Add a comment"""
        self._add(comment, pre="# ")

    def load_modules(self, modules):
        """Load the list of specified modules."""
        for module in modules:
            self.load_module(module)

    def load_module(self, module):
        """Add a 'module load <module>' line."""
        self._add('module load ' + module)

    def add_line(self, line):
        """Add a single line of script text."""
        self._add(line)

    def add_lines(self, lines):
        """Add a list of lines."""
        self._add(lines)

    def get_script_str(self, reset=True):
        """Returns a string with the script and reset the editor if reset is True"""
        s = "\n".join(self._lines)
        if reset:
            self.reset()
        return s
class PyLauncherError(Exception):
    """Exceptions raised by :class:`PyLauncher`."""
class PyLauncher(object):
    """This object handle the submission of the tasks contained in a :class:`Flow`"""
    Error = PyLauncherError

    def __init__(self, flow, **kwargs):
        """
        Initialize the object

        Args:
            flow: :class:`Flow` object
            kwargs:
                max_njobs_inqueue:
                    The launcher will stop submitting jobs when the
                    number of jobs in the queue is >= Max number of jobs
        """
        self.flow = flow
        self.max_njobs_inqueue = kwargs.get("max_njobs_inqueue", 200)

    def single_shot(self):
        """
        Run the first :class:`Task` than is ready for execution.

        Returns:
            Number of jobs launched.
        """
        num_launched = 0

        # Get the tasks that can be executed in each workflow.
        tasks = []
        for work in self.flow:
            try:
                task = work.fetch_task_to_run()
                if task is not None:
                    tasks.append(task)
                else:
                    # No task found, this usually happens when we have dependencies.
                    # Beware of possible deadlocks here!
                    logger.debug("No task to run! Possible deadlock")
            except StopIteration:
                logger.info("All tasks completed.")

        # Submit the tasks and update the database.
        if tasks:
            tasks[0].start()
            num_launched += 1
            self.flow.pickle_dump()

        return num_launched

    def rapidfire(self, max_nlaunch=-1, max_loops=1, sleep_time=5):
        """
        Keeps submitting `Tasks` until we are out of jobs or no job is ready to run.

        Args:
            max_nlaunch: Maximum number of launches. default: no limit.
            max_loops: Maximum number of loops
            sleep_time: seconds to sleep between rapidfire loop iterations

        Returns:
            The number of tasks launched.
        """
        num_launched, do_exit, launched = 0, False, []

        for count in range(max_loops):
            if do_exit:
                break
            if count > 0:
                time.sleep(sleep_time)

            tasks = self.fetch_tasks_to_run()

            # I don't know why but we receive duplicated tasks.
            if any(task in launched for task in tasks):
                # Bug fix: this message used ``len(task)`` where ``task`` was the
                # already-exhausted generator variable (a NameError on Python 3);
                # report the number of fetched tasks instead.
                logger.critical("numtasks %d already in launched list:\n%s" % (len(tasks), launched))

            # Preventive test.
            tasks = [t for t in tasks if t not in launched]

            if not tasks:
                continue

            for task in tasks:
                fired = task.start()
                if fired:
                    launched.append(task)
                    num_launched += 1

                if num_launched >= max_nlaunch > 0:
                    logger.info('num_launched >= max_nlaunch, going back to sleep')
                    do_exit = True
                    break

        # Update the database.
        self.flow.pickle_dump()

        return num_launched

    def fetch_tasks_to_run(self):
        """
        Return the list of tasks that can be submitted.
        Empty list if no task has been found.
        """
        tasks_to_run = []
        for work in self.flow:
            tasks_to_run.extend(work.fetch_alltasks_to_run())
        return tasks_to_run
class PyFlowSchedulerError(Exception):
    """Exceptions raised by :class:`PyFlowScheduler`."""
class PyFlowScheduler(object):
    """
    This object schedules the submission of the tasks in an :class:`Flow`.
    There are two types of errors that might occur during the execution of the jobs:

        #. Python exceptions
        #. Abinit Errors.

    Python exceptions are easy to detect and are usually due to a bug in abinitio or random errors such as IOError.
    The set of Abinit Errors is much much broader. It includes wrong input data, segmentation
    faults, problems with the resource manager, etc. Abinitio tries to handle the most common cases
    but there's still a lot of room for improvement.
    Note, in particular, that `PyFlowScheduler` will shutdown automatically if

        #. The number of python exceptions is > MAX_NUM_PYEXC

        #. The number of Abinit Errors (i.e. the number of tasks whose status is S_ERROR) is > MAX_NUM_ERRORS

        #. The number of jobs launched becomes greater than (`safety_ratio` * total_number_of_tasks).

        #. The scheduler will send an email to the user (specified by `mailto`) every `remindme_s` seconds.
           If the mail cannot be sent, it will shutdown automatically.
           This check prevents the scheduler from being trapped in an infinite loop.
    """
    # Name of the configuration file searched by from_user_config().
    YAML_FILE = "scheduler.yml"
    # NOTE(review): os.getenv("HOME") can be None (Windows, stripped envs),
    # which would make os.path.join raise at import time -- confirm targets.
    USER_CONFIG_DIR = os.path.join(os.getenv("HOME"), ".abinit", "abipy")

    Error = PyFlowSchedulerError

    def __init__(self, **kwargs):
        """
        Args:
            weeks: number of weeks to wait
            days: number of days to wait
            hours: number of hours to wait
            minutes: number of minutes to wait
            seconds: number of seconds to wait
            mailto: e-mail address for reminders/shutdown notifications (optional)
            verbose: (int) verbosity level
            max_njobs_inqueue: Limit on the number of jobs that can be present in the queue
            use_dynamic_manager: True if the :class:`TaskManager` must be re-initialized from
                file before launching the jobs. Default: False
            max_nlaunches: Maximum number of tasks launched by rapidfire (default -1 i.e. no limit)
            remindme_s: seconds between reminder e-mails
            max_num_pyexcs: max number of Python exceptions before shutdown
            max_num_abierrs: max number of errored tasks before shutdown
            safety_ratio: shutdown when nlaunch > safety_ratio * num_tasks
            debug: (int) enable debug printout when > 0
        """
        # Options passed to the scheduler (interval between callback runs).
        self.sched_options = AttrDict(
            weeks=kwargs.pop("weeks", 0),
            days=kwargs.pop("days", 0),
            hours=kwargs.pop("hours", 0),
            minutes=kwargs.pop("minutes", 0),
            seconds=kwargs.pop("seconds", 0),
            #start_date=kwargs.pop("start_date", None),
        )

        # A zero interval would make the scheduler spin: refuse it.
        if all(not v for v in self.sched_options.values()):
            raise self.Error("Wrong set of options passed to the scheduler.")

        self.mailto = kwargs.pop("mailto", None)
        self.verbose = int(kwargs.pop("verbose", 0))
        self.use_dynamic_manager = kwargs.pop("use_dynamic_manager", False)
        self.max_njobs_inqueue = kwargs.pop("max_njobs_inqueue", 200)
        self.contact_resource_manager = as_bool(kwargs.pop("contact_resource_manager", False))

        # Default reminder interval: 4 days.
        self.remindme_s = float(kwargs.pop("remindme_s", 4 * 24 * 3600))
        self.max_num_pyexcs = int(kwargs.pop("max_num_pyexcs", 0))
        self.max_num_abierrs = int(kwargs.pop("max_num_abierrs", 0))
        self.safety_ratio = int(kwargs.pop("safety_ratio", 5))
        #self.max_etime_s = kwargs.pop("max_etime_s", )
        self.max_nlaunches = kwargs.pop("max_nlaunches", -1)
        self.debug = kwargs.pop("debug", 0)

        if kwargs:
            raise self.Error("Unknown arguments %s" % kwargs)

        # APScheduler 2.x and 3.x have incompatible APIs.
        if has_sched_v3:
            from apscheduler.schedulers.blocking import BlockingScheduler
            self.sched = BlockingScheduler()
        else:
            from apscheduler.scheduler import Scheduler
            self.sched = Scheduler(standalone=True)

        self.nlaunch = 0
        self.num_reminders = 1

        # Used to keep track of the exceptions raised while the scheduler is running
        self.exceptions = collections.deque(maxlen=self.max_num_pyexcs + 10)

        # Used to push additional info during the execution.
        self.history = collections.deque(maxlen=100)

    @classmethod
    def from_file(cls, filepath):
        """Read the configuration parameters from a Yaml file."""
        with open(filepath, "r") as fh:
            # NOTE(review): yaml.load without an explicit Loader can execute
            # arbitrary Python tags; fine for a trusted local config file,
            # but yaml.safe_load would be the safer choice.
            return cls(**yaml.load(fh))

    @classmethod
    def from_string(cls, s):
        """Create an istance from string s containing a YAML dictionary."""
        stream = cStringIO(s)
        stream.seek(0)
        return cls(**yaml.load(stream))

    @classmethod
    def from_user_config(cls):
        """
        Initialize the :class:`PyFlowScheduler` from the YAML file 'scheduler.yml'.
        Search first in the working directory and then in the configuration directory of abipy.

        Raises:
            RuntimeError if file is not found.
        """
        # Try in the current directory.
        path = os.path.join(os.getcwd(), cls.YAML_FILE)
        if os.path.exists(path):
            return cls.from_file(path)

        # Try in the configuration directory.
        path = os.path.join(cls.USER_CONFIG_DIR, cls.YAML_FILE)
        if os.path.exists(path):
            return cls.from_file(path)

        raise cls.Error("Cannot locate %s neither in current directory nor in %s" % (cls.YAML_FILE, path))

    def __str__(self):
        """String representation."""
        lines = [self.__class__.__name__ + ", Pid: %d" % self.pid]
        app = lines.append
        app("Scheduler options: %s" % str(self.sched_options))
        app(80 * "=")
        app(str(self.flow))
        return "\n".join(lines)

    @property
    def pid(self):
        """The pid of the process associated to the scheduler."""
        # Cached lazily on first access.
        try:
            return self._pid
        except AttributeError:
            self._pid = os.getpid()
            return self._pid

    @property
    def pid_file(self):
        """
        Absolute path of the file with the pid.
        The file is located in the workdir of the flow
        """
        return self._pid_file

    @property
    def flow(self):
        """`Flow`."""
        return self._flow

    @property
    def num_excs(self):
        """Number of exceptions raised so far."""
        return len(self.exceptions)

    def get_delta_etime(self):
        """Returns a `timedelta` object representing the elapsed time."""
        return timedelta(seconds=(time.time() - self.start_time))

    def add_flow(self, flow):
        """Add an :class:`Flow` flow to the scheduler."""
        if hasattr(self, "_flow"):
            raise self.Error("Only one flow can be added to the scheduler.")

        # The pid file acts as a lock preventing two schedulers on one flow.
        pid_file = os.path.join(flow.workdir, "_PyFlowScheduler.pid")

        if os.path.isfile(pid_file):
            flow.show_status()

            raise self.Error("""\
pid_file %s already exists
There are two possibilities:

   1) There's an another instance of PyFlowScheduler running
   2) The previous scheduler didn't exit in a clean way

To solve case 1:
   Kill the previous scheduler (use 'kill pid' where pid is the number reported in the file)
   Then you can restart the new scheduler.

To solve case 2:
   Remove the pid_file and restart the scheduler.

Exiting""" % pid_file)

        with open(pid_file, "w") as fh:
            fh.write(str(self.pid))

        self._pid_file = pid_file
        self._flow = flow

    def start(self):
        """
        Starts the scheduler in a new thread. Returns True if success.
        In standalone mode, this method will block until there are no more scheduled jobs.
        """
        self.history.append("Started on %s" % time.asctime())
        self.start_time = time.time()

        # APScheduler 2.x/3.x use different job-registration APIs.
        if has_sched_v3:
            self.sched.add_job(self.callback, "interval", **self.sched_options)
        else:
            self.sched.add_interval_job(self.callback, **self.sched_options)

        errors = self.flow.look_before_you_leap()
        if errors:
            self.exceptions.append(errors)
            return False

        # Try to run the job immediately. If something goes wrong return without initializing the scheduler.
        self._runem_all()

        if self.exceptions:
            self.cleanup()
            self.send_email(msg="Error while trying to run the flow for the first time!\n %s" % self.exceptions)
            return False

        try:
            self.sched.start()
            return True
        except KeyboardInterrupt:
            self.shutdown(msg="KeyboardInterrupt from user")
            if ask_yesno("Do you want to cancel all the jobs in the queue? [Y/n]"):
                self.flow.cancel()
            self.flow.pickle_dump()
            return False

    def _runem_all(self):
        """
        This function checks the status of all tasks,
        tries to fix tasks that went unconverged, abicritical, or queuecritical
        and tries to run all the tasks that can be submitted.
        """
        excs = []
        flow = self.flow

        # Allow to change the manager at run-time
        if self.use_dynamic_manager:
            from pymatgen.io.abinitio.tasks import TaskManager
            new_manager = TaskManager.from_user_config()
            for work in flow:
                work.set_manager(new_manager)

        nqjobs = 0
        if self.contact_resource_manager:
            # This call is expensive and therefore it's optional
            nqjobs = flow.get_njobs_in_queue()
            if nqjobs is None:
                nqjobs = 0
                if flow.manager.has_queue: logger.warning('Cannot get njobs_inqueue')

        if nqjobs >= self.max_njobs_inqueue:
            logger.info("Too many jobs in the queue, returning")
            return

        # Derive the launch budget for this round from the queue headroom.
        if self.max_nlaunches == -1:
            max_nlaunch = self.max_njobs_inqueue - nqjobs
        else:
            max_nlaunch = min(self.max_njobs_inqueue - nqjobs, self.max_nlaunches)

        # check status and print it.
        flow.check_status(show=False)

        # fix problems
        # Try to restart the unconverged tasks
        # todo donot fire here but prepare for fireing in rapidfire
        for task in self.flow.unconverged_tasks:
            try:
                logger.info("Flow will try restart task %s" % task)
                fired = task.restart()
                if fired:
                    self.nlaunch += 1
                    max_nlaunch -= 1
                    if max_nlaunch == 0:
                        logger.info("Restart: too many jobs in the queue, returning")
                        flow.pickle_dump()
                        return
            except task.RestartError:
                excs.append(straceback())

        # move here from withing rapid fire ...
        # fix only prepares for restarting, and sets to ready
        nfixed = flow.fix_abi_critical()
        if nfixed: print("Fixed %d AbiCritical errors" % nfixed)

        # Temporarily disable by MG because I don't know if fix_critical works after the
        # introduction of the new qadapters
        if False:
            nfixed = flow.fix_queue_critical()
            if nfixed: print("Fixed %d QueueCritical errors" % nfixed)

        # update database
        flow.pickle_dump()

        # Submit the tasks that are ready.
        try:
            nlaunch = PyLauncher(flow).rapidfire(max_nlaunch=max_nlaunch, sleep_time=10)
            self.nlaunch += nlaunch
            if nlaunch:
                print("[%s] Number of launches: %d" % (time.asctime(), nlaunch))
        except Exception:
            excs.append(straceback())

        flow.show_status()

        if excs:
            logger.critical("*** Scheduler exceptions:\n *** %s" % "\n".join(excs))
            self.exceptions.extend(excs)

    def callback(self):
        """The function that will be executed by the scheduler."""
        try:
            return self._callback()
        except:
            # All exceptions raised here will trigger the shutdown!
            self.exceptions.append(straceback())
            self.shutdown(msg="Exception raised in callback!")

    def _callback(self):
        """The actual callback."""
        if self.debug:
            # Show the number of open file descriptors
            print(">>>>> _callback: Number of open file descriptors: %s" % get_open_fds())

        self._runem_all()

        # Mission accomplished. Shutdown the scheduler.
        all_ok = self.flow.all_ok
        if self.verbose:
            print("all_ok", all_ok)

        if all_ok:
            self.shutdown(msg="All tasks have reached S_OK. Will shutdown the scheduler and exit")

        # Handle failures.
        err_msg = ""

        # Shall we send a reminder to the user?
        delta_etime = self.get_delta_etime()

        if delta_etime.total_seconds() > self.num_reminders * self.remindme_s:
            self.num_reminders += 1
            msg = ("Just to remind you that the scheduler with pid %s, flow %s\n has been running for %s " %
                   (self.pid, self.flow, delta_etime))
            retcode = self.send_email(msg, tag="[REMINDER]")

            if retcode:
                # Cannot send mail, shutdown now!
                msg += ("\nThe scheduler tried to send an e-mail to remind the user\n" +
                        " but send_email returned %d. Aborting now" % retcode)
                err_msg += msg

        #if delta_etime.total_seconds() > self.max_etime_s:
        #    err_msg += "\nExceeded max_etime_s %s. Will shutdown the scheduler and exit" % self.max_etime_s

        # Too many exceptions. Shutdown the scheduler.
        if self.num_excs > self.max_num_pyexcs:
            msg = "Number of exceptions %s > %s. Will shutdown the scheduler and exit" % (
                self.num_excs, self.max_num_pyexcs)
            err_msg += boxed(msg)

        # Paranoid check: disable the scheduler if we have submitted
        # too many jobs (it might be due to some bug or other external reasons
        # such as race conditions between difference callbacks!)
        if self.nlaunch > self.safety_ratio * self.flow.num_tasks:
            msg = "Too many jobs launched %d. Total number of tasks = %s, Will shutdown the scheduler and exit" % (
                self.nlaunch, self.flow.num_tasks)
            err_msg += boxed(msg)

        # Count the number of tasks with status == S_ERROR.
        if self.flow.num_errored_tasks > self.max_num_abierrs:
            msg = "Number of tasks with ERROR status %s > %s. Will shutdown the scheduler and exit" % (
                self.flow.num_errored_tasks, self.max_num_abierrs)
            err_msg += boxed(msg)

        # Deadlock detection: nothing running and nothing runnable.
        deadlocked, runnables, running = self.flow.deadlocked_runnables_running()
        #print("\ndeadlocked:\n", deadlocked, "\nrunnables:\n", runnables, "\nrunning\n", running)
        if deadlocked and not runnables and not running:
            msg = "No runnable job with deadlocked tasks:\n %s\nWill shutdown the scheduler and exit" % str(deadlocked)
            err_msg += boxed(msg)

        if err_msg:
            # Something wrong. Quit
            self.shutdown(err_msg)

        return len(self.exceptions)

    def cleanup(self):
        """Cleanup routine: remove the pid file and save the pickle database"""
        try:
            os.remove(self.pid_file)
        except OSError:
            logger.critical("Could not remove pid_file")

        # Save the final status of the flow.
        self.flow.pickle_dump()

    def shutdown(self, msg):
        """Shutdown the scheduler."""
        try:
            self.cleanup()

            self.history.append("Completed on %s" % time.asctime())
            self.history.append("Elapsed time %s" % self.get_delta_etime())

            if self.debug:
                print(">>>>> shutdown: Number of open file descriptors: %s" % get_open_fds())

            retcode = self.send_email(msg)
            if self.debug:
                print("send_mail retcode", retcode)

            # Write file with the list of exceptions:
            if self.exceptions:
                dump_file = os.path.join(self.flow.workdir, "_exceptions")
                with open(dump_file, "w") as fh:
                    fh.writelines(self.exceptions)
                    fh.write("Shutdown message:\n%s" % msg)

            lines = []
            app = lines.append
            app("Submitted on %s" % time.ctime(self.start_time))
            app("Completed on %s" % time.asctime())
            app("Elapsed time %s" % str(self.get_delta_etime()))

            if self.flow.all_ok:
                app("Flow completed successfully")
            else:
                app("Flow didn't complete successfully")
                app("Shutdown message:\n%s" % msg)

            print("\n".join(lines))
        finally:
            # Shutdown the scheduler thus allowing the process to exit.
            logger.debug('this should be the shutdown of the scheduler')

            # Unschedule all the jobs before calling shutdown
            #self.sched.print_jobs()
            for job in self.sched.get_jobs():
                self.sched.unschedule_job(job)
            #self.sched.print_jobs()

            self.sched.shutdown()
            # Uncomment the line below if shutdown does not work!
            #os.system("kill -9 %d" % os.getpid())

    def send_email(self, msg, tag=None):
        """
        Send an e-mail before completing the shutdown.
        Returns 0 if success.
        """
        try:
            return self._send_email(msg, tag)
        except:
            # Never let a mail failure propagate; record it and report -2.
            self.exceptions.append(straceback())
            return -2

    def _send_email(self, msg, tag):
        # Returns -1 when no recipient is configured.
        if self.mailto is None:
            return -1

        header = msg.splitlines()
        app = header.append

        app("Submitted on %s" % time.ctime(self.start_time))
        app("Completed on %s" % time.asctime())
        app("Elapsed time %s" % str(self.get_delta_etime()))
        app("Number of errored tasks: %d" % self.flow.num_errored_tasks)
        app("Number of unconverged tasks: %d" % self.flow.num_unconverged_tasks)

        strio = cStringIO()
        strio.writelines("\n".join(header) + 4 * "\n")

        # Add the status of the flow.
        self.flow.show_status(stream=strio)

        if self.exceptions:
            # Report the list of exceptions.
            strio.writelines(self.exceptions)

        if tag is None:
            tag = " [ALL OK]" if self.flow.all_ok else " [WARNING]"

        return sendmail(subject=self.flow.name + tag, text=strio.getvalue(), mailto=self.mailto)
def sendmail(subject, text, mailto, sender=None):
    """
    Sends an e-mail with unix sendmail.

    Args:
        subject: String with the subject of the mail.
        text: String with the body of the mail.
        mailto: String or list of string with the recipients.
        sender: string with the sender address.
            If sender is None, username@hostname is used.

    Returns:
        Exit status: 0 on success, -1 if the sendmail executable is missing,
        otherwise the length of sendmail's stderr output.
    """
    def user_at_host():
        # NOTE(review): os.getlogin() fails when there is no controlling
        # terminal (e.g. cron/daemons); getpass.getuser() would be sturdier.
        from socket import gethostname
        return os.getlogin() + "@" + gethostname()

    # Body of the message.
    sender = user_at_host() if sender is None else sender
    if is_string(mailto): mailto = [mailto]

    from email.mime.text import MIMEText

    mail = MIMEText(text)
    mail["Subject"] = subject
    mail["From"] = sender
    mail["To"] = ", ".join(mailto)

    msg = mail.as_string()

    # sendmail works much better than the python interface.
    # Note that sendmail is available only on Unix-like OS.
    from subprocess import Popen, PIPE

    sendmail = which("sendmail")
    if sendmail is None: return -1
    p = Popen([sendmail, "-t"], stdin=PIPE, stderr=PIPE)

    # Bug fix: Popen pipes expect bytes; passing a str raised TypeError
    # under Python 3. Encode explicitly (no-op on Python 2 byte strings).
    if not isinstance(msg, bytes):
        msg = msg.encode("utf-8")
    outdata, errdata = p.communicate(msg)
    return len(errdata)
#def test_sendmail():
# retcode = sendmail("sendmail_test", text="hello\nworld", mailto="nobody@nowhere.com")
# print("Retcode", retcode)
| |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from AlgorithmImports import *
### <summary>
### In this algorithm we submit/update/cancel each order type
### </summary>
### <meta name="tag" content="trading and orders" />
### <meta name="tag" content="placing orders" />
### <meta name="tag" content="managing orders" />
### <meta name="tag" content="order tickets" />
### <meta name="tag" content="updating orders" />
class OrderTicketDemoAlgorithm(QCAlgorithm):
    '''In this algorithm we submit/update/cancel each order type'''

    def Initialize(self):
        '''Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must initialized.'''
        self.SetStartDate(2013,10,7)   #Set Start Date
        self.SetEndDate(2013,10,11)    #Set End Date
        self.SetCash(100000)           #Set Strategy Cash
        # Find more symbols here: http://quantconnect.com/data
        equity = self.AddEquity("SPY")
        self.spy = equity.Symbol

        # One list of open tickets per order type so each demo method can
        # track its own submissions and later update/cancel them.
        self.__openMarketOnOpenOrders = []
        self.__openMarketOnCloseOrders = []
        self.__openLimitOrders = []
        self.__openStopMarketOrders = []
        self.__openStopLimitOrders = []

    def OnData(self, data):
        '''OnData event is the primary entry point for your algorithm. Each new data point will be pumped in here.'''
        # MARKET ORDERS
        self.MarketOrders()

        # LIMIT ORDERS
        self.LimitOrders()

        # STOP MARKET ORDERS
        self.StopMarketOrders()

        ## STOP LIMIT ORDERS
        self.StopLimitOrders()

        ## MARKET ON OPEN ORDERS
        self.MarketOnOpenOrders()

        ## MARKET ON CLOSE ORDERS
        self.MarketOnCloseOrders()

    def MarketOrders(self):
        ''' MarketOrders are the only orders that are processed synchronously by default, so
        they'll fill by the next line of code. This behavior equally applies to live mode.
        You can opt out of this behavior by specifying the 'asynchronous' parameter as True.'''
        if self.TimeIs(7, 9, 31):
            self.Log("Submitting MarketOrder")

            # submit a market order to buy 10 shares, this function returns an OrderTicket object
            # we submit the order with asynchronous = False, so it blocks until it is filled
            newTicket = self.MarketOrder(self.spy, 10, asynchronous = False)
            if newTicket.Status != OrderStatus.Filled:
                self.Log("Synchronous market order was not filled synchronously!")
                self.Quit()

            # we can also submit the ticket asynchronously. In a backtest, we'll still perform the fill
            # before the next time events for your algorithm. here we'll submit the order asynchronously
            # and try to cancel it, sometimes it will, sometimes it will be filled first.
            newTicket = self.MarketOrder(self.spy, 10, asynchronous = True)
            response = newTicket.Cancel("Attempt to cancel async order")
            if response.IsSuccess:
                self.Log("Successfully canceled async market order: {0}".format(newTicket.OrderId))
            else:
                self.Log("Unable to cancel async market order: {0}".format(response.ErrorCode))

    def LimitOrders(self):
        '''LimitOrders are always processed asynchronously. Limit orders are used to
        set 'good' entry points for an order. For example, you may wish to go
        long a stock, but want a good price, so can place a LimitOrder to buy with
        a limit price below the current market price. Likewise the opposite is True
        when selling, you can place a LimitOrder to sell with a limit price above the
        current market price to get a better sale price.
        You can submit requests to update or cancel the LimitOrder at any time.
        The 'LimitPrice' for an order can be retrieved from the ticket using the
        OrderTicket.Get(OrderField) method, for example:
        Code:
            currentLimitPrice = orderTicket.Get(OrderField.LimitPrice)'''
        if self.TimeIs(7, 12, 0):
            self.Log("Submitting LimitOrder")

            # submit a limit order to buy 10 shares at .1% below the bar's close
            close = self.Securities[self.spy.Value].Close
            newTicket = self.LimitOrder(self.spy, 10, close * .999)
            self.__openLimitOrders.append(newTicket)

            # submit another limit order to sell 10 shares at .1% above the bar's close
            newTicket = self.LimitOrder(self.spy, -10, close * 1.001)
            self.__openLimitOrders.append(newTicket)

        # when we submitted new limit orders we placed them into this list,
        # so while there's two entries they're still open and need processing
        if len(self.__openLimitOrders) == 2:
            openOrders = self.__openLimitOrders

            # check if either is filled and cancel the other
            longOrder = openOrders[0]
            shortOrder = openOrders[1]
            if self.CheckPairOrdersForFills(longOrder, shortOrder):
                self.__openLimitOrders = []
                return

            # if neither order has filled, bring in the limits by a penny
            newLongLimit = longOrder.Get(OrderField.LimitPrice) + 0.01
            newShortLimit = shortOrder.Get(OrderField.LimitPrice) - 0.01
            self.Log("Updating limits - Long: {0:.2f} Short: {1:.2f}".format(newLongLimit, newShortLimit))

            updateOrderFields = UpdateOrderFields()
            updateOrderFields.LimitPrice = newLongLimit
            updateOrderFields.Tag = "Update #{0}".format(len(longOrder.UpdateRequests) + 1)
            longOrder.Update(updateOrderFields)

            updateOrderFields = UpdateOrderFields()
            updateOrderFields.LimitPrice = newShortLimit
            updateOrderFields.Tag = "Update #{0}".format(len(shortOrder.UpdateRequests) + 1)
            shortOrder.Update(updateOrderFields)

    def StopMarketOrders(self):
        '''StopMarketOrders work in the opposite way that limit orders do.
        When placing a long trade, the stop price must be above current
        market price. In this way it's a 'stop loss' for a short trade.
        When placing a short trade, the stop price must be below current
        market price. In this way it's a 'stop loss' for a long trade.
        You can submit requests to update or cancel the StopMarketOrder at any time.
        The 'StopPrice' for an order can be retrieved from the ticket using the
        OrderTicket.Get(OrderField) method, for example:
        Code:
            currentStopPrice = orderTicket.Get(OrderField.StopPrice)'''
        if self.TimeIs(7, 12 + 4, 0):
            self.Log("Submitting StopMarketOrder")

            # a long stop is triggered when the price rises above the value
            # so we'll set a long stop .25% above the current bar's close
            close = self.Securities[self.spy.Value].Close
            newTicket = self.StopMarketOrder(self.spy, 10, close * 1.0025)
            self.__openStopMarketOrders.append(newTicket)

            # a short stop is triggered when the price falls below the value
            # so we'll set a short stop .25% below the current bar's close
            newTicket = self.StopMarketOrder(self.spy, -10, close * .9975)
            self.__openStopMarketOrders.append(newTicket)

        # when we submitted new stop market orders we placed them into this list,
        # so while there's two entries they're still open and need processing
        if len(self.__openStopMarketOrders) == 2:
            # check if either is filled and cancel the other
            longOrder = self.__openStopMarketOrders[0]
            shortOrder = self.__openStopMarketOrders[1]
            if self.CheckPairOrdersForFills(longOrder, shortOrder):
                self.__openStopMarketOrders = []
                return

            # if neither order has filled, bring in the stops by a penny
            newLongStop = longOrder.Get(OrderField.StopPrice) - 0.01
            newShortStop = shortOrder.Get(OrderField.StopPrice) + 0.01
            self.Log("Updating stops - Long: {0:.2f} Short: {1:.2f}".format(newLongStop, newShortStop))

            updateOrderFields = UpdateOrderFields()
            updateOrderFields.StopPrice = newLongStop
            updateOrderFields.Tag = "Update #{0}".format(len(longOrder.UpdateRequests) + 1)
            longOrder.Update(updateOrderFields)

            updateOrderFields = UpdateOrderFields()
            updateOrderFields.StopPrice = newShortStop
            updateOrderFields.Tag = "Update #{0}".format(len(shortOrder.UpdateRequests) + 1)
            shortOrder.Update(updateOrderFields)
            self.Log("Updated price - Long: {0} Short: {1}".format(longOrder.Get(OrderField.StopPrice), shortOrder.Get(OrderField.StopPrice)))

    def StopLimitOrders(self):
        '''StopLimitOrders work as a combined stop and limit order. First, the
        price must pass the stop price in the same way a StopMarketOrder works,
        but then we're also guaranteed a fill price at least as good as the
        limit price. This order type can be beneficial in gap down scenarios
        where a StopMarketOrder would have triggered and given the not as beneficial
        gapped down price, whereas the StopLimitOrder could protect you from
        getting the gapped down price through prudent placement of the limit price.
        You can submit requests to update or cancel the StopLimitOrder at any time.
        The 'StopPrice' or 'LimitPrice' for an order can be retrieved from the ticket
        using the OrderTicket.Get(OrderField) method, for example:
        Code:
            currentStopPrice = orderTicket.Get(OrderField.StopPrice)
            currentLimitPrice = orderTicket.Get(OrderField.LimitPrice)'''
        if self.TimeIs(8, 12, 1):
            self.Log("Submitting StopLimitOrder")

            # a long stop is triggered when the price rises above the
            # value so we'll set a long stop .25% above the current bar's
            # close now we'll also be setting a limit, this means we are
            # guaranteed to get at least the limit price for our fills,
            # so make the limit price a little higher than the stop price
            close = self.Securities[self.spy.Value].Close
            newTicket = self.StopLimitOrder(self.spy, 10, close * 1.001, close - 0.03)
            self.__openStopLimitOrders.append(newTicket)

            # a short stop is triggered when the price falls below the
            # value so we'll set a short stop .25% below the current bar's
            # close now we'll also be setting a limit, this means we are
            # guaranteed to get at least the limit price for our fills,
            # so make the limit price a little softer than the stop price
            newTicket = self.StopLimitOrder(self.spy, -10, close * .999, close + 0.03)
            self.__openStopLimitOrders.append(newTicket)

        # when we submitted new stop limit orders we placed them into this list,
        # so while there's two entries they're still open and need processing
        if len(self.__openStopLimitOrders) == 2:
            longOrder = self.__openStopLimitOrders[0]
            shortOrder = self.__openStopLimitOrders[1]
            if self.CheckPairOrdersForFills(longOrder, shortOrder):
                self.__openStopLimitOrders = []
                return

            # if neither order has filled, bring in the stops/limits in by a penny
            newLongStop = longOrder.Get(OrderField.StopPrice) - 0.01
            newLongLimit = longOrder.Get(OrderField.LimitPrice) + 0.01
            newShortStop = shortOrder.Get(OrderField.StopPrice) + 0.01
            newShortLimit = shortOrder.Get(OrderField.LimitPrice) - 0.01
            self.Log("Updating stops - Long: {0:.2f} Short: {1:.2f}".format(newLongStop, newShortStop))
            self.Log("Updating limits - Long: {0:.2f} Short: {1:.2f}".format(newLongLimit, newShortLimit))

            updateOrderFields = UpdateOrderFields()
            updateOrderFields.StopPrice = newLongStop
            updateOrderFields.LimitPrice = newLongLimit
            updateOrderFields.Tag = "Update #{0}".format(len(longOrder.UpdateRequests) + 1)
            longOrder.Update(updateOrderFields)

            updateOrderFields = UpdateOrderFields()
            updateOrderFields.StopPrice = newShortStop
            updateOrderFields.LimitPrice = newShortLimit
            updateOrderFields.Tag = "Update #{0}".format(len(shortOrder.UpdateRequests) + 1)
            shortOrder.Update(updateOrderFields)

    def MarketOnCloseOrders(self):
        '''MarketOnCloseOrders are always executed at the next market's closing price.
        The only properties that can be updated are the quantity and order tag properties.'''
        if self.TimeIs(9, 12, 0):
            self.Log("Submitting MarketOnCloseOrder")

            # open a new position or triple our existing position
            qty = self.Portfolio[self.spy.Value].Quantity
            qty = 100 if qty == 0 else 2*qty

            newTicket = self.MarketOnCloseOrder(self.spy, qty)
            self.__openMarketOnCloseOrders.append(newTicket)

        if len(self.__openMarketOnCloseOrders) == 1 and self.Time.minute == 59:
            ticket = self.__openMarketOnCloseOrders[0]
            # check for fills
            if ticket.Status == OrderStatus.Filled:
                self.__openMarketOnCloseOrders = []
                return

            quantity = ticket.Quantity + 1
            self.Log("Updating quantity - New Quantity: {0}".format(quantity))

            # we can update the quantity and tag
            updateOrderFields = UpdateOrderFields()
            updateOrderFields.Quantity = quantity
            updateOrderFields.Tag = "Update #{0}".format(len(ticket.UpdateRequests) + 1)
            ticket.Update(updateOrderFields)

        if self.TimeIs(self.EndDate.day, 12 + 3, 45):
            self.Log("Submitting MarketOnCloseOrder to liquidate end of algorithm")
            self.MarketOnCloseOrder(self.spy, -self.Portfolio[self.spy.Value].Quantity, "Liquidate end of algorithm")

    def MarketOnOpenOrders(self):
        '''MarketOnOpenOrders are always executed at the next
        market's opening price. The only properties that can
        be updated are the quantity and order tag properties.'''
        if self.TimeIs(8, 12 + 2, 0):
            self.Log("Submitting MarketOnOpenOrder")

            # it's EOD, let's submit a market on open order to short even more!
            newTicket = self.MarketOnOpenOrder(self.spy, 50)
            self.__openMarketOnOpenOrders.append(newTicket)

        if len(self.__openMarketOnOpenOrders) == 1 and self.Time.minute == 59:
            ticket = self.__openMarketOnOpenOrders[0]

            # check for fills
            if ticket.Status == OrderStatus.Filled:
                self.__openMarketOnOpenOrders = []
                return

            quantity = ticket.Quantity + 1
            self.Log("Updating quantity - New Quantity: {0}".format(quantity))

            # we can update the quantity and tag
            updateOrderFields = UpdateOrderFields()
            updateOrderFields.Quantity = quantity
            updateOrderFields.Tag = "Update #{0}".format(len(ticket.UpdateRequests) + 1)
            ticket.Update(updateOrderFields)

    def OnOrderEvent(self, orderEvent):
        # Log every order event together with the order's type for traceability.
        order = self.Transactions.GetOrderById(orderEvent.OrderId)
        self.Log("{0}: {1}: {2}".format(self.Time, order.Type, orderEvent))

    def CheckPairOrdersForFills(self, longOrder, shortOrder):
        # Returns True (and cancels the opposite leg) when either side of a
        # long/short ticket pair has filled; False when both are still open.
        if longOrder.Status == OrderStatus.Filled:
            self.Log("{0}: Cancelling short order, long order is filled.".format(shortOrder.OrderType))
            shortOrder.Cancel("Long filled.")
            return True

        if shortOrder.Status == OrderStatus.Filled:
            self.Log("{0}: Cancelling long order, short order is filled.".format(longOrder.OrderType))
            longOrder.Cancel("Short filled")
            return True

        return False

    def TimeIs(self, day, hour, minute):
        # True only at the exact simulated day/hour/minute.
        return self.Time.day == day and self.Time.hour == hour and self.Time.minute == minute

    def OnEndOfAlgorithm(self):
        # Sanity-check the Transactions query API at the end of the run.
        basicOrderTicketFilter = lambda x: x.Symbol == self.spy;

        filledOrders = self.Transactions.GetOrders(lambda x: x.Status == OrderStatus.Filled);
        orderTickets = self.Transactions.GetOrderTickets(basicOrderTicketFilter);
        openOrders = self.Transactions.GetOpenOrders(lambda x: x.Symbol == self.spy);
        openOrderTickets = self.Transactions.GetOpenOrderTickets(basicOrderTicketFilter);
        remainingOpenOrders = self.Transactions.GetOpenOrdersRemainingQuantity(basicOrderTicketFilter);

        # The type returned by self.Transactions.GetOrders() is iterable and not a list
        # that's why we use sum() to get the size of the iterable object type
        filledOrdersSize = sum(1 for order in filledOrders)
        orderTicketsSize = sum(1 for ticket in orderTickets)
        openOrderTicketsSize = sum(1 for ticket in openOrderTickets)

        assert(filledOrdersSize == 8 and orderTicketsSize == 10), "There were expected 8 filled orders and 10 order tickets"
        assert(not (len(openOrders) or openOrderTicketsSize)), "No open orders or tickets were expected"
        assert(not remainingOpenOrders), "No remaining quantiy to be filled from open orders was expected"

        spyOpenOrders = self.Transactions.GetOpenOrders(self.spy)
        spyOpenOrderTickets = self.Transactions.GetOpenOrderTickets(self.spy)
        spyOpenOrderTicketsSize = sum(1 for tickets in spyOpenOrderTickets)
        spyOpenOrdersRemainingQuantity = self.Transactions.GetOpenOrdersRemainingQuantity(self.spy)

        assert(not (len(spyOpenOrders) or spyOpenOrderTicketsSize)), "No open orders or tickets were expected"
        assert(not spyOpenOrdersRemainingQuantity), "No remaining quantiy to be filled from open orders was expected"

        # Calling the getters without a filter returns everything.
        defaultOrders = self.Transactions.GetOrders();
        defaultOrderTickets = self.Transactions.GetOrderTickets();
        defaultOpenOrders = self.Transactions.GetOpenOrders();
        defaultOpenOrderTickets = self.Transactions.GetOpenOrderTickets();
        defaultOpenOrdersRemaining = self.Transactions.GetOpenOrdersRemainingQuantity();

        defaultOrdersSize = sum(1 for order in defaultOrders)
        defaultOrderTicketsSize = sum(1 for ticket in defaultOrderTickets)
        defaultOpenOrderTicketsSize = sum(1 for ticket in defaultOpenOrderTickets)

        assert(defaultOrdersSize == 10 and defaultOrderTicketsSize == 10), "There were expected 10 orders and 10 order tickets"
        assert(not (len(defaultOpenOrders) or defaultOpenOrderTicketsSize)), "No open orders or tickets were expected"
        assert(not defaultOpenOrdersRemaining), "No remaining quantiy to be filled from open orders was expected"
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Inception-ResNet V2 model for Keras.
Reference:
- [Inception-v4, Inception-ResNet and the Impact of
Residual Connections on Learning](https://arxiv.org/abs/1602.07261)
(AAAI 2017)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras import backend
from tensorflow.python.keras.applications import imagenet_utils
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import VersionAwareLayers
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.lib.io import file_io
from tensorflow.python.util.tf_export import keras_export
BASE_WEIGHT_URL = ('https://storage.googleapis.com/tensorflow/'
'keras-applications/inception_resnet_v2/')
layers = None
@keras_export('keras.applications.inception_resnet_v2.InceptionResNetV2',
              'keras.applications.InceptionResNetV2')
def InceptionResNetV2(include_top=True,
                      weights='imagenet',
                      input_tensor=None,
                      input_shape=None,
                      pooling=None,
                      classes=1000,
                      classifier_activation='softmax',
                      **kwargs):
  """Instantiates the Inception-ResNet v2 architecture.

  Reference:
  - [Inception-v4, Inception-ResNet and the Impact of
     Residual Connections on Learning](https://arxiv.org/abs/1602.07261)
    (AAAI 2017)

  Optionally loads weights pre-trained on ImageNet.
  Note that the data format convention used by the model is
  the one specified in your Keras config at `~/.keras/keras.json`.

  Note: each Keras Application expects a specific kind of input preprocessing.
  For InceptionResNetV2, call
  `tf.keras.applications.inception_resnet_v2.preprocess_input`
  on your inputs before passing them to the model.

  Arguments:
    include_top: whether to include the fully-connected
      layer at the top of the network.
    weights: one of `None` (random initialization),
      'imagenet' (pre-training on ImageNet),
      or the path to the weights file to be loaded.
    input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
      to use as image input for the model.
    input_shape: optional shape tuple, only to be specified
      if `include_top` is `False` (otherwise the input shape
      has to be `(299, 299, 3)` (with `'channels_last'` data format)
      or `(3, 299, 299)` (with `'channels_first'` data format).
      It should have exactly 3 inputs channels,
      and width and height should be no smaller than 75.
      E.g. `(150, 150, 3)` would be one valid value.
    pooling: Optional pooling mode for feature extraction
      when `include_top` is `False`.
      - `None` means that the output of the model will be
          the 4D tensor output of the last convolutional block.
      - `'avg'` means that global average pooling
          will be applied to the output of the
          last convolutional block, and thus
          the output of the model will be a 2D tensor.
      - `'max'` means that global max pooling will be applied.
    classes: optional number of classes to classify images
      into, only to be specified if `include_top` is `True`, and
      if no `weights` argument is specified.
    classifier_activation: A `str` or callable. The activation function to use
      on the "top" layer. Ignored unless `include_top=True`. Set
      `classifier_activation=None` to return the logits of the "top" layer.
    **kwargs: For backwards compatibility only.
  Returns:
    A `keras.Model` instance.

  Raises:
    ValueError: in case of invalid argument for `weights`,
      or invalid input shape.
    ValueError: if `classifier_activation` is not `softmax` or `None` when
      using a pretrained top layer.
  """
  # `layers` is a module-level global so that conv2d_bn/inception_resnet_block
  # pick up the same (possibly caller-injected) layers module.
  global layers
  if 'layers' in kwargs:
    layers = kwargs.pop('layers')
  else:
    layers = VersionAwareLayers()
  if kwargs:
    raise ValueError('Unknown argument(s): %s' % (kwargs,))
  if not (weights in {'imagenet', None} or file_io.file_exists_v2(weights)):
    raise ValueError('The `weights` argument should be either '
                     '`None` (random initialization), `imagenet` '
                     '(pre-training on ImageNet), '
                     'or the path to the weights file to be loaded.')

  if weights == 'imagenet' and include_top and classes != 1000:
    raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
                     ' as true, `classes` should be 1000')

  # Determine proper input shape
  input_shape = imagenet_utils.obtain_input_shape(
      input_shape,
      default_size=299,
      min_size=75,
      data_format=backend.image_data_format(),
      require_flatten=include_top,
      weights=weights)

  if input_tensor is None:
    img_input = layers.Input(shape=input_shape)
  else:
    if not backend.is_keras_tensor(input_tensor):
      img_input = layers.Input(tensor=input_tensor, shape=input_shape)
    else:
      img_input = input_tensor

  # Stem block: 35 x 35 x 192
  x = conv2d_bn(img_input, 32, 3, strides=2, padding='valid')
  x = conv2d_bn(x, 32, 3, padding='valid')
  x = conv2d_bn(x, 64, 3)
  x = layers.MaxPooling2D(3, strides=2)(x)
  x = conv2d_bn(x, 80, 1, padding='valid')
  x = conv2d_bn(x, 192, 3, padding='valid')
  x = layers.MaxPooling2D(3, strides=2)(x)

  # Mixed 5b (Inception-A block): 35 x 35 x 320
  branch_0 = conv2d_bn(x, 96, 1)
  branch_1 = conv2d_bn(x, 48, 1)
  branch_1 = conv2d_bn(branch_1, 64, 5)
  branch_2 = conv2d_bn(x, 64, 1)
  branch_2 = conv2d_bn(branch_2, 96, 3)
  branch_2 = conv2d_bn(branch_2, 96, 3)
  branch_pool = layers.AveragePooling2D(3, strides=1, padding='same')(x)
  branch_pool = conv2d_bn(branch_pool, 64, 1)
  branches = [branch_0, branch_1, branch_2, branch_pool]
  channel_axis = 1 if backend.image_data_format() == 'channels_first' else 3
  x = layers.Concatenate(axis=channel_axis, name='mixed_5b')(branches)

  # 10x block35 (Inception-ResNet-A block): 35 x 35 x 320
  for block_idx in range(1, 11):
    x = inception_resnet_block(
        x, scale=0.17, block_type='block35', block_idx=block_idx)

  # Mixed 6a (Reduction-A block): 17 x 17 x 1088
  branch_0 = conv2d_bn(x, 384, 3, strides=2, padding='valid')
  branch_1 = conv2d_bn(x, 256, 1)
  branch_1 = conv2d_bn(branch_1, 256, 3)
  branch_1 = conv2d_bn(branch_1, 384, 3, strides=2, padding='valid')
  branch_pool = layers.MaxPooling2D(3, strides=2, padding='valid')(x)
  branches = [branch_0, branch_1, branch_pool]
  x = layers.Concatenate(axis=channel_axis, name='mixed_6a')(branches)

  # 20x block17 (Inception-ResNet-B block): 17 x 17 x 1088
  for block_idx in range(1, 21):
    x = inception_resnet_block(
        x, scale=0.1, block_type='block17', block_idx=block_idx)

  # Mixed 7a (Reduction-B block): 8 x 8 x 2080
  branch_0 = conv2d_bn(x, 256, 1)
  branch_0 = conv2d_bn(branch_0, 384, 3, strides=2, padding='valid')
  branch_1 = conv2d_bn(x, 256, 1)
  branch_1 = conv2d_bn(branch_1, 288, 3, strides=2, padding='valid')
  branch_2 = conv2d_bn(x, 256, 1)
  branch_2 = conv2d_bn(branch_2, 288, 3)
  branch_2 = conv2d_bn(branch_2, 320, 3, strides=2, padding='valid')
  branch_pool = layers.MaxPooling2D(3, strides=2, padding='valid')(x)
  branches = [branch_0, branch_1, branch_2, branch_pool]
  x = layers.Concatenate(axis=channel_axis, name='mixed_7a')(branches)

  # 10x block8 (Inception-ResNet-C block): 8 x 8 x 2080
  for block_idx in range(1, 10):
    x = inception_resnet_block(
        x, scale=0.2, block_type='block8', block_idx=block_idx)
  # Final block8 uses scale=1 and no activation before the last conv.
  x = inception_resnet_block(
      x, scale=1., activation=None, block_type='block8', block_idx=10)

  # Final convolution block: 8 x 8 x 1536
  x = conv2d_bn(x, 1536, 1, name='conv_7b')

  if include_top:
    # Classification block
    x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
    imagenet_utils.validate_activation(classifier_activation, weights)
    x = layers.Dense(classes, activation=classifier_activation,
                     name='predictions')(x)
  else:
    if pooling == 'avg':
      x = layers.GlobalAveragePooling2D()(x)
    elif pooling == 'max':
      x = layers.GlobalMaxPooling2D()(x)

  # Ensure that the model takes into account
  # any potential predecessors of `input_tensor`.
  if input_tensor is not None:
    inputs = layer_utils.get_source_inputs(input_tensor)
  else:
    inputs = img_input

  # Create model.
  model = training.Model(inputs, x, name='inception_resnet_v2')

  # Load weights.
  if weights == 'imagenet':
    if include_top:
      fname = 'inception_resnet_v2_weights_tf_dim_ordering_tf_kernels.h5'
      weights_path = data_utils.get_file(
          fname,
          BASE_WEIGHT_URL + fname,
          cache_subdir='models',
          file_hash='e693bd0210a403b3192acc6073ad2e96')
    else:
      fname = ('inception_resnet_v2_weights_'
               'tf_dim_ordering_tf_kernels_notop.h5')
      weights_path = data_utils.get_file(
          fname,
          BASE_WEIGHT_URL + fname,
          cache_subdir='models',
          file_hash='d19885ff4a710c122648d3b5c3b684e4')
    model.load_weights(weights_path)
  elif weights is not None:
    model.load_weights(weights)

  return model
def conv2d_bn(x,
              filters,
              kernel_size,
              strides=1,
              padding='same',
              activation='relu',
              use_bias=False,
              name=None):
  """Utility function to apply conv + BN.

  Arguments:
    x: input tensor.
    filters: filters in `Conv2D`.
    kernel_size: kernel size as in `Conv2D`.
    strides: strides in `Conv2D`.
    padding: padding mode in `Conv2D`.
    activation: activation in `Conv2D`.
    use_bias: whether to use a bias in `Conv2D`.
    name: name of the ops; will become `name + '_ac'` for the activation
        and `name + '_bn'` for the batch norm layer.

  Returns:
    Output tensor after applying `Conv2D` and `BatchNormalization`.
  """
  conv = layers.Conv2D(
      filters,
      kernel_size,
      strides=strides,
      padding=padding,
      use_bias=use_bias,
      name=name)
  out = conv(x)
  if not use_bias:
    # Batch norm replaces the conv bias; `scale=False` is fine since the
    # following (ReLU-style) activation is invariant to a linear scale.
    axis = 1 if backend.image_data_format() == 'channels_first' else 3
    out = layers.BatchNormalization(
        axis=axis,
        scale=False,
        name=None if name is None else name + '_bn')(out)
  if activation is not None:
    out = layers.Activation(
        activation,
        name=None if name is None else name + '_ac')(out)
  return out
def inception_resnet_block(x, scale, block_type, block_idx, activation='relu'):
  """Adds an Inception-ResNet block.

  This function builds 3 types of Inception-ResNet blocks mentioned
  in the paper, controlled by the `block_type` argument (which is the
  block name used in the official TF-slim implementation):
  - Inception-ResNet-A: `block_type='block35'`
  - Inception-ResNet-B: `block_type='block17'`
  - Inception-ResNet-C: `block_type='block8'`

  Arguments:
    x: input tensor.
    scale: scaling factor to scale the residuals (i.e., the output of passing
      `x` through an inception module) before adding them to the shortcut
      branch. Let `r` be the output from the residual branch, the output of this
      block will be `x + scale * r`.
    block_type: `'block35'`, `'block17'` or `'block8'`, determines the network
      structure in the residual branch.
    block_idx: an `int` used for generating layer names. The Inception-ResNet
      blocks are repeated many times in this network. We use `block_idx` to
      identify each of the repetitions. For example, the first
      Inception-ResNet-A block will have `block_type='block35', block_idx=0`,
      and the layer names will have a common prefix `'block35_0'`.
    activation: activation function to use at the end of the block (see
      [activations](../activations.md)). When `activation=None`, no activation
      is applied
      (i.e., "linear" activation: `a(x) = x`).

  Returns:
    Output tensor for the block.

  Raises:
    ValueError: if `block_type` is not one of `'block35'`,
      `'block17'` or `'block8'`.
  """
  # Build the parallel residual branches for the requested block type.
  if block_type == 'block35':
    branch_0 = conv2d_bn(x, 32, 1)
    branch_1 = conv2d_bn(x, 32, 1)
    branch_1 = conv2d_bn(branch_1, 32, 3)
    branch_2 = conv2d_bn(x, 32, 1)
    branch_2 = conv2d_bn(branch_2, 48, 3)
    branch_2 = conv2d_bn(branch_2, 64, 3)
    branches = [branch_0, branch_1, branch_2]
  elif block_type == 'block17':
    branch_0 = conv2d_bn(x, 192, 1)
    branch_1 = conv2d_bn(x, 128, 1)
    branch_1 = conv2d_bn(branch_1, 160, [1, 7])
    branch_1 = conv2d_bn(branch_1, 192, [7, 1])
    branches = [branch_0, branch_1]
  elif block_type == 'block8':
    branch_0 = conv2d_bn(x, 192, 1)
    branch_1 = conv2d_bn(x, 192, 1)
    branch_1 = conv2d_bn(branch_1, 224, [1, 3])
    branch_1 = conv2d_bn(branch_1, 256, [3, 1])
    branches = [branch_0, branch_1]
  else:
    raise ValueError('Unknown Inception-ResNet block type. '
                     'Expects "block35", "block17" or "block8", '
                     'but got: ' + str(block_type))

  block_name = block_type + '_' + str(block_idx)
  channel_axis = 1 if backend.image_data_format() == 'channels_first' else 3
  mixed = layers.Concatenate(
      axis=channel_axis, name=block_name + '_mixed')(
          branches)
  # 1x1 conv projects the concatenated branches back to the input's channel
  # count so the residual can be added to the shortcut.
  up = conv2d_bn(
      mixed,
      backend.int_shape(x)[channel_axis],
      1,
      activation=None,
      use_bias=True,
      name=block_name + '_conv')

  # Residual connection: output = x + scale * residual. `scale` is passed via
  # `arguments` so it is bound at layer-construction time (avoids Python
  # late-binding-closure issues when the block is built in a loop).
  x = layers.Lambda(
      lambda inputs, scale: inputs[0] + inputs[1] * scale,
      output_shape=backend.int_shape(x)[1:],
      arguments={'scale': scale},
      name=block_name)([x, up])
  if activation is not None:
    x = layers.Activation(activation, name=block_name + '_ac')(x)
  return x
@keras_export('keras.applications.inception_resnet_v2.preprocess_input')
def preprocess_input(x, data_format=None):
  # mode='tf' scales input pixels to the [-1, 1] range, sample-wise.
  # The public docstring is attached at module level further below.
  return imagenet_utils.preprocess_input(x, data_format=data_format, mode='tf')
@keras_export('keras.applications.inception_resnet_v2.decode_predictions')
def decode_predictions(preds, top=5):
  # Thin wrapper; the shared docstring is copied at module level further below.
  return imagenet_utils.decode_predictions(preds, top=top)
# Reuse the shared Keras-applications docstrings for the module-level helpers.
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
    mode='',
    ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
    error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
| |
import os
import re
import sys
import fnmatch
import os.path
# for command line options and supported environment variables, please
# see the end of 'setupinfo.py'
# Chained comparison: True only when the interpreter is NOT 2.7 AND is
# older than 3.5 — i.e. reject every unsupported version in one test.
if (2, 7) != sys.version_info[:2] < (3, 5):
    print("This lxml version requires Python 2.7, 3.5 or later.")
    sys.exit(1)

try:
    from setuptools import setup
except ImportError:
    # Fall back to plain distutils when setuptools is not installed.
    from distutils.core import setup

# make sure Cython finds include files in the project directory and not outside
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'src'))

import versioninfo
import setupinfo
# override these and pass --static for a static build. See
# doc/build.txt for more information. If you do not pass --static
# changing this will have no effect.
def static_env_list(name, separator=None):
    """Read environment variable *name* and split it into a list of
    non-empty, whitespace-stripped entries.

    ``separator=None`` splits on arbitrary whitespace (``str.split``
    semantics); an unset variable yields an empty list.
    """
    raw_value = os.environ.get(name, "")
    entries = []
    for part in raw_value.split(separator):
        stripped = part.strip()
        if stripped:
            entries.append(stripped)
    return entries
# Path-like lists use os.pathsep; CFLAGS are whitespace-separated.
STATIC_INCLUDE_DIRS = static_env_list("LXML_STATIC_INCLUDE_DIRS", separator=os.pathsep)
STATIC_LIBRARY_DIRS = static_env_list("LXML_STATIC_LIBRARY_DIRS", separator=os.pathsep)
STATIC_CFLAGS = static_env_list("LXML_STATIC_CFLAGS")
STATIC_BINARIES = static_env_list("LXML_STATIC_BINARIES", separator=os.pathsep)

# create lxml-version.h file
versioninfo.create_version_h()
lxml_version = versioninfo.version()
print("Building lxml version %s." % lxml_version)

OPTION_RUN_TESTS = setupinfo.has_option('run-tests')

# Blurb appended to the long description, pointing at the maintenance
# branch; %(branch_version)s placeholders are filled in below.
branch_link = """
After an official release of a new stable series, bug fixes may become
available at
https://github.com/lxml/lxml/tree/lxml-%(branch_version)s .
Running ``easy_install lxml==%(branch_version)sbugfix`` will install
the unreleased branch state from
https://github.com/lxml/lxml/tarball/lxml-%(branch_version)s#egg=lxml-%(branch_version)sbugfix
as soon as a maintenance branch has been established. Note that this
requires Cython to be installed at an appropriate version for the build.
"""

# Pre-releases have no maintenance branch to advertise yet.
if versioninfo.is_pre_release():
    branch_link = ""

extra_options = {}
# Options below are only understood by setuptools, not plain distutils.
if 'setuptools' in sys.modules:
    extra_options['zip_safe'] = False
    extra_options['python_requires'] = (
        # NOTE: keep in sync with Trove classifier list below.
        '>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, != 3.4.*')
# Declare optional feature dependencies ("extras") when setuptools'
# pkg_resources is available; plain distutils installs skip this.
try:
    import pkg_resources
except ImportError:
    # distutils fallback: no extras_require support.
    pass
else:
    # Use a context manager so the file handle is released even if
    # parse_requirements() raises (the old code hand-rolled try/finally
    # around open()/close()).
    with open("requirements.txt", "r") as f:
        deps = [str(req) for req in pkg_resources.parse_requirements(f)]
    extra_options['extras_require'] = {
        # 'source' pins the build-time (Cython) requirements.
        'source': deps,
        'cssselect': 'cssselect>=0.7',
        'html5': 'html5lib',
        'htmlsoup': 'BeautifulSoup4',
    }
extra_options.update(setupinfo.extra_setup_args())

# Ship the public C headers so third-party C extensions can compile
# against lxml, plus the RelaxNG/XSLT resources used by isoschematron.
extra_options['package_data'] = {
    'lxml': [
        'etree.h',
        'etree_api.h',
        'lxml.etree.h',
        'lxml.etree_api.h',
    ],
    'lxml.includes': [
        '*.pxd', '*.h'
    ],
    'lxml.isoschematron': [
        'resources/rng/iso-schematron.rng',
        'resources/xsl/*.xsl',
        'resources/xsl/iso-schematron-xslt1/*.xsl',
        'resources/xsl/iso-schematron-xslt1/readme.txt'
    ],
}

# All Python packages live under src/.
extra_options['package_dir'] = {
    '': 'src'
}

extra_options['packages'] = [
    'lxml', 'lxml.includes', 'lxml.html', 'lxml.isoschematron'
]
def setup_extra_options():
    """Assemble the final keyword arguments for setup().

    Starts from the module-level ``extra_options`` and adds the compiled
    extension modules.  For ``--static`` builds, the libxml2/libxslt/
    libexslt headers found on the include path are additionally bundled
    as ``lxml.includes.*`` packages so they are installed with lxml.
    """
    # Bug fix: use raw strings for the regex patterns.  The original
    # '^(zconf|zlib|.*charset)\\.h$' was written as a plain string with
    # the invalid escape sequence '\.', which Python only tolerates as a
    # deprecated fallback (DeprecationWarning/SyntaxWarning).
    is_interesting_package = re.compile(r'^(libxml|libxslt|libexslt)$').match
    is_interesting_header = re.compile(r'^(zconf|zlib|.*charset)\.h$').match

    def extract_files(directories, pattern='*'):
        # Collect (root, relative_dir, filename) triples for files under
        # ``directories`` matching ``pattern`` that belong to one of the
        # interesting library subdirectories (or are top-level headers).
        def get_files(root, dir_path, files):
            return [ (root, dir_path, filename)
                     for filename in fnmatch.filter(files, pattern) ]

        file_list = []
        for dir_path in directories:
            dir_path = os.path.realpath(dir_path)
            for root, dirs, files in os.walk(dir_path):
                rel_dir = root[len(dir_path)+1:]
                if is_interesting_package(rel_dir):
                    file_list.extend(get_files(root, rel_dir, files))
                elif not rel_dir:
                    # include also top-level header files (zlib/iconv)
                    file_list.extend(
                        item for item in get_files(root, rel_dir, files)
                        if is_interesting_header(item[-1])
                    )
        return file_list

    def build_packages(files):
        # Map dotted package path -> (root_dir, [filenames]); duplicate
        # header names across roots are skipped (first one wins).
        packages = {}
        seen = set()
        for root_path, rel_path, filename in files:
            if filename in seen:
                # libxml2/libxslt header filenames are unique
                continue
            seen.add(filename)
            package_path = '.'.join(rel_path.split(os.sep))
            if package_path in packages:
                root, package_files = packages[package_path]
                if root != root_path:
                    print("WARNING: conflicting directories found for include package '%s': %s and %s"
                          % (package_path, root_path, root))
                    continue
            else:
                package_files = []
                packages[package_path] = (root_path, package_files)
            package_files.append(filename)
        return packages

    # Copy Global Extra Options
    extra_opts = dict(extra_options)

    # Build ext modules
    ext_modules = setupinfo.ext_modules(
        STATIC_INCLUDE_DIRS, STATIC_LIBRARY_DIRS,
        STATIC_CFLAGS, STATIC_BINARIES)
    extra_opts['ext_modules'] = ext_modules

    packages = extra_opts.get('packages', list())
    package_dir = extra_opts.get('package_dir', dict())
    package_data = extra_opts.get('package_data', dict())

    # Add lxml.include with (lxml, libxslt headers...)
    #   python setup.py build --static --static-deps install
    #   python setup.py bdist_wininst --static
    if setupinfo.OPTION_STATIC:
        include_dirs = []  # keep them in order
        for extension in ext_modules:
            for inc_dir in extension.include_dirs:
                if inc_dir not in include_dirs:
                    include_dirs.append(inc_dir)

        header_packages = build_packages(extract_files(include_dirs))

        package_filename = "__init__.py"
        for package_path, (root_path, filenames) in header_packages.items():
            if not package_path:
                # lxml.includes -> lxml.includes.extlibs
                package_path = "extlibs"
            package = 'lxml.includes.' + package_path
            packages.append(package)
            # create '__init__.py' to make sure it's considered a package
            if package_filename not in filenames:
                with open(os.path.join(root_path, package_filename), 'wb') as f:
                    pass
                filenames.append(package_filename)
            assert package not in package_data
            package_data[package] = filenames
            assert package not in package_dir
            package_dir[package] = root_path

    return extra_opts
# Static metadata lives here; everything build-related (extensions,
# header packages, extras) is supplied by setup_extra_options().
setup(
    name = "lxml",
    version = lxml_version,
    author="lxml dev team",
    author_email="lxml-dev@lxml.de",
    maintainer="lxml dev team",
    maintainer_email="lxml-dev@lxml.de",
    license="BSD",
    url="https://lxml.de/",
    # Commented out because this causes distutils to emit warnings
    # `Unknown distribution option: 'bugtrack_url'`
    # which distract folks from real causes of problems when troubleshooting
    # bugtrack_url="https://bugs.launchpad.net/lxml",
    project_urls={
        "Source": "https://github.com/lxml/lxml",
    },
    description=(
        "Powerful and Pythonic XML processing library"
        " combining libxml2/libxslt with the ElementTree API."
    ),
    # The long description is the static blurb, plus the bugfix-branch
    # note (empty for pre-releases) with its version placeholders
    # filled in, plus the generated changelog.
    long_description=((("""\
lxml is a Pythonic, mature binding for the libxml2 and libxslt libraries. It
provides safe and convenient access to these libraries using the ElementTree
API.
It extends the ElementTree API significantly to offer support for XPath,
RelaxNG, XML Schema, XSLT, C14N and much more.
To contact the project, go to the `project home page
<https://lxml.de/>`_ or see our bug tracker at
https://launchpad.net/lxml
In case you want to use the current in-development version of lxml,
you can get it from the github repository at
https://github.com/lxml/lxml . Note that this requires Cython to
build the sources, see the build instructions on the project home
page. To the same end, running ``easy_install lxml==dev`` will
install lxml from
https://github.com/lxml/lxml/tarball/master#egg=lxml-dev if you have
an appropriate version of Cython installed.
""" + branch_link) % {"branch_version": versioninfo.branch_version()}) +
    versioninfo.changes()),
    classifiers=[
        versioninfo.dev_status(),
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Cython',
        # NOTE: keep in sync with 'python_requires' list above.
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Programming Language :: C',
        'Operating System :: OS Independent',
        'Topic :: Text Processing :: Markup :: HTML',
        'Topic :: Text Processing :: Markup :: XML',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ],
    **setup_extra_options()
)
# Optionally run the test suite after a successful build
# (enabled via the 'run-tests' command line option).
if OPTION_RUN_TESTS:
    print("Running tests.")
    import test
    try:
        sys.exit( test.main(sys.argv[:1]) )
    except ImportError:
        pass # we assume that the binaries were not built with this setup.py run
| |
# Copyright 2014 Knowledge Economy Developments Ltd
#
# Henry Gomersall
# heng@kedevelopments.co.uk
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from pyfftw import n_byte_align_empty, n_byte_align, interfaces
from .test_pyfftw_base import run_test_suites
import unittest
import numpy
from numpy import fft as np_fft
import inspect
import warnings
import copy
warnings.filterwarnings('always')


def parse_version_tuple(version_string):
    """Return up to the first three numeric components of a version
    string as a tuple of ints, e.g. '1.6.2' -> (1, 6, 2).

    Trailing non-digit suffixes such as 'rc1' or 'dev' are ignored, so
    '1.7.0rc1' parses as (1, 7, 0); a fully non-numeric component
    contributes 0.
    """
    components = []
    for chunk in version_string.split('.')[:3]:
        digits = ''
        for char in chunk:
            if not char.isdigit():
                break
            digits += char
        components.append(int(digits or 0))
    return tuple(components)


# Bug fix: the original compared version *strings* lexicographically
# ("numpy.version.version <= '1.6.2'"), which is True for e.g. '1.10.0'
# and would wrongly patch much newer numpy releases.  Compare numeric
# tuples instead.
if parse_version_tuple(numpy.version.version) <= (1, 6, 2):
    # We overwrite the broken _cook_nd_args with a fixed version.
    from ._cook_nd_args import _cook_nd_args
    numpy.fft.fftpack._cook_nd_args = _cook_nd_args
# dtypes exercised by the complex and real transforms respectively;
# clongdouble/longdouble cover the extended-precision code paths.
complex_dtypes = (numpy.complex64, numpy.complex128, numpy.clongdouble)
real_dtypes = (numpy.float32, numpy.float64, numpy.longdouble)
def make_complex_data(shape, dtype):
    """Return a random complex array of the given shape, with
    independently drawn real and imaginary parts, cast via *dtype*."""
    samples = dtype(numpy.random.randn(2, *shape))
    return samples[0] + 1j * samples[1]
def make_real_data(shape, dtype):
    """Return a random real array of the given shape, cast via *dtype*."""
    random_values = numpy.random.randn(*shape)
    return dtype(random_values)
# Maps each interface function name to its transform category:
# 'complex' (complex-to-complex), 'r2c' (real in, complex out) or
# 'c2r' (complex in, real out).  The category selects the input dtypes
# and data constructor used by the test classes below.
functions = {
    'fft': 'complex',
    'ifft': 'complex',
    'rfft': 'r2c',
    'irfft': 'c2r',
    'rfftn': 'r2c',
    'hfft': 'c2r',
    'ihfft': 'r2c',
    'irfftn': 'c2r',
    'rfft2': 'r2c',
    'irfft2': 'c2r',
    'fft2': 'complex',
    'ifft2': 'complex',
    'fftn': 'complex',
    'ifftn': 'complex'}

# Names interfaces.numpy_fft should simply re-export from numpy.fft.
acquired_names = ('fftfreq', 'fftshift', 'ifftshift')
class InterfacesNumpyFFTTestModule(unittest.TestCase):
    '''A really simple test suite to check the module works as expected.
    '''

    def test_acquired_names(self):
        # The helpers listed in acquired_names must be re-exported from
        # numpy.fft as the identical objects, not copies or wrappers.
        for name in acquired_names:
            self.assertIs(getattr(numpy.fft, name),
                          getattr(interfaces.numpy_fft, name))
class InterfacesNumpyFFTTestFFT(unittest.TestCase):
    '''Base test battery for one interface function (default: 'fft').

    Each subclass overrides ``func`` (and optionally ``axes_kw``,
    ``test_shapes``, ``invalid_args``, ``realinv``) to drive the same
    validation logic against a different transform.  Every call to the
    interface under test is checked against ``validator_module``
    (numpy.fft) on the same data.
    '''
    # category -> (input dtypes, data constructor); see `functions` above.
    io_dtypes = {
        'complex': (complex_dtypes, make_complex_data),
        'r2c': (real_dtypes, make_real_data),
        'c2r': (complex_dtypes, make_complex_data)}

    validator_module = np_fft
    test_interface = interfaces.numpy_fft
    func = 'fft'
    # '1D' functions take 'axis'; the nD subclasses switch this to 'axes'.
    axes_kw = 'axis'
    overwrite_input_flag = 'overwrite_input'
    # Slice used to derive a default `s` from the shape when the function
    # signature gives no usable default axes.
    default_s_from_shape_slicer = slice(-1, None)

    # (shape, kwargs) pairs fed to every test.
    test_shapes = (
        ((100,), {}),
        ((128, 64), {'axis': 0}),
        ((128, 32), {'axis': -1}),
        ((59, 100), {}),
        ((59, 99), {'axis': -1}),
        ((59, 99), {'axis': 0}),
        ((32, 32, 4), {'axis': 1}),
        ((64, 128, 16), {}),
    )

    # invalid_s_shapes is:
    # (size, invalid_args, error_type, error_string)
    invalid_args = (
        ((100,), ((100, 200),), TypeError, ''),
        ((100, 200), ((100, 200),), TypeError, ''),
        ((100,), (100, (-2, -1)), TypeError, ''),
        ((100,), (100, -20), IndexError, ''))

    # True for inverse-real transforms, whose complex input has a halved
    # (n//2 + 1) last transformed axis.
    realinv = False

    @property
    def test_data(self):
        # Yields (shape, s, kwargs) triples derived from test_shapes.
        for test_shape, kwargs in self.test_shapes:
            axes = self.axes_from_kwargs(kwargs)
            s = self.s_from_kwargs(test_shape, kwargs)
            if self.realinv:
                # c2r input arrays carry the halved hermitian axis.
                test_shape = list(test_shape)
                test_shape[axes[-1]] = test_shape[axes[-1]]//2 + 1
                test_shape = tuple(test_shape)
            yield test_shape, s, kwargs

    def __init__(self, *args, **kwargs):
        super(InterfacesNumpyFFTTestFFT, self).__init__(*args, **kwargs)

        # Assume python 3, but keep backwards compatibility
        if not hasattr(self, 'assertRaisesRegex'):
            self.assertRaisesRegex = self.assertRaisesRegexp

    def validate(self, array_type, test_shape, dtype,
                 s, kwargs):
        # Do it without the cache
        # without:
        interfaces.cache.disable()
        self._validate(array_type, test_shape, dtype, s, kwargs)

    def munge_input_array(self, array, kwargs):
        # Hook for subclasses to transform the input before the call;
        # identity here.
        return array

    def _validate(self, array_type, test_shape, dtype,
                  s, kwargs):
        # Core check: run the interface and numpy.fft on the same input
        # and compare outputs, exception types and input preservation.
        input_array = self.munge_input_array(
            array_type(test_shape, dtype), kwargs)

        orig_input_array = copy.copy(input_array)

        np_input_array = numpy.asarray(input_array)

        # numpy.fft has no extended-precision path; downcast for the
        # reference computation.
        if np_input_array.dtype == 'clongdouble':
            np_input_array = numpy.complex128(input_array)
        elif np_input_array.dtype == 'longdouble':
            np_input_array = numpy.float64(input_array)

        with warnings.catch_warnings(record=True) as w:
            # We catch the warnings so as to pick up on when
            # a complex array is turned into a real array

            if 'axes' in kwargs:
                axes = {'axes': kwargs['axes']}
            elif 'axis' in kwargs:
                axes = {'axis': kwargs['axis']}
            else:
                axes = {}

            try:
                test_out_array = getattr(self.validator_module, self.func)(
                    copy.copy(np_input_array), s, **axes)
            except Exception as e:
                interface_exception = None
                try:
                    getattr(self.test_interface, self.func)(
                        copy.copy(input_array), s, **kwargs)
                except Exception as _interface_exception:
                    # It's necessary to assign the exception to the
                    # already defined variable in Python 3.
                    # See http://www.python.org/dev/peps/pep-3110/#semantic-changes
                    interface_exception = _interface_exception

                # If the test interface raised, so must this.
                self.assertEqual(type(interface_exception), type(e),
                                 msg='Interface exception raised. ' +
                                 'Testing for: ' + repr(e))
                return

            output_array = getattr(self.test_interface, self.func)(
                copy.copy(input_array), s, **kwargs)

            if (functions[self.func] == 'r2c'):
                if numpy.iscomplexobj(input_array):
                    if len(w) > 0:
                        # Make sure a warning is raised
                        self.assertIs(
                            w[-1].category, numpy.ComplexWarning)

        # Loose tolerances: single precision inputs dominate the error.
        self.assertTrue(
            numpy.allclose(output_array, test_out_array,
                           rtol=1e-2, atol=1e-4))

        # Output precision must follow the input's real precision.
        input_precision_dtype = numpy.asanyarray(input_array).real.dtype
        self.assertEqual(input_precision_dtype,
                         output_array.real.dtype)

        # Unless overwrite_input was requested, the input is untouched.
        if (not self.overwrite_input_flag in kwargs or
                not kwargs[self.overwrite_input_flag]):
            self.assertTrue(numpy.allclose(input_array,
                                           orig_input_array))
        return output_array

    def axes_from_kwargs(self, kwargs):
        # Resolve the effective transform axes from explicit kwargs or
        # the interface function's own defaults.
        argspec = inspect.getargspec(getattr(self.test_interface, self.func))
        default_args = dict(list(zip(
            argspec.args[-len(argspec.defaults):], argspec.defaults)))

        if 'axis' in kwargs:
            axes = (kwargs['axis'],)
        elif 'axes' in kwargs:
            axes = kwargs['axes']
            if axes is None:
                axes = default_args['axes']
        else:
            if 'axis' in default_args:
                # default 1D
                axes = (default_args['axis'],)
            else:
                # default nD
                axes = default_args['axes']

        if axes is None:
            axes = (-1,)
        return axes

    def s_from_kwargs(self, test_shape, kwargs):
        ''' Return either a scalar s or a tuple depending on
        whether axis or axes is specified
        '''
        argspec = inspect.getargspec(getattr(self.test_interface, self.func))
        default_args = dict(list(zip(
            argspec.args[-len(argspec.defaults):], argspec.defaults)))

        if 'axis' in kwargs:
            s = test_shape[kwargs['axis']]
        elif 'axes' in kwargs:
            axes = kwargs['axes']
            if axes is not None:
                s = []
                for each_axis in axes:
                    s.append(test_shape[each_axis])
            else:
                # default nD
                s = []
                try:
                    for each_axis in default_args['axes']:
                        s.append(test_shape[each_axis])
                except TypeError:
                    try:
                        s = list(test_shape[
                            self.default_s_from_shape_slicer])
                    except TypeError:
                        # We had an integer as the default, so force
                        # it to be a list
                        s = [test_shape[self.default_s_from_shape_slicer]]
        else:
            if 'axis' in default_args:
                # default 1D
                s = test_shape[default_args['axis']]
            else:
                # default nD
                s = []
                try:
                    for each_axis in default_args['axes']:
                        s.append(test_shape[each_axis])
                except TypeError:
                    s = None

        return s

    def test_valid(self):
        # Transform with s=None (derive sizes from the input shape).
        dtype_tuple = self.io_dtypes[functions[self.func]]
        for dtype in dtype_tuple[0]:
            for test_shape, s, kwargs in self.test_data:
                s = None
                self.validate(dtype_tuple[1],
                              test_shape, dtype, s, kwargs)

    def test_on_non_numpy_array(self):
        # Plain nested lists must be accepted as input too.
        dtype_tuple = self.io_dtypes[functions[self.func]]
        array_type = (lambda test_shape, dtype:
                      dtype_tuple[1](test_shape, dtype).tolist())
        for dtype in dtype_tuple[0]:
            for test_shape, s, kwargs in self.test_data:
                s = None
                self.validate(array_type,
                              test_shape, dtype, s, kwargs)

    def test_fail_on_invalid_s_or_axes(self):
        dtype_tuple = self.io_dtypes[functions[self.func]]
        for dtype in dtype_tuple[0]:

            for test_shape, args, exception, e_str in self.invalid_args:
                input_array = dtype_tuple[1](test_shape, dtype)

                self.assertRaisesRegex(exception, e_str,
                                       getattr(self.test_interface, self.func),
                                       *((input_array,) + args))

    def test_same_sized_s(self):
        # s exactly matches the transformed axes' sizes.
        dtype_tuple = self.io_dtypes[functions[self.func]]
        for dtype in dtype_tuple[0]:
            for test_shape, s, kwargs in self.test_data:
                self.validate(dtype_tuple[1],
                              test_shape, dtype, s, kwargs)

    def test_bigger_s(self):
        # s larger than the input (zero padding path).
        dtype_tuple = self.io_dtypes[functions[self.func]]
        for dtype in dtype_tuple[0]:
            for test_shape, s, kwargs in self.test_data:
                try:
                    for each_axis, length in enumerate(s):
                        s[each_axis] += 2
                except TypeError:
                    # scalar s
                    s += 2

                self.validate(dtype_tuple[1],
                              test_shape, dtype, s, kwargs)

    def test_smaller_s(self):
        # s smaller than the input (truncation path).
        dtype_tuple = self.io_dtypes[functions[self.func]]
        for dtype in dtype_tuple[0]:
            for test_shape, s, kwargs in self.test_data:
                try:
                    for each_axis, length in enumerate(s):
                        s[each_axis] -= 2
                except TypeError:
                    # scalar s
                    s -= 2

                self.validate(dtype_tuple[1],
                              test_shape, dtype, s, kwargs)

    def check_arg(self, arg, arg_test_values, array_type, test_shape,
                  dtype, s, kwargs):
        '''Check that the correct arg is passed to the builder'''
        # We trust the builders to work as expected when passed
        # the correct arg (the builders have their own unittests).

        return_values = []
        input_array = array_type(test_shape, dtype)

        def fake_fft(*args, **kwargs):
            return_values.append((args, kwargs))
            return (args, kwargs)

        try:
            # Replace the function that is to be used
            real_fft = getattr(self.test_interface, self.func)
            setattr(self.test_interface, self.func, fake_fft)

            _kwargs = kwargs.copy()

            for each_value in arg_test_values:
                _kwargs[arg] = each_value
                builder_args = getattr(self.test_interface, self.func)(
                    input_array.copy(), s, **_kwargs)

                self.assertTrue(builder_args[1][arg] == each_value)

            # make sure it was called
            self.assertTrue(len(return_values) > 0)
        except:
            raise

        finally:
            # Make sure we set it back
            setattr(self.test_interface, self.func, real_fft)

        # Validate it aswell
        for each_value in arg_test_values:
            _kwargs[arg] = each_value
            builder_args = getattr(self.test_interface, self.func)(
                input_array.copy(), s, **_kwargs)

            self.validate(array_type, test_shape, dtype, s, _kwargs)

    def test_auto_align_input(self):
        dtype_tuple = self.io_dtypes[functions[self.func]]
        for dtype in dtype_tuple[0]:
            for test_shape, s, kwargs in self.test_data:
                self.check_arg('auto_align_input', (True, False),
                               dtype_tuple[1], test_shape, dtype, s, kwargs)

    def test_auto_contiguous_input(self):
        dtype_tuple = self.io_dtypes[functions[self.func]]
        for dtype in dtype_tuple[0]:
            for test_shape, s, kwargs in self.test_data:
                self.check_arg('auto_contiguous', (True, False),
                               dtype_tuple[1], test_shape, dtype, s, kwargs)

    def test_bigger_and_smaller_s(self):
        # Alternate padding/truncation across shapes.
        # NOTE(review): `i *= i` turns -1 into 1 and then keeps it at 1,
        # so only the first shape is shrunk — presumably `i *= -1` was
        # intended to alternate; preserved as-is.
        dtype_tuple = self.io_dtypes[functions[self.func]]
        for dtype in dtype_tuple[0]:
            i = -1
            for test_shape, s, kwargs in self.test_data:
                try:
                    for each_axis, length in enumerate(s):
                        s[each_axis] += i * 2
                        i *= i
                except TypeError:
                    # scalar s
                    s += i * 2
                    i *= i

                self.validate(dtype_tuple[1],
                              test_shape, dtype, s, kwargs)

    def test_dtype_coercian(self):
        # Make sure we input a dtype that needs to be coerced
        if functions[self.func] == 'r2c':
            dtype_tuple = self.io_dtypes['complex']
        else:
            dtype_tuple = self.io_dtypes['r2c']

        for dtype in dtype_tuple[0]:
            for test_shape, s, kwargs in self.test_data:
                s = None
                self.validate(dtype_tuple[1],
                              test_shape, dtype, s, kwargs)

    def test_planner_effort(self):
        '''Test the planner effort arg
        '''
        dtype_tuple = self.io_dtypes[functions[self.func]]
        test_shape = (16,)

        for dtype in dtype_tuple[0]:
            s = None
            if self.axes_kw == 'axis':
                kwargs = {'axis': -1}
            else:
                kwargs = {'axes': (-1,)}

            for each_effort in ('FFTW_ESTIMATE', 'FFTW_MEASURE',
                                'FFTW_PATIENT', 'FFTW_EXHAUSTIVE'):

                kwargs['planner_effort'] = each_effort

                self.validate(
                    dtype_tuple[1], test_shape, dtype, s, kwargs)

            kwargs['planner_effort'] = 'garbage'

            self.assertRaisesRegex(ValueError, 'Invalid planner effort',
                                   self.validate,
                                   *(dtype_tuple[1], test_shape, dtype, s, kwargs))

    def test_threads_arg(self):
        '''Test the threads argument
        '''
        dtype_tuple = self.io_dtypes[functions[self.func]]
        test_shape = (16,)

        for dtype in dtype_tuple[0]:
            s = None
            if self.axes_kw == 'axis':
                kwargs = {'axis': -1}
            else:
                kwargs = {'axes': (-1,)}

            self.check_arg('threads', (1, 2, 5, 10),
                           dtype_tuple[1], test_shape, dtype, s, kwargs)

            kwargs['threads'] = 'bleh'

            # Should not work
            self.assertRaises(TypeError,
                              self.validate,
                              *(dtype_tuple[1], test_shape, dtype, s, kwargs))

    def test_overwrite_input(self):
        '''Test the overwrite_input flag
        '''
        dtype_tuple = self.io_dtypes[functions[self.func]]

        for dtype in dtype_tuple[0]:
            for test_shape, s, _kwargs in self.test_data:
                s = None

                kwargs = _kwargs.copy()
                self.validate(dtype_tuple[1], test_shape, dtype, s, kwargs)

                self.check_arg(self.overwrite_input_flag, (True, False),
                               dtype_tuple[1], test_shape, dtype, s, kwargs)

    def test_input_maintained(self):
        '''Test to make sure the input is maintained by default.
        '''
        dtype_tuple = self.io_dtypes[functions[self.func]]
        for dtype in dtype_tuple[0]:
            for test_shape, s, kwargs in self.test_data:

                input_array = dtype_tuple[1](test_shape, dtype)

                orig_input_array = input_array.copy()

                getattr(self.test_interface, self.func)(
                    input_array, s, **kwargs)

                self.assertTrue(
                    numpy.alltrue(input_array == orig_input_array))
class InterfacesNumpyFFTTestIFFT(InterfacesNumpyFFTTestFFT):
    # Same battery, driven against the inverse complex transform.
    func = 'ifft'
class InterfacesNumpyFFTTestRFFT(InterfacesNumpyFFTTestFFT):
    # Real-input forward transform (r2c category).
    func = 'rfft'
class InterfacesNumpyFFTTestIRFFT(InterfacesNumpyFFTTestFFT):
    # Inverse real transform: complex input with halved hermitian axis.
    func = 'irfft'
    realinv = True
class InterfacesNumpyFFTTestHFFT(InterfacesNumpyFFTTestFFT):
    # Hermitian transform: complex input, real output (c2r category).
    func = 'hfft'
    realinv = True
class InterfacesNumpyFFTTestIHFFT(InterfacesNumpyFFTTestFFT):
    # Inverse hermitian transform: real input, complex output.
    func = 'ihfft'
class InterfacesNumpyFFTTestFFT2(InterfacesNumpyFFTTestFFT):
    '''2D variant of the FFT test battery.

    Bug fix: ``func`` previously read ``'ifft2'``, duplicating
    InterfacesNumpyFFTTestIFFT2 below and leaving the forward ``fft2``
    untested; it now exercises ``fft2``.
    '''
    axes_kw = 'axes'
    func = 'fft2'
    test_shapes = (
        ((128, 64), {'axes': None}),
        ((128, 32), {'axes': None}),
        ((128, 32, 4), {'axes': (0, 2)}),
        ((59, 100), {'axes': (-2, -1)}),
        ((64, 128, 16), {'axes': (0, 2)}),
        ((4, 6, 8, 4), {'axes': (0, 3)}),
    )

    invalid_args = (
        ((100,), ((100, 200),), ValueError, 'Shape error'),
        ((100, 200), ((100, 200, 100),), ValueError, 'Shape error'),
        ((100,), ((100, 200), (-3, -2, -1)), ValueError, 'Shape error'),
        ((100, 200), (100, -1), TypeError, ''),
        ((100, 200), ((100, 200), (-3, -2)), IndexError, 'Invalid axes'),
        ((100, 200), ((100,), (-3,)), IndexError, 'Invalid axes'))

    def test_shape_and_s_different_lengths(self):
        # s covering fewer axes than the transform is valid once the
        # explicit axes kwarg is dropped (default axis resolution).
        dtype_tuple = self.io_dtypes[functions[self.func]]
        for dtype in dtype_tuple[0]:
            for test_shape, s, _kwargs in self.test_data:
                kwargs = copy.copy(_kwargs)
                try:
                    s = s[1:]
                except TypeError:
                    self.skipTest('Not meaningful test on 1d arrays.')

                del kwargs['axes']
                self.validate(dtype_tuple[1],
                              test_shape, dtype, s, kwargs)
class InterfacesNumpyFFTTestIFFT2(InterfacesNumpyFFTTestFFT2):
    # Inverse 2D complex transform.
    func = 'ifft2'
class InterfacesNumpyFFTTestRFFT2(InterfacesNumpyFFTTestFFT2):
    # Real-input 2D forward transform.
    func = 'rfft2'
class InterfacesNumpyFFTTestIRFFT2(InterfacesNumpyFFTTestFFT2):
    # Inverse real 2D transform: complex input with halved last axis.
    func = 'irfft2'
    realinv = True
class InterfacesNumpyFFTTestFFTN(InterfacesNumpyFFTTestFFT2):
    '''n-D variant of the FFT test battery.

    Bug fix: ``func`` previously read ``'ifftn'``, duplicating
    InterfacesNumpyFFTTestIFFTN below and leaving the forward ``fftn``
    untested; it now exercises ``fftn``.
    '''
    func = 'fftn'
    test_shapes = (
        ((128, 32, 4), {'axes': None}),
        ((64, 128, 16), {'axes': (0, 1, 2)}),
        ((4, 6, 8, 4), {'axes': (0, 3, 1)}),
        ((4, 6, 8, 4), {'axes': (0, 3, 1, 2)}),
    )
class InterfacesNumpyFFTTestIFFTN(InterfacesNumpyFFTTestFFTN):
    # Inverse n-D complex transform.
    func = 'ifftn'
class InterfacesNumpyFFTTestRFFTN(InterfacesNumpyFFTTestFFTN):
    # Real-input n-D forward transform.
    func = 'rfftn'
class InterfacesNumpyFFTTestIRFFTN(InterfacesNumpyFFTTestFFTN):
    # Inverse real n-D transform: complex input with halved last axis.
    func = 'irfftn'
    realinv = True
# Every test case class above, collected for the shared suite runner.
test_cases = (
    InterfacesNumpyFFTTestModule,
    InterfacesNumpyFFTTestFFT,
    InterfacesNumpyFFTTestIFFT,
    InterfacesNumpyFFTTestRFFT,
    InterfacesNumpyFFTTestIRFFT,
    InterfacesNumpyFFTTestHFFT,
    InterfacesNumpyFFTTestIHFFT,
    InterfacesNumpyFFTTestFFT2,
    InterfacesNumpyFFTTestIFFT2,
    InterfacesNumpyFFTTestRFFT2,
    InterfacesNumpyFFTTestIRFFT2,
    InterfacesNumpyFFTTestFFTN,
    InterfacesNumpyFFTTestIFFTN,
    InterfacesNumpyFFTTestRFFTN,
    InterfacesNumpyFFTTestIRFFTN,)

# Optionally restrict the run to a {class_name: (method, ...)} subset:
#test_set = {'InterfacesNumpyFFTTestHFFT': ('test_valid',)}
test_set = None
# Run via the shared pyfftw test runner when executed directly.
if __name__ == '__main__':
    run_test_suites(test_cases, test_set)
| |
#
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013-2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""CA plugin."""
import os
import re
import random
import gettext
_ = lambda m: gettext.dgettext(message=m, domain='ovirt-engine-setup')
from M2Crypto import X509
from otopi import util
from otopi import plugin
from otopi import transaction
from otopi import filetransaction
from otopi import constants as otopicons
from ovirt_engine import util as outil
from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup.engine import constants as oenginecons
from ovirt_engine_setup.engine_common import constants as oengcommcons
from ovirt_engine_setup.engine import vdcoption
from ovirt_engine_setup import util as osetuputil
@util.export
class Plugin(plugin.PluginBase):
    """CA plugin."""

    class CATransaction(transaction.TransactionElement):
        """yum transaction element."""

        def __init__(self, parent, uninstall_files):
            # uninstall_files is a shared, mutable list: the plugin keeps
            # appending created files to it so abort() can roll them back.
            self._parent = parent
            self._uninstall_files = uninstall_files

        def __str__(self):
            return _("CA Transaction")

        def prepare(self):
            # Nothing to stage up front.
            pass

        def abort(self):
            # Best-effort rollback: remove every file created so far.
            for f in self._uninstall_files:
                if os.path.exists(f):
                    os.unlink(f)

        def commit(self):
            # Created files are kept; uninstall bookkeeping is handled
            # via the registered uninstall group in _misc().
            pass

    def _subjectComponentEscape(self, s):
        # Escape '/' and '\' so the value is safe inside an openssl
        # '/C=.../O=.../CN=...' subject string.
        return outil.escape(s, '/\\')

    def __init__(self, context):
        super(Plugin, self).__init__(context=context)
        self._enabled = False

    @plugin.event(
        stage=plugin.Stages.STAGE_BOOT,
    )
    def _boot(self):
        # Make sure the PKI store password is filtered out of logs.
        self.environment[
            otopicons.CoreEnv.LOG_FILTER_KEYS
        ].append(
            oenginecons.PKIEnv.STORE_PASS
        )

    @plugin.event(
        stage=plugin.Stages.STAGE_INIT,
    )
    def _init(self):
        # Seed environment defaults; ORG stays None until customization.
        self.environment.setdefault(
            oenginecons.PKIEnv.STORE_PASS,
            oengcommcons.Defaults.DEFAULT_PKI_STORE_PASS
        )
        self.environment.setdefault(
            oenginecons.PKIEnv.COUNTRY,
            oengcommcons.Defaults.DEFAULT_PKI_COUNTRY
        )
        self.environment.setdefault(
            oenginecons.PKIEnv.ORG,
            None
        )

    @plugin.event(
        stage=plugin.Stages.STAGE_SETUP,
        condition=lambda self: not os.path.exists(
            oenginecons.FileLocations.OVIRT_ENGINE_PKI_ENGINE_CA_CERT
        )
    )
    def _setup(self):
        # Only enable CA creation when no CA certificate exists yet.
        self._enabled = True

    @plugin.event(
        stage=plugin.Stages.STAGE_CUSTOMIZATION,
        before=(
            oengcommcons.Stages.DIALOG_TITLES_E_PKI,
        ),
        after=(
            osetupcons.Stages.CONFIG_PROTOCOLS_CUSTOMIZATION,
            oengcommcons.Stages.DIALOG_TITLES_S_PKI,
        ),
        name=oenginecons.Stages.CA_ALLOWED,
    )
    def _customization_enable(self):
        if not self.environment[oenginecons.CoreEnv.ENABLE]:
            self._enabled = False

    @plugin.event(
        stage=plugin.Stages.STAGE_CUSTOMIZATION,
        before=(
            oengcommcons.Stages.DIALOG_TITLES_E_PKI,
        ),
        after=(
            osetupcons.Stages.CONFIG_PROTOCOLS_CUSTOMIZATION,
            oengcommcons.Stages.DIALOG_TITLES_S_PKI,
            oenginecons.Stages.CA_ALLOWED
        ),
        condition=lambda self: self._enabled,
    )
    def _customization(self):
        if self._enabled:
            if self.environment[oenginecons.PKIEnv.ORG] is None:
                # Default the organization to the FQDN's domain part.
                org = 'Test'
                if '.' in self.environment[osetupcons.ConfigEnv.FQDN]:
                    org = self.environment[
                        osetupcons.ConfigEnv.FQDN
                    ].split('.', 1)[1]
                self.environment[
                    oenginecons.PKIEnv.ORG
                ] = self.dialog.queryString(
                    name='OVESETUP_PKI_ORG',
                    note=_(
                        'Organization name for certificate [@DEFAULT@]: '
                    ),
                    prompt=True,
                    default=org,
                )
        else:
            self.dialog.note(
                text=_('PKI is already configured'),
            )

    @plugin.event(
        stage=plugin.Stages.STAGE_MISC,
        name=oenginecons.Stages.CA_AVAILABLE,
        condition=lambda self: self._enabled,
    )
    def _misc(self):
        # TODO
        # this implementaiton is not transactional
        # too many issues with legacy ca implementation
        # need to work this out to allow transactional
        # for now just delete files if we fail
        uninstall_files = []
        self.environment[otopicons.CoreEnv.MAIN_TRANSACTION].append(
            self.CATransaction(
                parent=self,
                uninstall_files=uninstall_files,
            )
        )

        # Register the (still growing) file list for later uninstall.
        self.environment[
            osetupcons.CoreEnv.REGISTER_UNINSTALL_GROUPS
        ].createGroup(
            group='ca_pki',
            description='PKI keys',
            optional=True,
        ).addFiles(
            group='ca_pki',
            fileList=uninstall_files,
        )

        # LEGACY NOTE
        # This is needed for avoiding error in create_ca when supporting
        # max cn length of 64.
        # please DON'T increase this size, any value over 55 will fail the
        # setup. the truncated host-fqdn is concatenated with a random string
        # to create a unique CN value.
        MAX_HOST_FQDN_LEN = 55

        self.logger.info(_('Creating CA'))

        # Render the CA/cert openssl config templates ('*.in' suffix is
        # stripped for the output name), tracking them for rollback.
        localtransaction = transaction.Transaction()
        with localtransaction:
            for name in (
                oenginecons.FileLocations.OVIRT_ENGINE_PKI_CA_TEMPLATE,
                oenginecons.FileLocations.OVIRT_ENGINE_PKI_CERT_TEMPLATE,
            ):
                localtransaction.append(
                    filetransaction.FileTransaction(
                        name=name[:-len('.in')],
                        content=outil.processTemplate(
                            name,
                            {
                                '@AIA@': 'http://%s:%s%s' % (
                                    self.environment[
                                        osetupcons.ConfigEnv.FQDN
                                    ],
                                    self.environment[
                                        oengcommcons.ConfigEnv.PUBLIC_HTTP_PORT
                                    ],
                                    oenginecons.Const.ENGINE_PKI_CA_URI,
                                )
                            }
                        ),
                        modifiedList=uninstall_files,
                    ),
                )

        # Create the CA itself; the CN is the truncated FQDN plus a
        # random 5-digit suffix for uniqueness (see LEGACY NOTE above).
        self.execute(
            args=(
                oenginecons.FileLocations.OVIRT_ENGINE_PKI_CA_CREATE,
                '--subject=/C=%s/O=%s/CN=%s.%s' % (
                    self._subjectComponentEscape(
                        self.environment[oenginecons.PKIEnv.COUNTRY],
                    ),
                    self._subjectComponentEscape(
                        self.environment[oenginecons.PKIEnv.ORG],
                    ),
                    self._subjectComponentEscape(
                        self.environment[
                            osetupcons.ConfigEnv.FQDN
                        ][:MAX_HOST_FQDN_LEN],
                    ),
                    random.randint(10000, 99999),
                ),
                '--keystore-password=%s' % (
                    self.environment[oenginecons.PKIEnv.STORE_PASS],
                ),
            ),
            envAppend={
                'JAVA_HOME': self.environment[
                    oengcommcons.ConfigEnv.JAVA_HOME
                ],
            },
        )

        # Enroll one certificate per component, all issued for the FQDN.
        for name in (
            'engine',
            'apache',
            'jboss',
            'websocket-proxy',
            'reports'
        ):
            self.execute(
                (
                    oenginecons.FileLocations.OVIRT_ENGINE_PKI_CA_ENROLL,
                    '--name=%s' % name,
                    '--password=%s' % (
                        self.environment[oenginecons.PKIEnv.STORE_PASS],
                    ),
                    '--subject=/C=%s/O=%s/CN=%s' % (
                        self._subjectComponentEscape(
                            self.environment[oenginecons.PKIEnv.COUNTRY],
                        ),
                        self._subjectComponentEscape(
                            self.environment[oenginecons.PKIEnv.ORG],
                        ),
                        self._subjectComponentEscape(
                            self.environment[osetupcons.ConfigEnv.FQDN],
                        ),
                    ),
                ),
            )

        uninstall_files.extend(
            (
                oengcommcons.FileLocations.OVIRT_ENGINE_PKI_APACHE_CERT,
                oenginecons.FileLocations.OVIRT_ENGINE_PKI_APACHE_STORE,
                oenginecons.FileLocations.OVIRT_ENGINE_PKI_ENGINE_CA_CERT,
                oenginecons.FileLocations.OVIRT_ENGINE_PKI_ENGINE_CA_KEY,
                oenginecons.FileLocations.OVIRT_ENGINE_PKI_ENGINE_CERT,
                oenginecons.FileLocations.OVIRT_ENGINE_PKI_ENGINE_STORE,
                oenginecons.FileLocations.OVIRT_ENGINE_PKI_ENGINE_TRUST_STORE,
                oenginecons.FileLocations.OVIRT_ENGINE_PKI_JBOSS_STORE,
                oenginecons.FileLocations.OVIRT_ENGINE_PKI_JBOSS_CERT,
                oenginecons.FileLocations.OVIRT_ENGINE_PKI_CA_CERT_CONF,
                oenginecons.FileLocations.OVIRT_ENGINE_PKI_CERT_CONF,
                (
                    oenginecons.FileLocations.
                    OVIRT_ENGINE_PKI_LOCAL_WEBSOCKET_PROXY_CERT
                ),
                (
                    oenginecons.FileLocations.
                    OVIRT_ENGINE_PKI_LOCAL_WEBSOCKET_PROXY_STORE
                ),
            )
        )

        # Extract plain PEM private keys from the PKCS#12 stores for the
        # components that need them; logStreams=False keeps key material
        # out of the logs.
        self.execute(
            args=(
                oenginecons.FileLocations.OVIRT_ENGINE_PKI_PKCS12_EXTRACT,
                '--name=websocket-proxy',
                '--passin=%s' % (
                    self.environment[oenginecons.PKIEnv.STORE_PASS],
                ),
                '--key=%s' % (
                    oenginecons.FileLocations.
                    OVIRT_ENGINE_PKI_LOCAL_WEBSOCKET_PROXY_KEY,
                ),
            ),
            logStreams=False,
        )
        uninstall_files.append(
            oenginecons.FileLocations.
            OVIRT_ENGINE_PKI_LOCAL_WEBSOCKET_PROXY_KEY
        )

        self.execute(
            args=(
                oenginecons.FileLocations.OVIRT_ENGINE_PKI_PKCS12_EXTRACT,
                '--name=reports',
                '--passin=%s' % (
                    self.environment[oenginecons.PKIEnv.STORE_PASS],
                ),
                '--key=%s' % (
                    oenginecons.FileLocations.
                    OVIRT_ENGINE_PKI_REPORTS_KEY,
                ),
            ),
            logStreams=False,
        )
        uninstall_files.append(
            oenginecons.FileLocations.
            OVIRT_ENGINE_PKI_REPORTS_KEY
        )

        self.execute(
            args=(
                oenginecons.FileLocations.OVIRT_ENGINE_PKI_PKCS12_EXTRACT,
                '--name=apache',
                '--passin=%s' % (
                    self.environment[oenginecons.PKIEnv.STORE_PASS],
                ),
                '--key=%s' % (
                    oengcommcons.FileLocations.OVIRT_ENGINE_PKI_APACHE_KEY,
                ),
            ),
            logStreams=False,
        )
        uninstall_files.append(
            oengcommcons.FileLocations.OVIRT_ENGINE_PKI_APACHE_KEY
        )

        # Apache serves the engine CA certificate via a symlink.
        if not os.path.exists(
            oengcommcons.FileLocations.OVIRT_ENGINE_PKI_APACHE_CA_CERT
        ):
            os.symlink(
                oenginecons.FileLocations.OVIRT_ENGINE_PKI_ENGINE_CA_CERT,
                oengcommcons.FileLocations.OVIRT_ENGINE_PKI_APACHE_CA_CERT
            )
            uninstall_files.append(
                oengcommcons.FileLocations.OVIRT_ENGINE_PKI_APACHE_CA_CERT
            )

        # The engine service user must be able to read its keystores;
        # group is left unchanged (-1).
        for f in (
            oenginecons.FileLocations.OVIRT_ENGINE_PKI_ENGINE_STORE,
            oenginecons.FileLocations.OVIRT_ENGINE_PKI_JBOSS_STORE,
        ):
            os.chown(
                f,
                osetuputil.getUid(
                    self.environment[osetupcons.SystemEnv.USER_ENGINE]
                ),
                -1,
            )

    @plugin.event(
        stage=plugin.Stages.STAGE_MISC,
        after=(
            oengcommcons.Stages.DB_CONNECTION_AVAILABLE,
        ),
        condition=lambda self: self._enabled,
    )
    def miscOptions(self):
        # Persist the chosen organization name into the engine database.
        vdcoption.VdcOption(
            statement=self.environment[oenginecons.EngineDBEnv.STATEMENT]
        ).updateVdcOptions(
            options=(
                {
                    'name': 'OrganizationName',
                    'value': self.environment[
                        oenginecons.PKIEnv.ORG
                    ],
                },
            ),
        )

    @plugin.event(
        stage=plugin.Stages.STAGE_CLOSEUP,
        before=(
            osetupcons.Stages.DIALOG_TITLES_E_SUMMARY,
        ),
        after=(
            osetupcons.Stages.DIALOG_TITLES_S_SUMMARY,
        ),
        condition=lambda self: self.environment[oenginecons.CoreEnv.ENABLE],
    )
    def _closeup(self):
        # Display the CA's SHA-1 fingerprint, colon-separated: the
        # re.sub inserts ':' before every hex pair and the [1:] drops
        # the leading colon.
        x509 = X509.load_cert(
            file=oenginecons.FileLocations.OVIRT_ENGINE_PKI_ENGINE_CA_CERT,
            format=X509.FORMAT_PEM,
        )
        self.dialog.note(
            text=_('Internal CA {fingerprint}').format(
                fingerprint=re.sub(
                    r'(..)',
                    r':\1',
                    x509.get_fingerprint(md='sha1'),
                )[1:],
            )
        )


# vim: expandtab tabstop=4 shiftwidth=4
| |
# -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
# xierpa server
# Copyright (c) 2014+ buro@petr.com, www.petr.com, www.xierpa.com
#
# X I E R P A 3
# Distribution by the MIT License.
#
# -----------------------------------------------------------------------------
#
# constants.py
#
# Formatting conform https://google-styleguide.googlecode.com/svn/trunk/htmlcssguide.xml
#
try:
from config.config import Config
except ImportError:
from config.COPYTO_config import Config
class Constants(Config):
    u"""Inherited by main Xierpa3 classes, to share default constants,
    while allowing them to be redefined by inheriting classes."""
    # Indenting output
    TAB_INDENT = ' '*2
    UNTITLED = 'Untitled'
    SINGLE_ATTRIBUTES = [] # Attributes that need no value in the output.
    # Types of sites
    SITETYPE_BLOG = 'blog'
    # Webtype @fontface fonts, to be used for localhost demo purposes.
    # Note that this package contains a set of the latest featured fonts, and may be changed in the future.
    # If using the font in this package, safest is to refer to the functional constant names below,
    # instead of making a direct reference to the family name.
    # Of course, taking your own account at //www.webtype.com is even better :)
    XIERPA3_DEMOFONTS = '//cloud.webtype.com/css/7aa22aa1-1709-4b55-b95c-3413d3e5280a.css'
    # Redefine this list for other font packages in inheriting theme classes.
    URL_FONTS = [] #XIERPA3_DEMOFONTS,)
    BODYFAMILY = 'Georgia'
    HEADFAMILY = 'Verdana'
    LOGOFAMILY = HEADFAMILY
    # Sponsored fonts in the example sites
    URL_WEBTYPELOGO = '//data.xierpa.com.s3.amazonaws.com/xierpa3/_images/documentation/webtypelogo.png'
    # Xierpa ico, answered by the default adapter.getIco()
    URL_FAVICON = '//data.xierpadoc.com.s3.amazonaws.com/_images/xierpa_x.ico'
    URL_LOGO = '//data.xierpa.com.s3.amazonaws.com/_images/xierpa_x_green.png'
    # Placeholder for all images if not online.
    URL_XIERPA3RESOURCES = '/xierpa3/resources/'
    URL_IMAGEPLACEHOLDER = URL_XIERPA3RESOURCES + 'images/placeholder.png'
    # CSS
    URL_CSS = ['css/style.css']
    # Known builder id's, used to check on a specific builder if really necessary.
    # Note that these values should match with the id's of the builder classes.
    TYPE_HTML = 'html'
    TYPE_SASS = 'sass'
    TYPE_CSS = 'css'
    TYPE_PHP = 'php'
    # Known component names.
    C_TITLE = 'title'
    USE_SCHEDULERVERBOSE = False
    SCHEDULER_SLEEPTIME = 5.0
    SCHEDULER_STATUS_IDLE = 'idle'
    SCHEDULER_STATUS_RUN = 'run'
    # Link window targets
    TARGET_EXTERN = 'extern'
    # Menu options
    MENU_VERTICAL = 'vertical'
    MENU_HORIZONTAL = 'horizontal' # Default
    # Media ranges and widths
    M_MOBILE_MAX = 755
    M_TABLET_MIN = M_MOBILE_MAX+1
    M_DESKTOP_MIN = 1024
    M_TABLET_MAX = M_DESKTOP_MIN-1
    MAXWIDTH = 1140
    # ID's
    ID_HEADER = 'header'
    ID_LOGO = 'logo'
    ID_NAV = 'nav'
    ID_NAVIGATIONWRAP = 'navigation-wrap'
    ID_MOBILENAVWRAP = 'nav-wrap'
    ID_MENUICON = 'menu-icon'
    ID_HOME = 'index' # Default article id, typically connected to textile file index.txt
    DEFAULT_ARTICLEID = 'home' # If nothing is selected in URL.
    # Classes
    CLASS_ANKEILER = 'ankeiler'
    CLASS_ANSWER = 'answer'
    CLASS_ARTICLE = 'article'
    CLASS_ARTICLETOP = 'articleTop'
    CLASS_AUTHOR = 'author'
    CLASS_AUTOWIDTH = 'autoWidth' # Make img tags behave right for width='auto' in all browsers.
    CLASS_MAXWIDTH = 'maxWidth' # Make img tags behave right for width='auto' in all browsers.
    CLASS_CAPTION = 'caption'
    CLASS_CATEGORY = 'category'
    CLASS_CATEGORYTHUMB = 'categoryThumb'
    CLASS_CHAPTER = 'chapter'
    CLASS_CHAPTERNAVIGATION = 'chapterNavigation'
    CLASS_COLUMN = 'column'
    CLASS_CONTAINER = 'container'
    CLASS_DOCUMENTATION = 'documentation'
    CLASS_ERROR = 'error'
    CLASS_EDITABLE = 'editable'
    CLASS_FEATUREDITEM = 'featuredItem'
    CLASS_FIRST = 'first'
    CLASS_FOOTNOTE = 'footnote'
    CLASS_FOOTNOTES = 'footnotes' # List of footnotes
    CLASS_IMAGEBLOCK = 'imageBlock'
    CLASS_ITEM = 'item'
    CLASS_LAST = 'last'
    CLASS_LEVEL = 'level'
    CLASS_LEAD = 'lead'
    CLASS_MENU = 'menu'
    CLASS_MAILTO = 'mailto'
    CLASS_MENULINK = 'menuLink'
    CLASS_MENULINKS = 'menuLinks'
    CLASS_MISSING = 'missing'
    CLASS_MOBILECHAPTERNAVIGATION = 'mobileChapterNavigation'
    CLASS_NAME = 'name'
    CLASS_PAGE = 'page'
    CLASS_PULLQUOTE = 'pullquote'
    CLASS_QUESTION = 'question'
    CLASS_ROW = 'row'
    CLASS_RULER1 = 'ruler1'
    CLASS_RULER2 = 'ruler2'
    CLASS_SOCIALMEDIA = 'socialMedia'
    CLASS_STRIKETHROUGH = 'strikeThrough'
    CLASS_SUMMARY = 'summary'
    CLASS_SUMMARYBOX = 'summaryBox'
    CLASS_TITLE = 'title'
    CLASS_HEADLINE = 'headline'
    CLASS_TOPIC = 'topic'
    CLASS_UNDERLINE = 'underline'
    CLASS_BLOGRESPONSE = 'blogResponse'
    CLASS_1COL = 'oneCol'
    CLASS_2COL = 'twoCol'
    CLASS_3COL = 'threeCol'
    CLASS_4COL = 'fourCol'
    CLASS_5COL = 'fiveCol'
    CLASS_6COL = 'sixCol'
    CLASS_7COL = 'sevenCol'
    CLASS_8COL = 'eightCol'
    CLASS_9COL = 'nineCol'
    CLASS_10COL = 'tenCol'
    CLASS_11COL = 'elevenCol'
    CLASS_12COL = 'twelveCol'
    MAXCOL = 12
    COL2CLASS = {
        1: CLASS_1COL, 2: CLASS_2COL, 3: CLASS_3COL, 4: CLASS_4COL, 5: CLASS_5COL, 6: CLASS_6COL,
        7: CLASS_7COL, 8: CLASS_8COL, 9: CLASS_9COL, 10: CLASS_10COL, 11: CLASS_11COL, 12: CLASS_12COL,
    }
    # Params
    # Don't use "css" as parameter, as it is part of the default CSS path.
    PARAM_CSSTYPE = 'csstype'
    PARAM_EDIT = 'edit'
    PARAM_ARTICLE = 'article'
    PARAM_CHAPTER = 'chapter' # Chapter index in the current article, starting with 0
    PARAM_AUTHOR = 'author'
    PARAM_CATEGORY = 'category'
    PARAM_SID = 'sid' # Session id
    PARAM_DOCUMENTATION = 'documentation'
    PARAM_FORCE = 'force' # Force the recalculation of the SASS/CSS
    PARAM_AJAX = 'ajax'
    PARAM_DEBUG = 'debug'
    # Tag
    TAG_BLOCK = 'div' # Default tag for blocks
    # Image
    IMG_DEFAULTBORDER = 0
    # CSS Constants
    ITALIC = 'italic'
    VERTICAL = 'vertical' # Menu
    BLOCK = 'block'
    INLINEBLOCK = 'inline-block'
    INLINE = 'inline'
    TRUE = 'true' # CSS True
    # Fixed: a trailing comma previously made this a 1-tuple ('false',)
    # instead of the string 'false', inconsistent with TRUE above.
    FALSE = 'false' # CSS False
    NONE = 'none'
    BOTH = 'both'
    HIDDEN = 'hidden'
    AUTO = 'auto'
    BOTTOM = 'bottom'
    UNDERLINE = 'underline'
    ABSOLUTE = 'absolute'
    RELATIVE = 'relative'
    UPPERCASE = 'uppercase'
    POINTER = 'pointer'
    BOLD = 'bold'
    LEFT = 'left'
    RIGHT = 'right'
    CENTER = 'center'
    MIDDLE = 'middle'
    NORMAL = 'normal'
    REPEAT = 'repeat'
    BASELINE = 'baseline'
    DECIMAL = 'decimal'
    LIST = 'list'
    INSIDE = 'inside'
    OUTSIDE = 'outside'
    # CSS Writing modes
    WM_HORIZONTALTB = 'horizontal-tb' # Default value. Content flows horizontally from left to right,
        # vertically from top to bottom. The next horizontal line is positioned below the previous line.
        # horizontal-tb is the default value, as this layout is used by most writing systems. For SVG1
        # documents only, use the deprecated value lr, rl, or lr-tb.
    WM_RLTB = 'rl-tb' # Content flows horizontally from right to left, vertically from top to bottom.
        # The next horizontal line is positioned below the previous line.
    WM_VERTICALLR = 'vertical-lr' # Content flows vertically from top to bottom, horizontally from left
        # to right. The next vertical line is positioned to the right of the previous line.
        # For SVG1 documents only, use the deprecated value tb-lr.
    WM_VERTICALRL = 'vertical-rl' # Content flows vertically from top to bottom, horizontally from
        # right to left. The next vertical line is positioned to the left of the previous line.
        # For SVG1 documents only, use the deprecated value tb or tb-rl.
    WM_BTRL = 'bt-rl' # Content flows vertically from bottom to top, horizontally right to left.
        # The next vertical line is positioned to the left of the previous line.
    WM_BTLR = 'bt-lr' # Fixed typo: was 'br-lr', which is not a valid CSS writing-mode value.
        # Content flows vertically from bottom to top, horizontally left to right.
        # The next vertical line is positioned to the right of the previous line.
    WM_LRBT = 'lr-bt' # Content flows horizontally from left to right, vertically from bottom to top.
        # The next horizontal line is positioned above the previous line.
    WM_RLBT = 'rl-bt' # Content flows horizontally from right to left, vertically from bottom to top.
        # The next horizontal line is positioned above the previous line.
    WM_LR = 'lr' # Deprecated except for SVG1 documents. For CSS, use horizontal-tb (default value).
    WM_LRTB = 'lr-tb' # Deprecated except for SVG1 documents. For CSS, use horizontal-tb (default value).
    WM_RL = 'rl' # Deprecated except for SVG1 documents. For CSS, use horizontal-tb (default value).
    WM_TB = 'tb' # Deprecated except for SVG1 documents. For CSS, use vertical-rl.
    WM_TBLR = 'tb-lr' # Deprecated except for SVG1 documents. For CSS, use vertical-lr.
    WM_TBRL = 'tb-rl' # Deprecated except for SVG1 documents. For CSS, use vertical-rl.
    # Pseudo CSS selectors
    FIRSTCHILD = 'first-child'
    # Colors
    BLACK = 'black'
    WHITE = 'white'
    # Builder postfixes
    # These are tested on dispatcher method and attribute name postfix against the generic names.
    # Must contain the ATTR_POSTFIX of all available builders.
    ATTR_POSTFIXES = set(('html', 'css'))
    ATTR_MEDIA = 'media' # Special attribute in components to define (a list of) Media instances.
    # Template names (as parameter in url)
    TEMPLATE_INDEX = 'index'
    TEMPLATE_ARTICLE = 'article'
    TEMPLATE_ARTICLES = 'articles'
    TEMPLATE_DOCUMENTATION = 'documentation'
    TEMPLATE_DEFAULT = TEMPLATE_INDEX
    # Adapter
    # Adapter data fields that can be returned optionally by any adapter.
    ADAPTER_ITEMS = 'items' # Chapters of an article.
    ADAPTER_MENU = 'menu' # Comma separated menu list
    ADAPTER_URL = 'url' # Comma separated url list
    ADAPTER_ERROR = 'error'
    ADAPTER_CATEGORY = 'category'
    ADAPTER_FEATURED = 'featured' # Comma separated list of featured articles ids
    ADAPTER_SOURCE = 'source' # Source of an article as stored, before any transformation.
    #ADAPTER_PAGETITLE = 'pageTitle'
    #ADAPTER_LOGO = 'logo'
    #ADAPTER_MESSAGE = 'message'
    #ADAPTER_CHAPTERS = 'chapters'
    #ADAPTER_FEATUREDARTICLES = 'featuredArticles'
    #ADAPTER_FOOTER = 'footer'
    #ADAPTER_SOCIALMEDIA = 'socialMedia'
    #ADAPTER_TAGCLOUD = 'tagCloud'
    #ADAPTER_ARTICLE = 'article'
    #ADAPTER_PAGES = 'pages'
    #ADAPTER_MOBILEPAGES = 'mobilePages'
    # Comma separated list fields
    ADAPTER_COMMAFIELDS = set((ADAPTER_URL, ADAPTER_MENU, ADAPTER_CATEGORY,
        ADAPTER_FEATURED))
    # Types of article selector
    SELECTOR_FEATURED = 'featured'
    # SASS
    SASS_NESTED = 'nested'
    SASS_EXPANDED = 'expanded'
    SASS_COMPACT = 'compact'
    SASS_COMPRESSED = 'compressed'
    SASS_STYLES = (SASS_NESTED, SASS_EXPANDED, SASS_COMPACT, SASS_COMPRESSED)
    SASS_DEFAULTSTYLE = SASS_COMPRESSED
    META_DESCRIPTION = 'description'
    META_KEYWORDS = 'keywords'
    # ---------------------------------------------------------------------------------------------------------
    #     S E S S I O N  K E Y S
    SESSION_ID = PARAM_SID
    SESSION_SESSIONEXPIRATIONTIME = 600 # 10 minutes for normal usage of the site
    SESSION_EDITEXPIRATIONTIME = 3600 # 1 hour for editing mode.
    SESSION_SIDDIGITS = 64 # Number of digits chunks for the session id (64 digits default)
        # Don't make too high or else it will not fit in the cookie
    SESSION_LANGUAGE = 'language'
    SESSION_TYPESTAMPRANDOMRANGE = 10000000 # Random range added to a session timestamp
    # ---------------------------------------------------------------------------------------------------------
    #     D A T A B A S E
    #
    #    Standard table and field names
    #
    TABLE_XREF = 'xref'
    TABLE_ADDRESS = 'address'
    FIELD_XSRCTABLE = 'xsrc'
    FIELD_XSRCFIELD = 'xsrc'
    FIELD_XREFSRCID = 'srcId'
    FIELD_XDSTTABLE = 'xdst'
    FIELD_XDSTFIELD = 'dstId'
    FIELD_XREFDSTID = 'dstId'
    # ---------------------------------------------------------------------------------------------------------
    #     B R O W S E R  S T U F F
    BROWSER_UNKNOWN = 'browser_unknown'
    BROWSER_SAFARI = 'browser_safari'
    BROWSER_FIREFOX = 'browser_firefox'
    BROWSER_CAMINO = 'browser_camino'
    BROWSER_IE = 'browser_ie'
    BROWSER_OPERA = 'browser_opera'
    BROWSER_NETSCAPE = 'browser_netscape'
    BROWSER_CHROME = 'browser_chrome'
    BROWSER_IPHONE = 'browser_iphone'
    BROWSER_OSMAC = 'browser_os_mac'
    BROWSER_OSWINDOWS = 'browser_os_windows'
    # Prefixes are automatically inserted, if it is used in the attribute dictionary.
    PREFIXES = ('webkit', 'ms', 'moz', 'o')
    PREFIXES_EXTENDES = PREFIXES + ('xv', 'epub', 'apple', 'khtml', 'safari', 'chrome', 'wap')
    FALSEVALUES = ('', 0, '0', 'f', 'F', 'none', 'None', 'NONE', 'false', 'False', 'FALSE', 'n', 'N', 'no', 'No', 'NO',
        None, False)
    TRUEVALUES = (1, '1', 't', 'T', 'true', 'True', 'TRUE', 'y', 'Y', 'yes', 'Yes', 'YES', True)
    BOOLVALUES = FALSEVALUES + TRUEVALUES
    # ---------------------------------------------------------------------------------------------------------
    #     X S L
    XSL_XMLCONVERSIONS = (int, float, long, tuple, list, dict, bool)
    # ---------------------------------------------------------------------------------------------------------
    #     E X T E N S I O N S
    # Text / code formats.
    EXTENSION_XML = 'xml'
    EXTENSION_XSL = 'xsl'
    EXTENSION_XSD = 'xsd'
    EXTENSION_TXT = 'txt'
    EXTENSION_CSS = 'css'
    EXTENSION_HTML = 'html'
    EXTENSION_JSON = 'json'
    EXTENSION_PY = 'py'
    EXTENSION_JS = 'js'
    EXTENSION_EPUB = 'epub'
    # Lossy web image formats.
    EXTENSION_JPG = 'jpg'
    EXTENSION_JPEG = 'jpeg'
    EXTENSION_PNG = 'png'
    EXTENSION_GIF = 'gif'
    # Common web document formats.
    EXTENSION_PDF = 'pdf'
    EXTENSION_EPS = 'eps'
    EXTENSION_ZIP = 'zip'
    EXTENSION_RAR = 'rar'
    EXTENSION_TAR = 'tar'
    EXTENSION_GZ = 'gz'
    EXTENSION_UFO = 'ufo'
    EXTENSION_TTF = 'ttf'
    EXTENSION_OTF = 'otf'
    # Lossless print formats.
    EXTENSION_TIF = 'tif'
    EXTENSION_TIFF = 'tiff'
    # Filetype sets.
    EXTENSIONS_XML = (EXTENSION_XML, EXTENSION_XSL, EXTENSION_XSD)
    EXTENSIONS_TEXT = (EXTENSION_XML, EXTENSION_XSL, EXTENSION_XSD, EXTENSION_TXT, EXTENSION_PY, EXTENSION_JS, EXTENSION_JSON)
    EXTENSIONS_WEBIMAGES = (EXTENSION_JPG, EXTENSION_JPEG, EXTENSION_PNG, EXTENSION_GIF)
    EXTENSIONS_DOCUMENTS = (EXTENSION_PDF, EXTENSION_EPS, EXTENSION_ZIP, EXTENSION_RAR, EXTENSION_TAR, EXTENSION_GZ)
    EXTENSIONS_TIFF = (EXTENSION_TIFF, EXTENSION_TIF)
    # Valid files for uploading.
    EXTENSIONS_VALIDFILES = EXTENSIONS_WEBIMAGES + EXTENSIONS_DOCUMENTS + EXTENSIONS_TIFF
    DEFAULT_EXTENSION = '' # Must be empty, or else plain url's won't work.
    IMAGE_FORMAT = EXTENSION_PNG # Default image format by ImageBuilder
    # ---------------------------------------------------------------------------------------------------------
    #     M I M E T Y P E S
    MIMETYPES = {
        'otf': 'font/ttf', # Not standard
        'ttf': 'font/ttf', # Not standard
        'wff': 'font/woff', # Future?
        'woff': 'application/x-font-woff', # IE9 requires this
        '323': 'text/h323',
        'acx': 'application/internet-property-stream',
        'ai': 'application/postscript',
        'aif': 'audio/x-aiff',
        'aifc': 'audio/x-aiff',
        'aiff': 'audio/x-aiff',
        'asf': 'video/x-ms-asf',
        'asr': 'video/x-ms-asf',
        'asx': 'video/x-ms-asf',
        'au': 'audio/basic',
        'avi': 'video/x-msvideo',
        'axs': 'application/olescript',
        'bas': 'text/plain',
        'bcpio': 'application/x-bcpio',
        'bin': 'application/octet-stream',
        'bmp': 'image/bmp',
        'c': 'text/plain',
        'cat': 'application/vnd.ms-pkiseccat',
        'cdf': 'application/x-cdf',
        'cer': 'application/x-x509-ca-cert',
        'class': 'application/octet-stream',
        'clp': 'application/x-msclip',
        'cmx': 'image/x-cmx',
        'cod': 'image/cis-cod',
        'cpio': 'application/x-cpio',
        'crd': 'application/x-mscardfile',
        'crl': 'application/pkix-crl',
        'crt': 'application/x-x509-ca-cert',
        'csh': 'application/x-csh',
        'css': 'text/css',
        'dcr': 'application/x-director',
        'der': 'application/x-x509-ca-cert',
        'dir': 'application/x-director',
        'dll': 'application/x-msdownload',
        'dms': 'application/octet-stream',
        'doc': 'application/msword',
        'dot': 'application/msword',
        'dvi': 'application/x-dvi',
        'dxr': 'application/x-director',
        'eot': 'application/vnd.ms-fontobject',
        'eps': 'application/postscript',
        'epub': 'application/epub+zip',
        'etx': 'text/x-setext',
        'evy': 'application/envoy',
        'exe': 'application/octet-stream',
        'fif': 'application/fractals',
        'flr': 'x-world/x-vrml',
        'gif': 'image/gif',
        'gtar': 'application/x-gtar',
        'gz': 'application/x-gzip',
        'h': 'text/plain',
        'hdf': 'application/x-hdf',
        'hlp': 'application/winhlp',
        'hqx': 'application/mac-binhex40',
        'hta': 'application/hta',
        'htc': 'text/x-component',
        'htm': 'text/html',
        'html': 'text/html',
        'htt': 'text/webviewhtml',
        'ico': 'image/x-icon',
        'ief': 'image/ief',
        'iii': 'application/x-iphone',
        'ins': 'application/x-internet-signup',
        'isp': 'application/x-internet-signup',
        'jfif': 'image/pipeg',
        'jpe': 'image/jpeg',
        'jpeg': 'image/jpeg',
        'json': 'application/json',
        'jpg': 'image/jpeg',
        'js': 'application/x-javascript',
        'latex': 'application/x-latex',
        'lha': 'application/octet-stream',
        'lsf': 'video/x-la-asf',
        'lsx': 'video/x-la-asf',
        'lzh': 'application/octet-stream',
        'm13': 'application/x-msmediaview',
        'm14': 'application/x-msmediaview',
        'm3u': 'audio/x-mpegurl',
        'man': 'application/x-troff-man',
        'mdb': 'application/x-msaccess',
        'me': 'application/x-troff-me',
        'mht': 'message/rfc822',
        'mhtml': 'message/rfc822',
        'mid': 'audio/mid',
        'mny': 'application/x-msmoney',
        'mov': 'video/quicktime',
        'movie': 'video/x-sgi-movie',
        'mp2': 'video/mpeg',
        'mp3': 'audio/mpeg',
        'mpa': 'video/mpeg',
        'mpe': 'video/mpeg',
        'mpeg': 'video/mpeg',
        'mpg': 'video/mpeg',
        'mpp': 'application/vnd.ms-project',
        'mpv2': 'video/mpeg',
        'ms': 'application/x-troff-ms',
        'mvb': 'application/x-msmediaview',
        'nws': 'message/rfc822',
        'oda': 'application/oda',
        'p10': 'application/pkcs10',
        'p12': 'application/x-pkcs12',
        'p7b': 'application/x-pkcs7-certificates',
        'p7c': 'application/x-pkcs7-mime',
        'p7m': 'application/x-pkcs7-mime',
        'p7r': 'application/x-pkcs7-certreqresp',
        'p7s': 'application/x-pkcs7-signature',
        'pbm': 'image/x-portable-bitmap',
        'pdf': 'application/pdf',
        'pfx': 'application/x-pkcs12',
        'pgm': 'image/x-portable-graymap',
        'pko': 'application/ynd.ms-pkipko',
        'pma': 'application/x-perfmon',
        'pmc': 'application/x-perfmon',
        'pml': 'application/x-perfmon',
        'pmr': 'application/x-perfmon',
        'pmw': 'application/x-perfmon',
        'png': 'image/png',
        'pnm': 'image/x-portable-anymap',
        'pot': 'application/vnd.ms-powerpoint', # Fixed: key was 'pot,' with a stray comma.
        'ppm': 'image/x-portable-pixmap',
        'pps': 'application/vnd.ms-powerpoint',
        'ppt': 'application/vnd.ms-powerpoint',
        'prf': 'application/pics-rules',
        'ps': 'application/postscript',
        'py': 'text/plain',
        'pyc': 'application/python',
        'pub': 'application/x-mspublisher',
        'qt': 'video/quicktime',
        'ra': 'audio/x-pn-realaudio',
        'ram': 'audio/x-pn-realaudio',
        'ras': 'image/x-cmu-raster',
        'rgb': 'image/x-rgb',
        'rmi': 'audio/mid',
        'roff': 'application/x-troff',
        'rtf': 'application/rtf',
        'rtx': 'text/richtext',
        'scd': 'application/x-msschedule',
        'sct': 'text/scriptlet',
        'setpay': 'application/set-payment-initiation',
        'setreg': 'application/set-registration-initiation',
        'sh': 'application/x-sh',
        'shar': 'application/x-shar',
        'sit': 'application/x-stuffit',
        'snd': 'audio/basic',
        'spc': 'application/x-pkcs7-certificates',
        'spl': 'application/futuresplash',
        'src': 'application/x-wais-source',
        'sql': 'text/plain',
        'sst': 'application/vnd.ms-pkicertstore',
        'stl': 'application/vnd.ms-pkistl',
        'stm': 'text/html',
        'svg': 'image/svg+xml',
        'sv4cpio': 'application/x-sv4cpio',
        'sv4crc': 'application/x-sv4crc',
        'swf': 'application/x-shockwave-flash',
        't': 'application/x-troff',
        'tar': 'application/x-tar',
        'tcl': 'application/x-tcl',
        'tex': 'application/x-tex',
        'texi': 'application/x-texinfo',
        'texinfo': 'application/x-texinfo',
        'tgz': 'application/x-compressed',
        'tif': 'image/tiff',
        'tiff': 'image/tiff',
        'tr': 'application/x-troff',
        'trm': 'application/x-msterminal',
        'tsv': 'text/tab-separated-values',
        'txt': 'text/plain',
        'uls': 'text/iuls',
        'ustar': 'application/x-ustar',
        'vcf': 'text/x-vcard',
        'vrml': 'x-world/x-vrml',
        'wav': 'audio/x-wav',
        'wcm': 'application/vnd.ms-works',
        'wdb': 'application/vnd.ms-works',
        'wks': 'application/vnd.ms-works',
        'wmf': 'application/x-msmetafile',
        'wps': 'application/vnd.ms-works',
        'wri': 'application/x-mswrite',
        'wrl': 'x-world/x-vrml',
        'wrz': 'x-world/x-vrml',
        'xaf': 'x-world/x-vrml',
        'xbm': 'image/x-xbitmap',
        'xla': 'application/vnd.ms-excel',
        'xlc': 'application/vnd.ms-excel',
        'xlm': 'application/vnd.ms-excel',
        'xls': 'application/vnd.ms-excel',
        'xlt': 'application/vnd.ms-excel',
        'xlw': 'application/vnd.ms-excel',
        'xml': 'text/plain', # XML
        'xsl': 'text/plain', # XSL
        'xsd': 'text/plain', # XML Schema
        'xof': 'x-world/x-vrml',
        'xpm': 'image/x-xpixmap',
        'xwd': 'image/x-xwindowdump',
        'z': 'application/x-compress',
        'zip': 'application/zip',
    }
    MIMETYPE_FONTTTF = 'font/ttf'
    MIMETYPE_CSS = MIMETYPES[EXTENSION_CSS]
    MIMETYPE_PLAIN = MIMETYPES[EXTENSION_TXT]
    MIMETYPE_HTML = MIMETYPES[EXTENSION_HTML]
    MIMETYPE_JSON = MIMETYPES[EXTENSION_JSON]
    MIMETYPE_JS = MIMETYPES[EXTENSION_JS]
    MIMETYPE_PY = MIMETYPES[EXTENSION_PY]
    MIMETYPE_XML = MIMETYPES[EXTENSION_XML]
    MIMETYPE_XSL = MIMETYPES[EXTENSION_XSL]
    MIMETYPE_XSD = MIMETYPES[EXTENSION_XSD]
    MIMETYPE_EPUB = MIMETYPES[EXTENSION_EPUB]
    MIMETYPE_PNG = MIMETYPES[EXTENSION_PNG]
    MIMETYPE_JPG = MIMETYPES[EXTENSION_JPG]
    MIMETYPE_GIF = MIMETYPES[EXTENSION_GIF]
    DEFAULT_MIMETYPE = MIMETYPE_PLAIN
| |
# -*- coding: utf-8 -*-
"""
Module to handle masks: it defines:
- Mask: a mask object with command line methods: addPolygon, etc
- makeMaskGui: a GUI based way of creating masks
- maskBorder: to mask the borders of an array
- maskCenterLines: to mask the central lines (good for multi-panel det)
- interpretMask: interpret mask element (filename,y>500,array)
- interpretMasks: add up list of mask elements
"""
from __future__ import print_function,division,absolute_import
import sys
if sys.version_info.major == 2: input=raw_input
import logging
log = logging.getLogger(__name__)
import os
import re
import collections
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.path import Path
import fabio
def read(fname):
    """ read data from file using fabio """
    image = fabio.open(fname)
    data = image.data
    del image  # drop the fabio handle so the underlying file is closed
    return data
# Immutable record for one mask operation:
#   operation: 'add' or 'subtract'
#   geometry:  'circle', 'rectangle' or 'polygon'
#   vertices:  tuple of (x, y) points defining the shape
maskComponent = collections.namedtuple('maskComponent',['operation','geometry','vertices'])
def _rectangleToMask(X,Y,vertices):
( (x1,y1), (x2,y2) ) = vertices
if x1>x2: x1,x2=x2,x1
if y1>y2: y1,y2=y2,y1
return (X>x1) & (X<x2) & ( Y>y1) & (Y<y2)
def _circleToMask(X,Y,vertices):
c,p = vertices
r = np.sqrt( (p[0]-c[0])**2 + (p[1]-c[1])**2 )
d = np.sqrt((X-c[0])**2+(Y-c[1])**2)
return d<r
def _polygonToMask(X, Y, vertices):
    """Boolean mask: True for grid points falling inside the polygon."""
    coords = np.vstack((X.flatten(), Y.flatten())).T
    inside = Path(vertices).contains_points(coords)
    return inside.reshape(X.shape)
class Mask(object):
    """ class for making masks. True are pixels masked OUT.

    This class provides methods for adding/subtracting components in the
    form of rectangles, circles, polygons. Each addition/subtraction is
    stored as 'operations' that are then applied on request.

    Parameters
    ----------
    img: {array,filename,shape of array}
        img to use for calculation. Since only the shape is used, a
        shape-like tuple can be provided instead of an array
    """
    def __init__(self,img=None):
        self.comp = []
        if isinstance(img,str): img = read(img)
        # if img is not an array at this point assume it is a shape-tuple.
        # Bug fix: the shape used to be read from img *before* this
        # conversion, which crashed for filename and shape-tuple input;
        # also guard against img=None so np.zeros(None) is never called.
        if img is not None and not isinstance(img,np.ndarray):
            img = np.zeros( img, dtype=bool )
        self.shape = img.shape if img is not None else None
        self.img = img
        self.mask = None
        self._cache = None
    def _define_component(self,operation,geometry,*vertices):
        """Record one add/subtract operation; normalizes shorthand vertex
        specs (circle as x,y,r; rectangle as x1,y1,x2,y2) to point tuples."""
        if geometry == 'circle' and len(vertices) == 3:
            xcen,ycen,radius = vertices
            vertices = ( (xcen,ycen), (xcen+radius,ycen) )
        if geometry == 'rectangle' and len(vertices) == 4:
            vertices = ( (vertices[0],vertices[1]),(vertices[2],vertices[3]) )
        # make sure vertices are hashable tuples (components are cache keys)
        if isinstance(vertices,list):
            vertices = [ (v[0],v[1]) for v in vertices ]
            vertices = tuple(vertices)
        self.comp.append( maskComponent(operation=operation,vertices=vertices,geometry=geometry) )
    def addCircle(self,*vertices): self._define_component( 'add', 'circle', *vertices )
    def subtractCircle(self,*vertices): self._define_component( 'subtract', 'circle', *vertices )
    def addRectangle(self,*vertices): self._define_component( 'add','rectangle', *vertices)
    def subtractRectangle(self,*vertices): self._define_component( 'subtract','rectangle',*vertices)
    def addPolygon(self,*vertices): self._define_component( 'add','polygon',*vertices)
    def subtractPolygon(self,*vertices): self._define_component( 'subtract','polygon',*vertices)
    def getMask(self,shape=None):
        """Apply all recorded components in order and return the boolean
        mask (True = masked out). Per-component masks are cached."""
        if shape is None and self.img is not None: shape = self.img.shape
        # NOTE(review): if shape, img and the cache are all None this
        # raises TypeError; callers are expected to supply a shape then.
        if shape is None and self.img is None: shape = self._cache['shape']
        if self._cache is None: self._cache = dict( shape = shape )
        # reset cache if shape does not match
        if shape != self._cache['shape']:
            self._cache = dict( shape = shape )
        X,Y = np.meshgrid ( range(shape[1]),range(shape[0]) )
        for component in self.comp:
            if component not in self._cache:
                if component.geometry == 'circle':
                    mask = _circleToMask( X,Y,component.vertices )
                elif component.geometry == 'rectangle':
                    mask = _rectangleToMask( X,Y,component.vertices )
                elif component.geometry == 'polygon':
                    mask = _polygonToMask( X,Y,component.vertices )
                else:
                    raise ValueError("Mask type %s not recognized"%component.geometry)
                self._cache[component] = mask
        # np.bool is deprecated/removed in modern numpy; plain bool is
        # equivalent here.
        mask = np.zeros(shape,dtype=bool)
        for comp in self.comp:
            m = self._cache[ comp ]
            if (comp.operation == "add"):
                mask[m] = True
            else:
                mask[m] = False
        self.mask = mask
        return mask
    def getMatplotlibMask(self,shape=None):
        """Return an RGBA overlay (gray, half-transparent where masked)."""
        mask = self.getMask(shape=shape)
        # convert
        mpl_mask = np.zeros( (mask.shape[0],mask.shape[1],4) )
        mpl_mask[:,:,:3] = 0.5; # gray color
        mpl_mask[:,:,3] = mask/2; # give some transparency
        return mpl_mask
    def save(self,fname,inverted=False):
        """Save the mask to *fname* (.npy via numpy, anything else as EDF)."""
        import fabio
        if self.mask is None: self.getMask()
        mask = self.mask
        if (inverted): mask = ~mask
        if os.path.splitext(fname)[1] == ".npy":
            np.save(fname,mask)
        else:
            i=fabio.edfimage.edfimage(mask.astype(np.uint8)); # edf does not support bool
            i.save(fname)
def snap(point, shape, snapRange=20):
    """ snap 'point' if within 'snapRange' from the border defined by 'shape' """
    x, y = point[0], point[1]
    # each coordinate is snapped independently to 0 or to the far edge;
    # note shape is (rows, cols) while point is (x, y)
    if x < snapRange:
        x = 0
    if x > shape[1] - snapRange:
        x = shape[1]
    if y < snapRange:
        y = 0
    if y > shape[0] - snapRange:
        y = shape[0]
    return (x, y)
def getPoints(N=1, shape=(100, 100), snapRange=0):
    """Collect N clicked points from the current figure, border-snapped.

    N < 1 collects points until the polygon is closed (middle click).
    Returns a single point when exactly one was clicked, else a list.
    """
    if N < 1:
        print('Right click cancels last point, middle click ends the polygon')
    clicked = [snap(p, shape, snapRange=snapRange) for p in plt.ginput(N)]
    return clicked[0] if len(clicked) == 1 else clicked
def makeMaskGui(img,snapRange=60,clim='auto'):
    """ interactive, click based approach to define a mask.

    Parameters
    ----------
    img : {array, filename}
        image to display while drawing the mask
    snapRange : int
        controls border snapping (in pixels) use <= 0 to disable;
    clim: 'auto' or list(min,max)
        controls color scale of image, if 'auto' uses 2%-98% percentile

    Returns
    -------
    Mask
        instance of the Mask class that allows to modify or save the mask
    """
    if isinstance(img,str): img = read(img)
    mask = Mask(img)
    if clim == "auto": clim = np.percentile(img,(2,98))
    ans='ok'
    # interactive loop: redraw image + current mask, then ask for the next
    # shape to add (lowercase) or subtract (uppercase) until 'done'
    while (ans != 'done'):
        plt.imshow(img)
        plt.clim(clim)
        plt.imshow(mask.getMatplotlibMask())
        plt.pause(0.01)
        ans = input("What's next p/P/c/C/r/R/done? (capitals = subtract)")
        if ans == "c":
            print("Adding circle, click on center then another point to define radius")
            vertices = getPoints(N=2,shape=img.shape,snapRange=snapRange)
            mask.addCircle(*vertices)
        if ans == "C":
            print("Subtracting circle, click on center then another point to define radius")
            vertices = getPoints(N=2,shape=img.shape,snapRange=snapRange)
            mask.subtractCircle(*vertices)
        if ans == "r":
            print("Adding rectangle, click on one corner and then on the opposite one")
            vertices = getPoints(N=2,shape=img.shape,snapRange=snapRange)
            mask.addRectangle(*vertices)
        if ans == "R":
            print("Subtracting rectangle, click on one corner and then on the opposite one")
            vertices = getPoints(N=2,shape=img.shape,snapRange=snapRange)
            mask.subtractRectangle(*vertices)
        if ans == 'p':
            print("Adding polygon")
            # N=-1: collect points until the polygon is closed (middle click)
            vertices = getPoints(N=-1,shape=img.shape,snapRange=snapRange)
            mask.addPolygon(*vertices)
        if ans == 'P':
            print("Subtracting polygon")
            vertices = getPoints(N=-1,shape=img.shape,snapRange=snapRange)
            mask.subtractPolygon(*vertices)
        plt.imshow(mask.getMatplotlibMask())
        plt.pause(0.01)
    # optionally persist the finished mask; saving is best-effort only
    fname = input("Enter a valid filename (ext .edf or .npy) if you want to save the mask (empty otherwise)")
    try:
        if fname != '':
            ext = os.path.splitext(fname)[1]
            if ext == '.edf':
                mask.save(fname)
            elif ext == '.npy':
                np.save(fname,mask.getMask())
    except Exception as e:
        log.error("Error in saving mask")
        log.error(e)
    finally:
        # NOTE(review): 'return' inside 'finally' silently suppresses any
        # exception raised in the try block — deliberate best-effort save,
        # but worth confirming this is intended.
        return mask
def maskBorder(width, shape):
    """ mask the border of an array for a given width

    Parameters
    ----------
    width : int
        the width of the region to mask (>0)
    shape : tuple
        shape of the array the mask is made for

    Returns
    -------
    boolean (False/True) array
        True are the pixels masked out
    """
    assert isinstance(width, int), "width has to be integer"
    assert width > 0, "width has to be positive"
    # start all-masked, then clear the interior (empty slice when the
    # array is narrower than twice the border width -> fully masked)
    mask = np.ones(shape, dtype=bool)
    mask[width:-width, width:-width] = False
    return mask
def maskCenterLines(width,shape):
    """ mask a cross going through the center of the array for a given width

    Parameters
    ----------
    width : int or (int, int)
        the width of the region to mask (>0); a 2-tuple gives separate
        widths for the horizontal and the vertical line
    shape : tuple
        shape of the array the mask is made for

    Returns
    -------
    boolean (False/True) array
        True are the pixels masked out
    """
    # Bug fix: the old assert forced 'width' to be a plain int even though
    # the body explicitly supported per-axis (w0, w1) tuples.
    if isinstance(width,int): width = (width,width)
    assert all(isinstance(w,int) for w in width), "width has to be integer"
    assert all(w > 0 for w in width), "width has to be positive"
    mask = np.zeros(shape,dtype=bool)
    c0 = int(shape[0]/2)
    c1 = int(shape[1]/2)
    w0 = int(width[0]/2)
    w1 = int(width[1]/2)
    mask[ c0-w0:c0+w0 , : ] = True
    mask[ : , c1-w1:c1+w1 ] = True
    return mask
# Pattern for simple axis masks like "x>10" or "y < 500":
# group 1 = axis letter, group 2 = comparison sign, group 3 = integer limit.
# Raw string fixes the invalid-escape DeprecationWarning ("\w", "\d").
g_mask_str = re.compile(r"(\w)\s*(<|>)\s*(\d+)")
def interpretMask(mask,shape=None):
    """ Interpret 'mask' as a mask

    Parameters
    ----------
    mask : filename or array or string like y>500
        None yields an all-False mask of the given shape
    shape : array or tuple
        needed to interpret a string like y>500 or a None mask

    Returns
    -------
    boolean (False/True) array
        True are the pixels masked out

    Raises
    ------
    ValueError
        if 'mask' cannot be interpreted or its shape disagrees with 'shape'
    """
    maskout = None
    ## simplest case, an existing file
    if isinstance(mask,str) and os.path.isfile(mask):
        # np.bool is deprecated/removed in modern numpy; bool is equivalent
        maskout = read(mask).astype(bool)
    ## mask string like "y>500"
    elif isinstance(mask,str) and not os.path.isfile(mask):
        if isinstance(shape,np.ndarray) : shape = shape.shape
        err_msg = ValueError(
            "The string '%s' could not be interpreted as simple mask; "
            "it should be something like x>10" % mask)
        assert shape is not None, "_interpretMask needs a shape to interpret a string"
        # interpret string of the form "<axis><sign><limit>"
        maskout = np.zeros(shape,dtype=bool)
        match = re.match(r"(\w)\s*(<|>)\s*(\d+)", mask)
        if match is None: raise err_msg
        (axis,sign,lim) = match.groups()
        if axis not in ("x","y"): raise err_msg
        if sign not in (">","<"): raise err_msg
        lim = int(lim)
        idx = slice(lim,None) if sign == ">" else slice(None,lim)
        if axis == 'y':
            maskout[idx,:] = True
        else:
            maskout[:,idx] = True
    elif isinstance(mask,np.ndarray):
        maskout = mask.astype(bool)
    elif mask is None:
        assert shape is not None, "_interpretMask needs a shape to interpret None"
        maskout = np.zeros(shape,dtype=bool)
    else:
        raise ValueError("Could not interpret %s as mask input"%mask)
    if shape is not None and maskout.shape != shape:
        raise ValueError(
            "The mask shape %s does not match the shape given as argument %s"
            % (maskout.shape, shape))
    return maskout
def interpretMasks(masks,shape=None):
    """ Interpret a single or a list of mask elements

    Every element can be an array, a filename to read, a 'mask string'
    (y>500).

    Parameters
    ----------
    masks : a 'mask element' or a list of mask elements
    shape : array or tuple
        needed to interpret y>500

    Returns
    -------
    boolean (False/True) array
        True are the pixels masked out
    """
    # an array is already a mask: just normalize the dtype
    if isinstance(masks, np.ndarray):
        return masks.astype(bool)
    # wrap a single element so the OR-fold below always applies
    if not isinstance(masks, (list, tuple, np.ndarray)):
        masks = (masks,)
    interpreted = iter(interpretMask(m, shape) for m in masks)
    combined = next(interpreted)
    for extra in interpreted:
        combined = np.logical_or(combined, extra)
    return combined
def test(shape=(1000, 2000)):
    """ Build a simple demo mask programmatically and display it """
    demo = Mask()
    # annulus: outer circle minus inner circle
    demo.addCircle(400, 300, 250)
    demo.subtractCircle(400, 300, 150)
    demo.addRectangle(350, 250, 1500, 700)
    plt.imshow(demo.getMask(shape))
    return demo
if __name__ == "__main__":
    # demo entry point: build the example mask and display it
    test()
    plt.show()
    # block until the user confirms, so the plot window stays open
    ans=input("Enter to finish")
| |
from .base import BaseLibLinear, BaseSVC, BaseLibSVM
from ..base import RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin
from ..feature_selection.from_model import _LearntSelectorMixin
class LinearSVC(BaseLibLinear, LinearClassifierMixin, _LearntSelectorMixin,
                SparseCoefMixin):
    """Linear Support Vector Classification.

    Similar to SVC with parameter kernel='linear', but implemented in terms of
    liblinear rather than libsvm, so it has more flexibility in the choice of
    penalties and loss functions and should scale better (to large numbers of
    samples).

    This class supports both dense and sparse input and the multiclass support
    is handled according to a one-vs-the-rest scheme.

    Parameters
    ----------
    C : float, optional (default=1.0)
        Penalty parameter C of the error term.

    loss : string, 'l1' or 'l2' (default='l2')
        Specifies the loss function. 'l1' is the hinge loss (standard SVM)
        while 'l2' is the squared hinge loss.

    penalty : string, 'l1' or 'l2' (default='l2')
        Specifies the norm used in the penalization. The 'l2'
        penalty is the standard used in SVC. The 'l1' leads to `coef_`
        vectors that are sparse.

    dual : bool, (default=True)
        Select the algorithm to either solve the dual or primal
        optimization problem. Prefer dual=False when n_samples > n_features.

    tol : float, optional (default=1e-4)
        Tolerance for stopping criteria.

    multi_class : string, 'ovr' or 'crammer_singer' (default='ovr')
        Determines the multi-class strategy if `y` contains more than
        two classes.
        `ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
        optimizes a joint objective over all classes.
        While `crammer_singer` is interesting from a theoretical perspective
        as it is consistent it is seldom used in practice and rarely leads to
        better accuracy and is more expensive to compute.
        If `crammer_singer` is chosen, the options loss, penalty and dual will
        be ignored.

    fit_intercept : boolean, optional (default=True)
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    intercept_scaling : float, optional (default=1)
        when self.fit_intercept is True, instance vector x becomes
        [x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equal to
        intercept_scaling is appended to the instance vector.
        The intercept becomes intercept_scaling * synthetic feature weight.
        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.

    class_weight : {dict, 'auto'}, optional
        Set the parameter C of class i to class_weight[i]*C for
        SVC. If not given, all classes are supposed to have
        weight one. The 'auto' mode uses the values of y to
        automatically adjust weights inversely proportional to
        class frequencies.

    verbose : int, default: 0
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in liblinear that, if enabled, may not work
        properly in a multithreaded context.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data.

    Attributes
    ----------
    `coef_` : array, shape = [n_features] if n_classes == 2 \
            else [n_classes, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of linear kernel.
        `coef_` is a readonly property derived from `raw_coef_` that \
        follows the internal memory layout of liblinear.

    `intercept_` : array, shape = [1] if n_classes == 2 else [n_classes]
        Constants in decision function.

    Notes
    -----
    The underlying C implementation uses a random number generator to
    select features when fitting the model. It is thus not uncommon,
    to have slightly different results for the same input data. If
    that happens, try with a smaller tol parameter.

    The underlying implementation (liblinear) uses a sparse internal
    representation for the data that will incur a memory copy.

    **References:**
    `LIBLINEAR: A Library for Large Linear Classification
    <http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__

    See also
    --------
    SVC
        Implementation of Support Vector Machine classifier using libsvm:
        the kernel can be non-linear but its SMO algorithm does not
        scale to large number of samples as LinearSVC does.
        Furthermore SVC multi-class mode is implemented using one
        vs one scheme while LinearSVC uses one vs the rest. It is
        possible to implement one vs the rest with SVC by using the
        :class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
        Finally SVC can fit dense data without memory copy if the input
        is C-contiguous. Sparse data will still incur memory copy though.

    sklearn.linear_model.SGDClassifier
        SGDClassifier can optimize the same cost function as LinearSVC
        by adjusting the penalty and loss parameters. Furthermore
        SGDClassifier is scalable to large number of samples as it uses
        a Stochastic Gradient Descent optimizer.
        Finally SGDClassifier can fit both dense and sparse data without
        memory copy if the input is C-contiguous or CSR.
    """
    def __init__(self, penalty='l2', loss='l2', dual=True, tol=1e-4, C=1.0,
                 multi_class='ovr', fit_intercept=True, intercept_scaling=1,
                 class_weight=None, verbose=0, random_state=None):
        # forward all hyperparameters unchanged to the liblinear base class
        super(LinearSVC, self).__init__(
            penalty=penalty, loss=loss, dual=dual, tol=tol, C=C,
            multi_class=multi_class, fit_intercept=fit_intercept,
            intercept_scaling=intercept_scaling,
            class_weight=class_weight, verbose=verbose,
            random_state=random_state)
class SVC(BaseSVC):
    """C-Support Vector Classification.

    The implementation is based on libsvm. The fit time complexity
    is more than quadratic with the number of samples which makes it hard
    to scale to datasets with more than a couple of 10000 samples.

    The multiclass support is handled according to a one-vs-one scheme.

    For details on the precise mathematical formulation of the provided
    kernel functions and how `gamma`, `coef0` and `degree` affect each,
    see the corresponding section in the narrative documentation:
    :ref:`svm_kernels`.

    .. The narrative documentation is available at http://scikit-learn.org/

    Parameters
    ----------
    C : float, optional (default=1.0)
        Penalty parameter C of the error term.

    kernel : string, optional (default='rbf')
        Specifies the kernel type to be used in the algorithm.
        It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
        a callable.
        If none is given, 'rbf' will be used. If a callable is given it is
        used to precompute the kernel matrix.

    degree : int, optional (default=3)
        Degree of the polynomial kernel function ('poly').
        Ignored by all other kernels.

    gamma : float, optional (default=0.0)
        Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
        If gamma is 0.0 then 1/n_features will be used instead.

    coef0 : float, optional (default=0.0)
        Independent term in kernel function.
        It is only significant in 'poly' and 'sigmoid'.

    probability : boolean, optional (default=False)
        Whether to enable probability estimates. This must be enabled prior
        to calling `fit`, and will slow down that method.

    shrinking : boolean, optional (default=True)
        Whether to use the shrinking heuristic.

    tol : float, optional (default=1e-3)
        Tolerance for stopping criterion.

    cache_size : float, optional
        Specify the size of the kernel cache (in MB).

    class_weight : {dict, 'auto'}, optional
        Set the parameter C of class i to class_weight[i]*C for
        SVC. If not given, all classes are supposed to have
        weight one. The 'auto' mode uses the values of y to
        automatically adjust weights inversely proportional to
        class frequencies.

    verbose : bool, default: False
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in libsvm that, if enabled, may not work
        properly in a multithreaded context.

    max_iter : int, optional (default=-1)
        Hard limit on iterations within solver, or -1 for no limit.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data for probability estimation.

    Attributes
    ----------
    `support_` : array-like, shape = [n_SV]
        Index of support vectors.

    `support_vectors_` : array-like, shape = [n_SV, n_features]
        Support vectors.

    `n_support_` : array-like, dtype=int32, shape = [n_class]
        number of support vector for each class.

    `dual_coef_` : array, shape = [n_class-1, n_SV]
        Coefficients of the support vector in the decision function. \
        For multiclass, coefficient for all 1-vs-1 classifiers. \
        The layout of the coefficients in the multiclass case is somewhat \
        non-trivial. See the section about multi-class classification in the \
        SVM section of the User Guide for details.

    `coef_` : array, shape = [n_class-1, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of linear kernel.
        `coef_` is a readonly property derived from `dual_coef_` and
        `support_vectors_`.

    `intercept_` : array, shape = [n_class * (n_class-1) / 2]
        Constants in decision function.

    Examples
    --------
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> y = np.array([1, 1, 2, 2])
    >>> from sklearn.svm import SVC
    >>> clf = SVC()
    >>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
    SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0, degree=3,
        gamma=0.0, kernel='rbf', max_iter=-1, probability=False,
        random_state=None, shrinking=True, tol=0.001, verbose=False)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]

    See also
    --------
    SVR
        Support Vector Machine for Regression implemented using libsvm.

    LinearSVC
        Scalable Linear Support Vector Machine for classification
        implemented using liblinear. Check the See also section of
        LinearSVC for more comparison element.
    """
    def __init__(self, C=1.0, kernel='rbf', degree=3, gamma=0.0,
                 coef0=0.0, shrinking=True, probability=False,
                 tol=1e-3, cache_size=200, class_weight=None,
                 verbose=False, max_iter=-1, random_state=None):
        # positional base-class arguments; the two 0. literals appear to fill
        # the nu and epsilon slots, which C-SVC does not use (cf. the NuSVC
        # and SVR calls in this module) -- confirm against BaseLibSVM
        super(SVC, self).__init__(
            'c_svc', kernel, degree, gamma, coef0, tol, C, 0., 0., shrinking,
            probability, cache_size, class_weight, verbose, max_iter,
            random_state)
class NuSVC(BaseSVC):
    """Nu-Support Vector Classification.

    Similar to SVC but uses a parameter to control the number of support
    vectors.

    The implementation is based on libsvm.

    Parameters
    ----------
    nu : float, optional (default=0.5)
        An upper bound on the fraction of training errors and a lower
        bound of the fraction of support vectors. Should be in the
        interval (0, 1].

    kernel : string, optional (default='rbf')
        Specifies the kernel type to be used in the algorithm.
        It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
        a callable.
        If none is given, 'rbf' will be used. If a callable is given it is
        used to precompute the kernel matrix.

    degree : int, optional (default=3)
        degree of kernel function
        is significant only in poly, rbf, sigmoid

    gamma : float, optional (default=0.0)
        kernel coefficient for rbf and poly, if gamma is 0.0 then 1/n_features
        will be taken.

    coef0 : float, optional (default=0.0)
        independent term in kernel function. It is only significant
        in poly/sigmoid.

    probability : boolean, optional (default=False)
        Whether to enable probability estimates. This must be enabled prior
        to calling `fit`, and will slow down that method.

    shrinking : boolean, optional (default=True)
        Whether to use the shrinking heuristic.

    tol : float, optional (default=1e-3)
        Tolerance for stopping criterion.

    cache_size : float, optional
        Specify the size of the kernel cache (in MB).

    verbose : bool, default: False
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in libsvm that, if enabled, may not work
        properly in a multithreaded context.

    max_iter : int, optional (default=-1)
        Hard limit on iterations within solver, or -1 for no limit.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data for probability estimation.

    Attributes
    ----------
    `support_` : array-like, shape = [n_SV]
        Index of support vectors.

    `support_vectors_` : array-like, shape = [n_SV, n_features]
        Support vectors.

    `n_support_` : array-like, dtype=int32, shape = [n_class]
        number of support vector for each class.

    `dual_coef_` : array, shape = [n_class-1, n_SV]
        Coefficients of the support vector in the decision function. \
        For multiclass, coefficient for all 1-vs-1 classifiers. \
        The layout of the coefficients in the multiclass case is somewhat \
        non-trivial. See the section about multi-class classification in \
        the SVM section of the User Guide for details.

    `coef_` : array, shape = [n_class-1, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of linear kernel.
        `coef_` is a readonly property derived from `dual_coef_` and
        `support_vectors_`.

    `intercept_` : array, shape = [n_class * (n_class-1) / 2]
        Constants in decision function.

    Examples
    --------
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> y = np.array([1, 1, 2, 2])
    >>> from sklearn.svm import NuSVC
    >>> clf = NuSVC()
    >>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
    NuSVC(cache_size=200, coef0=0.0, degree=3, gamma=0.0, kernel='rbf',
       max_iter=-1, nu=0.5, probability=False, random_state=None,
       shrinking=True, tol=0.001, verbose=False)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]

    See also
    --------
    SVC
        Support Vector Machine for classification using libsvm.

    LinearSVC
        Scalable linear Support Vector Machine for classification using
        liblinear.
    """
    def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma=0.0,
                 coef0=0.0, shrinking=True, probability=False,
                 tol=1e-3, cache_size=200, verbose=False, max_iter=-1,
                 random_state=None):
        # positional base-class arguments; the 0. literals appear to fill the
        # C and epsilon slots, which nu-SVC does not use, and class_weight is
        # fixed to None -- confirm against BaseLibSVM
        super(NuSVC, self).__init__(
            'nu_svc', kernel, degree, gamma, coef0, tol, 0., nu, 0., shrinking,
            probability, cache_size, None, verbose, max_iter, random_state)
class SVR(BaseLibSVM, RegressorMixin):
    """epsilon-Support Vector Regression.

    The free parameters in the model are C and epsilon.

    The implementation is based on libsvm.

    Parameters
    ----------
    C : float, optional (default=1.0)
        penalty parameter C of the error term.

    epsilon : float, optional (default=0.1)
        epsilon in the epsilon-SVR model. It specifies the epsilon-tube
        within which no penalty is associated in the training loss function
        with points predicted within a distance epsilon from the actual
        value.

    kernel : string, optional (default='rbf')
        Specifies the kernel type to be used in the algorithm.
        It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
        a callable.
        If none is given, 'rbf' will be used. If a callable is given it is
        used to precompute the kernel matrix.

    degree : int, optional (default=3)
        degree of kernel function
        is significant only in poly, rbf, sigmoid

    gamma : float, optional (default=0.0)
        kernel coefficient for rbf and poly, if gamma is 0.0 then 1/n_features
        will be taken.

    coef0 : float, optional (default=0.0)
        independent term in kernel function. It is only significant
        in poly/sigmoid.

    probability : boolean, optional (default=False)
        Whether to enable probability estimates. This must be enabled prior
        to calling `fit`, and will slow down that method.

    shrinking : boolean, optional (default=True)
        Whether to use the shrinking heuristic.

    tol : float, optional (default=1e-3)
        Tolerance for stopping criterion.

    cache_size : float, optional
        Specify the size of the kernel cache (in MB).

    verbose : bool, default: False
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in libsvm that, if enabled, may not work
        properly in a multithreaded context.

    max_iter : int, optional (default=-1)
        Hard limit on iterations within solver, or -1 for no limit.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data for probability estimation.

    Attributes
    ----------
    `support_` : array-like, shape = [n_SV]
        Index of support vectors.

    `support_vectors_` : array-like, shape = [nSV, n_features]
        Support vectors.

    `dual_coef_` : array, shape = [n_classes-1, n_SV]
        Coefficients of the support vector in the decision function.

    `coef_` : array, shape = [n_classes-1, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of linear kernel.
        `coef_` is a readonly property derived from `dual_coef_` and
        `support_vectors_`.

    `intercept_` : array, shape = [n_class * (n_class-1) / 2]
        Constants in decision function.

    Examples
    --------
    >>> from sklearn.svm import SVR
    >>> import numpy as np
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> y = np.random.randn(n_samples)
    >>> X = np.random.randn(n_samples, n_features)
    >>> clf = SVR(C=1.0, epsilon=0.2)
    >>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
    SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma=0.0,
        kernel='rbf', max_iter=-1, probability=False, random_state=None,
        shrinking=True, tol=0.001, verbose=False)

    See also
    --------
    NuSVR
        Support Vector Machine for regression implemented using libsvm
        using a parameter to control the number of support vectors.
    """
    def __init__(self, kernel='rbf', degree=3, gamma=0.0, coef0=0.0, tol=1e-3,
                 C=1.0, epsilon=0.1, shrinking=True, probability=False,
                 cache_size=200, verbose=False, max_iter=-1,
                 random_state=None):
        # positional base-class arguments; the 0. literal appears to fill the
        # nu slot, unused by epsilon-SVR -- confirm against BaseLibSVM
        super(SVR, self).__init__(
            'epsilon_svr', kernel, degree, gamma, coef0, tol, C, 0., epsilon,
            shrinking, probability, cache_size, None, verbose,
            max_iter, random_state)
class NuSVR(BaseLibSVM, RegressorMixin):
    """Nu Support Vector Regression.

    Similar to NuSVC, for regression, uses a parameter nu to control
    the number of support vectors. However, unlike NuSVC, where nu
    replaces C, here nu replaces the parameter epsilon of SVR.

    The implementation is based on libsvm.

    Parameters
    ----------
    C : float, optional (default=1.0)
        penalty parameter C of the error term.

    nu : float, optional
        An upper bound on the fraction of training errors and a lower bound of
        the fraction of support vectors. Should be in the interval (0, 1]. By
        default 0.5 will be taken. Only available if impl='nu_svc'.

    kernel : string, optional (default='rbf')
        Specifies the kernel type to be used in the algorithm.
        It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
        a callable.
        If none is given, 'rbf' will be used. If a callable is given it is
        used to precompute the kernel matrix.

    degree : int, optional (default=3)
        degree of kernel function
        is significant only in poly, rbf, sigmoid

    gamma : float, optional (default=0.0)
        kernel coefficient for rbf and poly, if gamma is 0.0 then 1/n_features
        will be taken.

    coef0 : float, optional (default=0.0)
        independent term in kernel function. It is only significant
        in poly/sigmoid.

    probability : boolean, optional (default=False)
        Whether to enable probability estimates. This must be enabled prior
        to calling `fit`, and will slow down that method.

    shrinking : boolean, optional (default=True)
        Whether to use the shrinking heuristic.

    tol : float, optional (default=1e-3)
        Tolerance for stopping criterion.

    cache_size : float, optional
        Specify the size of the kernel cache (in MB).

    verbose : bool, default: False
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in libsvm that, if enabled, may not work
        properly in a multithreaded context.

    max_iter : int, optional (default=-1)
        Hard limit on iterations within solver, or -1 for no limit.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data for probability estimation.

    Attributes
    ----------
    `support_` : array-like, shape = [n_SV]
        Index of support vectors.

    `support_vectors_` : array-like, shape = [nSV, n_features]
        Support vectors.

    `dual_coef_` : array, shape = [n_classes-1, n_SV]
        Coefficients of the support vector in the decision function.

    `coef_` : array, shape = [n_classes-1, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of linear kernel.
        `coef_` is a readonly property derived from `dual_coef_` and
        `support_vectors_`.

    `intercept_` : array, shape = [n_class * (n_class-1) / 2]
        Constants in decision function.

    Examples
    --------
    >>> from sklearn.svm import NuSVR
    >>> import numpy as np
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> y = np.random.randn(n_samples)
    >>> X = np.random.randn(n_samples, n_features)
    >>> clf = NuSVR(C=1.0, nu=0.1)
    >>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
    NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma=0.0, kernel='rbf',
       max_iter=-1, nu=0.1, probability=False, random_state=None,
       shrinking=True, tol=0.001, verbose=False)

    See also
    --------
    NuSVC
        Support Vector Machine for classification implemented with libsvm
        with a parameter to control the number of support vectors.

    SVR
        epsilon Support Vector Machine for regression implemented with libsvm.
    """
    def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
                 gamma=0.0, coef0=0.0, shrinking=True,
                 probability=False, tol=1e-3, cache_size=200,
                 verbose=False, max_iter=-1, random_state=None):
        # positional base-class arguments; the 0. literal appears to fill the
        # epsilon slot, which nu-SVR does not use -- confirm against BaseLibSVM
        super(NuSVR, self).__init__(
            'nu_svr', kernel, degree, gamma, coef0, tol, C, nu, 0., shrinking,
            probability, cache_size, None, verbose, max_iter, random_state)
class OneClassSVM(BaseLibSVM):
    """Unsupervised Outliers Detection.

    Estimate the support of a high-dimensional distribution.

    The implementation is based on libsvm.

    Parameters
    ----------
    kernel : string, optional (default='rbf')
        Specifies the kernel type to be used in the algorithm.
        It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
        a callable.
        If none is given, 'rbf' will be used. If a callable is given it is
        used to precompute the kernel matrix.

    nu : float, optional
        An upper bound on the fraction of training
        errors and a lower bound of the fraction of support
        vectors. Should be in the interval (0, 1]. By default 0.5
        will be taken.

    degree : int, optional
        Degree of kernel function. Significant only in poly, rbf, sigmoid.

    gamma : float, optional (default=0.0)
        kernel coefficient for rbf and poly, if gamma is 0.0 then 1/n_features
        will be taken.

    coef0 : float, optional
        Independent term in kernel function. It is only significant in
        poly/sigmoid.

    tol : float, optional
        Tolerance for stopping criterion.

    shrinking : boolean, optional
        Whether to use the shrinking heuristic.

    cache_size : float, optional
        Specify the size of the kernel cache (in MB).

    verbose : bool, default: False
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in libsvm that, if enabled, may not work
        properly in a multithreaded context.

    max_iter : int, optional (default=-1)
        Hard limit on iterations within solver, or -1 for no limit.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data for probability estimation.

    Attributes
    ----------
    `support_` : array-like, shape = [n_SV]
        Index of support vectors.

    `support_vectors_` : array-like, shape = [nSV, n_features]
        Support vectors.

    `dual_coef_` : array, shape = [n_classes-1, n_SV]
        Coefficient of the support vector in the decision function.

    `coef_` : array, shape = [n_classes-1, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of linear kernel.
        `coef_` is a readonly property derived from `dual_coef_` and
        `support_vectors_`.

    `intercept_` : array, shape = [n_classes-1]
        Constants in decision function.
    """
    def __init__(self, kernel='rbf', degree=3, gamma=0.0, coef0=0.0, tol=1e-3,
                 nu=0.5, shrinking=True, cache_size=200, verbose=False,
                 max_iter=-1, random_state=None):
        # positional base-class arguments; the 0. literals appear to fill the
        # C and epsilon slots, probability is fixed to False and class_weight
        # to None -- confirm against BaseLibSVM
        super(OneClassSVM, self).__init__(
            'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
            shrinking, False, cache_size, None, verbose, max_iter,
            random_state)
    def fit(self, X, sample_weight=None, **params):
        """
        Detects the soft boundary of the set of samples X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Set of samples, where n_samples is the number of samples and
            n_features is the number of features.

        sample_weight : array-like, shape (n_samples,)
            Per-sample weights. Rescale C per sample. Higher weights
            force the classifier to put more emphasis on these points.

        Returns
        -------
        self : object
            Returns self.

        Notes
        -----
        If X is not a C-ordered contiguous array it is copied.
        """
        # one-class fit is unsupervised: pass an empty label list to the base
        super(OneClassSVM, self).fit(X, [], sample_weight=sample_weight,
                                     **params)
        return self
| |
###############################################################################
# general GAMMA utilities
# Copyright (c) 2014-2021, the pyroSAR Developers, Stefan Engelhardt.
# This file is part of the pyroSAR Project. It is subject to the
# license terms in the LICENSE.txt file found in the top-level
# directory of this distribution and at
# https://github.com/johntruckenbrodt/pyroSAR/blob/master/LICENSE.txt.
# No part of the pyroSAR project, including this file, may be
# copied, modified, propagated, or distributed except according
# to the terms contained in the LICENSE.txt file.
################################################################################
import math
import os
import re
import string
import codecs
import subprocess as sp
from datetime import datetime
from pyroSAR.examine import ExamineGamma
from spatialist.ancillary import parse_literal, run, union, dissolve
from spatialist.envi import hdr
from .error import gammaErrorHandler
class ISPPar(object):
    """
    Reader for ISP parameter files of the GAMMA software package

    This class allows to read all information from files in GAMMA's parameter file format.
    Each key-value pair is parsed and added as attribute. For instance if the parameter file
    contains the pair 'sensor: TSX-1' an attribute named 'sensor' with the value 'TSX-1' will be available.

    The values are converted to native Python types, while unit identifiers like 'dB' or 'Hz' are removed.

    Please see the GAMMA reference manual for further information on the actual file format.

    Parameters
    ----------
    filename: str
        the GAMMA parameter file

    Examples
    --------
    >>> from pyroSAR.gamma import ISPPar
    >>> with ISPPar('S1A__IW___A_20141115T181801_VH_grd.par') as par:
    ...     print(par)  # print an overview of all available metadata
    ...     print(par.keys)  # print all parameter names
    ...     for key, value in par.envidict().items():
    ...         print('{0}: {1}'.format(key, value))  # print the ENVI HDR compliant metadata

    Attributes
    ----------
    keys: list
        the names of all parameters
    """
    # matches one 'key: value' pair per line
    _re_kv_pair = re.compile(r'^(\w+):\s*(.+)\s*')
    # matches a leading float literal, optionally signed and/or with an exponent
    _re_float_literal = re.compile(r'^[+-]?(?:(\d*\.\d+)|(\d+\.?))(?:[Ee][+-]?\d+)?')

    def __init__(self, filename):
        """Parses an ISP parameter file from disk.

        Args:
            filename: The filename or file object representing the ISP parameter file.
        """
        if isinstance(filename, str):
            par_file = open(filename, 'r')
        else:
            par_file = filename
        self.keys = ['filetype']
        try:
            content = par_file.read().split('\n')
        except UnicodeDecodeError:
            # fix: close the first handle before re-opening; it was leaked before
            par_file.close()
            # re-read while ignoring undecodable bytes, then drop non-printable characters
            par_file = codecs.open(filename, 'r', encoding='utf-8', errors='ignore')
            content = par_file.read()
            printable = set(string.printable)
            content = filter(lambda x: x in printable, content)
            content = ''.join(list(content)).split('\n')
        finally:
            par_file.close()
        # classify the file by its first (header) line
        if 'Image Parameter File' in content[0]:
            setattr(self, 'filetype', 'isp')
        elif 'DEM/MAP parameter file' in content[0]:
            setattr(self, 'filetype', 'dem')
        else:
            setattr(self, 'filetype', 'unknown')
        for line in content:
            match = ISPPar._re_kv_pair.match(line)
            if not match:
                continue  # Skip malformed lines with no key-value pairs
            key = match.group(1)
            items = match.group(2).split()
            if len(items) == 0:
                value = None
            elif len(items) == 1:
                value = parse_literal(items[0])
            else:
                if not ISPPar._re_float_literal.match(items[0]):
                    # Value is a string literal containing whitespace characters
                    value = match.group(2)
                else:
                    # Evaluate each item and stop at the first non-float literal
                    value = []
                    for i in items:
                        match = ISPPar._re_float_literal.match(i)
                        if match:
                            value.append(parse_literal(match.group()))
                        else:
                            # If the first float literal is immediately followed by a non-float literal handle the
                            # first one as singular value, e.g. in '20.0970 dB'
                            if len(value) == 1:
                                value = value[0]
                            break
            self.keys.append(key)
            setattr(self, key, value)
        if hasattr(self, 'date'):
            # normalize the date sequence to an ISO 8601-like string
            # NOTE(review): '{:02f}' does not zero-pad the seconds field;
            # presumably '{:09.6f}' was intended -- kept to preserve output format
            try:
                self.date = '{}-{:02d}-{:02d}T{:02d}:{:02d}:{:02f}'.format(*self.date)
            except (IndexError, TypeError, ValueError):
                # fix: narrowed from a bare 'except:' which also swallowed
                # KeyboardInterrupt/SystemExit; this branch handles the case
                # where only a date (no time fields) is available
                self.date = '{}-{:02d}-{:02d}'.format(*self.date)

    def __enter__(self):
        # support usage as a context manager (no resources held after __init__)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        return

    def __getattr__(self, item):
        # will only be run if object has no attribute item
        raise AttributeError("parameter file has no attribute '{}'".format(item))

    def __str__(self):
        # align all values in a column right of the longest key name
        maxlen = len(max(self.keys, key=len)) + 1
        return '\n'.join(['{key}:{sep}{value}'.format(key=key,
                                                      sep=(maxlen - len(key)) * ' ',
                                                      value=getattr(self, key)) for key in self.keys])

    def envidict(self, nodata=None):
        """
        export relevant metadata to an ENVI HDR file compliant format

        Parameters
        ----------
        nodata: int, float or None
            a no data value to write to the HDR file via attribute 'data ignore value'

        Returns
        -------
        dict
            a dictionary containing attributes translated to ENVI HDR naming

        Raises
        ------
        TypeError
            if the GAMMA data format has no ENVI data type equivalent
        RuntimeError
            if the DEM projection is neither UTM nor EQA
        """
        out = dict(bands=1,
                   header_offset=0,
                   file_type='ENVI Standard',
                   interleave='bsq',
                   sensor_type='Unknown',
                   byte_order=1,
                   wavelength_units='Unknown')
        if hasattr(self, 'date'):
            out['acquisition_time'] = self.date + 'Z'
        # dimension keys differ between ISP and DEM parameter files
        out['samples'] = getattr(self, union(['width', 'range_samples', 'samples'], self.keys)[0])
        out['lines'] = getattr(self, union(['nlines', 'azimuth_lines', 'lines'], self.keys)[0])
        # GAMMA data format -> ENVI data type code
        dtypes_lookup = {'FCOMPLEX': 6, 'FLOAT': 4, 'REAL*4': 4, 'INTEGER*2': 2, 'SHORT': 12}
        dtype = getattr(self, union(['data_format', 'image_format'], self.keys)[0])
        if dtype not in dtypes_lookup.keys():
            raise TypeError('unsupported data type: {}'.format(dtype))
        out['data_type'] = dtypes_lookup[dtype]
        if nodata is not None:
            out['data_ignore_value'] = nodata
        if out['data_type'] == 6:
            out['complex_function'] = 'Power'
        # projections = ['AEAC', 'EQA', 'LCC', 'LCC2', 'OMCH', 'PC', 'PS', 'SCH', 'TM', 'UTM']
        # the corner coordinates are shifted by 1/2 pixel to the Northwest since GAMMA pixel
        # coordinates are defined for the pixel center while in ENVI it is the upper left
        if hasattr(self, 'DEM_projection'):
            if self.DEM_projection == 'UTM':
                hem = 'North' if float(self.false_northing) == 0 else 'South'
                out['map_info'] = ['UTM', '1.0000', '1.0000',
                                   self.corner_east - (abs(self.post_east) / 2),
                                   self.corner_north + (abs(self.post_north) / 2),
                                   str(abs(float(self.post_east))),
                                   str(abs(float(self.post_north))),
                                   self.projection_zone, hem, 'WGS-84', 'units=Meters']
            elif self.DEM_projection == 'EQA':
                out['map_info'] = ['Geographic Lat/Lon', '1.0000', '1.0000',
                                   self.corner_lon - (abs(self.post_lon) / 2),
                                   self.corner_lat + (abs(self.post_lat) / 2),
                                   str(abs(float(self.post_lon))),
                                   str(abs(float(self.post_lat))),
                                   'WGS-84', 'units=Degrees']
            else:
                raise RuntimeError('unsupported projection: {}'.format(self.DEM_projection))
        return out
def par2hdr(parfile, hdrfile, modifications=None, nodata=None):
    """
    Create an ENVI HDR file from a GAMMA PAR file

    Parameters
    ----------
    parfile: str
        the GAMMA parfile
    hdrfile: str
        the ENVI HDR file
    modifications: dict or None
        a dictionary of values overriding those derived from the parfile
    nodata: int, float or None
        a no data value written to the HDR file as 'data ignore value'

    Returns
    -------

    Examples
    --------
    >>> from pyroSAR.gamma.auxil import par2hdr
    >>> par2hdr('dem_seg.par', 'inc.hdr')
    # write a HDR file for byte data based on a parfile of float data
    >>> par2hdr('dem_seg.par', 'ls_map.hdr', modifications={'data_type': 1})

    See Also
    --------
    :class:`spatialist.envi.HDRobject`
    :func:`spatialist.envi.hdr`
    """
    with ISPPar(parfile) as parameters:
        # translate the parfile metadata to ENVI naming, apply any overrides,
        # then write the HDR file
        meta = parameters.envidict(nodata)
        if modifications is not None:
            meta.update(modifications)
        hdr(meta, hdrfile)
class UTM(object):
    """
    convert a gamma parameter file corner coordinate from EQA to UTM
    Parameters
    ----------
    parfile: str
        the GAMMA parameter file to read the coordinate from
    Example
    -------
    >>> from pyroSAR.gamma import UTM
    >>> print(UTM('gamma.par').zone)
    """
    def __init__(self, parfile):
        par = ISPPar(parfile)
        # interactive inputs for GAMMA's coord_trans tool:
        # source datum/projection + coordinate, then target datum/projection
        inlist = ['WGS84', 1, 'EQA', par.corner_lon, par.corner_lat, '', 'WGS84', 1, 'UTM', '']
        # bug fix: materialize as a list instead of a single-use map iterator
        inlist = [str(x) for x in inlist]
        proc = sp.Popen(['coord_trans'], stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE,
                        universal_newlines=True, shell=False)
        out, err = proc.communicate(''.join([x + '\n' for x in inlist]))
        out = [x for x in filter(None, out.split('\n')) if ':' in x]
        self.meta = dict()
        for line in out:
            # split on the first colon only so values containing ':' do not
            # break the two-element unpacking
            key, value = re.split(r'\s*:\s*', line, maxsplit=1)
            value = value.split()
            # bug fix: store lists instead of lazy map objects so that meta
            # entries can be read more than once (map iterators are exhausted
            # after a single pass, e.g. by the unpacking below)
            value = [parse_literal(x) for x in value] if len(value) > 1 else value[0]
            self.meta[key] = value
        # newer coord_trans versions additionally report the altitude
        try:
            self.zone, self.northing, self.easting, self.altitude = \
                self.meta['UTM zone/northing/easting/altitude (m)']
        except KeyError:
            self.zone, self.northing, self.easting = \
                self.meta['UTM zone/northing/easting (m)']
def process(cmd, outdir=None, logfile=None, logpath=None, inlist=None, void=True, shellscript=None):
    """
    wrapper function to execute GAMMA commands via module :mod:`subprocess`
    Parameters
    ----------
    cmd: list[str]
        the command line arguments
    outdir: str
        the directory to execute the command in
    logfile: str
        a file to write the command log to; overrides parameter logpath
    logpath: str
        a directory to write logfiles to; the file will be named {GAMMA command}.log, e.g. gc_map.log;
        is overridden by parameter logfile
    inlist: list
        a list of values, which is passed as interactive inputs via stdin
    void: bool
        return the stdout and stderr messages?
    shellscript: str
        a file to write the GAMMA commands to in shell format
    Returns
    -------
    tuple of str or None
        the stdout and stderr messages if void is False, otherwise None
    """
    # an explicit logfile takes precedence over a logpath-derived default name
    if logfile is not None:
        log = logfile
    else:
        log = os.path.join(logpath, os.path.basename(cmd[0]) + '.log') if logpath else None
    gamma_home = ExamineGamma().home
    # optionally append the command to a shell script for later re-execution
    if shellscript is not None:
        if not os.path.isfile(shellscript):
            # create an empty file
            with open(shellscript, 'w') as init:
                pass
        # serialize the command; interactive inputs are appended as a here-string
        line = ' '.join([str(x) for x in dissolve(cmd)])
        if inlist is not None:
            line += ' <<< $"{}"'.format('\n'.join([str(x) for x in inlist]) + '\n')
        with open(shellscript, 'r+') as sh:
            if outdir is not None:
                content = sh.read()
                sh.seek(0)
                # on first use, prepend a header exporting the output directory
                # and GAMMA installation path as shell variables
                is_new = re.search('this script was created automatically by pyroSAR', content) is None
                if is_new:
                    ts = datetime.now().strftime('%a %b %d %H:%M:%S %Y')
                    sh.write('# this script was created automatically by pyroSAR on {}\n\n'.format(ts))
                    sh.write('export base={}\n'.format(outdir))
                    sh.write('export GAMMA_HOME={}\n\n'.format(gamma_home))
                    sh.write(content)
                # substitute the absolute paths with the exported variables
                line = line.replace(outdir, '$base').replace(gamma_home, '$GAMMA_HOME')
            sh.seek(0, 2)  # set pointer to the end of the file
            sh.write(line + '\n\n')
    # create an environment containing the locations of all GAMMA submodules to be passed to the subprocess calls
    gammaenv = os.environ.copy()
    gammaenv['GAMMA_HOME'] = gamma_home
    # GDAL_DATA is needed by GAMMA commands that make use of GDAL internally
    out, err = run([ExamineGamma().gdal_config, '--datadir'], void=False)
    gammaenv['GDAL_DATA'] = out.strip()
    for module in ['DIFF', 'DISP', 'IPTA', 'ISP', 'LAT']:
        loc = os.path.join(gammaenv['GAMMA_HOME'], module)
        if os.path.isdir(loc):
            gammaenv[module + '_HOME'] = loc
            # make the module's executables and scripts discoverable
            for submodule in ['bin', 'scripts']:
                subloc = os.path.join(loc, submodule)
                if os.path.isdir(subloc):
                    gammaenv['PATH'] += os.pathsep + subloc
    # execute the command
    out, err = run(cmd, outdir=outdir, logfile=log, inlist=inlist, void=False, errorpass=True, env=gammaenv)
    # raise a descriptive error if the GAMMA output indicates failure
    gammaErrorHandler(out, err)
    if not void:
        return out, err
class Spacing(object):
    """
    compute multilooking factors and pixel spacings from an ISPPar object for a defined ground range target pixel spacing
    Parameters
    ----------
    par: str or ISPPar
        the ISP parameter file
    spacing: int or float
        the target pixel spacing in ground range
    """
    def __init__(self, par, spacing='automatic'):
        if not isinstance(par, ISPPar):
            par = ISPPar(par)
        # project the slant range spacing to ground range via the incidence angle
        self.groundRangePS = par.range_pixel_spacing / (math.sin(math.radians(par.incidence_angle)))
        if spacing == 'automatic':
            # balance the looks so that range and azimuth spacing roughly match
            if self.groundRangePS > par.azimuth_pixel_spacing:
                self.rlks = 1
                self.azlks = int(round(self.groundRangePS / par.azimuth_pixel_spacing))
            else:
                self.rlks = int(round(par.azimuth_pixel_spacing / self.groundRangePS))
                self.azlks = 1
        else:
            # derive the looks from the user-defined target spacing
            target = float(spacing)
            self.rlks = int(round(target / self.groundRangePS))
            self.azlks = int(round(target / par.azimuth_pixel_spacing))
class Namespace(object):
    """
    Registry of output file names sharing a common directory and basename.
    Keys are exposed as attributes with '.' replaced by '_'; an attribute value
    of '-' marks a file argument as unused (GAMMA's placeholder convention).
    """
    def __init__(self, directory, basename):
        # basename prefix shared by all registered file names
        self.__base = basename
        # directory in which the registered files reside
        self.__outdir = directory
        # ordered registry of all sanitized keys seen so far
        self.__reg = []
    def __getitem__(self, item):
        item = str(item).replace('.', '_')
        return self.get(item)
    def __getattr__(self, item):
        # only invoked for attributes that were never set;
        # '-' is GAMMA's placeholder for an unused file argument
        return '-'
    def appreciate(self, keys):
        # register keys as actual file names; the attribute name is the
        # sanitized key while the file name keeps the original key
        for key in keys:
            attr = key.replace('.', '_')
            setattr(self, attr, os.path.join(self.__outdir, self.__base + '_' + key))
            # bug fix: compare the sanitized name (which is what gets stored);
            # checking the raw key appended duplicates for dotted keys
            if attr not in self.__reg:
                self.__reg.append(attr)
    def depreciate(self, keys):
        # mark keys as unused ('-') while keeping them registered
        for key in keys:
            attr = key.replace('.', '_')
            setattr(self, attr, '-')
            # bug fix: same duplicate-registration issue as in appreciate
            if attr not in self.__reg:
                self.__reg.append(attr)
    def getall(self):
        # return all registered keys and their current values
        out = {}
        for key in self.__reg:
            out[key] = getattr(self, key)
        return out
    def select(self, selection):
        # return the values for a subset of (sanitized) keys
        return [getattr(self, key) for key in selection]
    def isregistered(self, key):
        return key in self.__reg
    def isappreciated(self, key):
        # a key is appreciated if it is registered and points to a real name
        if self.isregistered(key):
            if self.get(key) != '-':
                return True
        return False
    def isfile(self, key):
        return hasattr(self, key) and os.path.isfile(getattr(self, key))
    def get(self, key):
        return getattr(self, key)
def slc_corners(parfile):
    """
    extract the corner coordinates of a SAR scene
    Parameters
    ----------
    parfile: str
        the GAMMA parameter file to read coordinates from
    Returns
    -------
    dict of float
        a dictionary with keys xmin, xmax, ymin, ymax
    """
    out, err = process(['SLC_corners', parfile], void=False)
    # map the line prefixes of interest to the dictionary keys they fill
    targets = {'min. latitude': ('ymin', 'ymax'),
               'min. longitude': ('xmin', 'xmax')}
    corners = {}
    number = r'-?[0-9]+\.[0-9]+'
    for line in out.split('\n'):
        for prefix, (kmin, kmax) in targets.items():
            if line.startswith(prefix):
                # each matching line carries the minimum and maximum value
                corners[kmin], corners[kmax] = [float(x) for x in
                                                re.findall(number, line)]
    return corners
def do_execute(par, ids, exist_ok):
    """
    small helper function to assess whether a GAMMA command shall be executed.
    Parameters
    ----------
    par: dict
        a dictionary containing all arguments for the command
    ids: list
        the IDs of the output files
    exist_ok: bool
        allow existing output files?
    Returns
    -------
    bool
        execute the command because (a) not all output files exist or (b) existing files are not allowed
    """
    if not exist_ok:
        # existing files are not accepted: always (re-)execute
        return True
    # execute only if at least one real (non-'-') output file is missing
    return not all(os.path.isfile(par[x]) for x in ids if par[x] != '-')
| |
"""
Data structures for sparse float data. Life is made simpler by dealing only
with float64 data
"""
from __future__ import division
# pylint: disable=E1101,E1103,W0231,E0202
from pandas.compat import lmap
from pandas import compat
import numpy as np
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.cast import maybe_upcast, find_common_type
from pandas.core.dtypes.common import _ensure_platform_int, is_scipy_sparse
from pandas.core.common import _try_sort
from pandas.compat.numpy import function as nv
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.series import Series
from pandas.core.frame import (DataFrame, extract_index, _prep_ndarray,
_default_index)
import pandas.core.algorithms as algos
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays)
import pandas.core.generic as generic
from pandas.core.sparse.series import SparseSeries, SparseArray
from pandas._libs.sparse import BlockIndex, get_blocks
from pandas.util._decorators import Appender
import pandas.core.ops as ops
_shared_doc_kwargs = dict(klass='SparseDataFrame')
class SparseDataFrame(DataFrame):
    """
    DataFrame containing sparse floating point data in the form of SparseSeries
    objects
    Parameters
    ----------
    data : same types as can be passed to DataFrame or scipy.sparse.spmatrix
    index : array-like, optional
    column : array-like, optional
    default_kind : {'block', 'integer'}, default 'block'
        Default sparse kind for converting Series to SparseSeries. Will not
        override SparseSeries passed into constructor
    default_fill_value : float
        Default fill_value for converting Series to SparseSeries
        (default: nan). Will not override SparseSeries passed in.
    """
    _constructor_sliced = SparseSeries
    # marker used by pandas internals to identify the sparse frame subtype
    _subtyp = 'sparse_frame'
    def __init__(self, data=None, index=None, columns=None, default_kind=None,
                 default_fill_value=None, dtype=None, copy=False):
        # pick up the defaults from the Sparse structures
        if isinstance(data, SparseDataFrame):
            if index is None:
                index = data.index
            if columns is None:
                columns = data.columns
            if default_fill_value is None:
                default_fill_value = data.default_fill_value
            if default_kind is None:
                default_kind = data.default_kind
        elif isinstance(data, (SparseSeries, SparseArray)):
            if index is None:
                index = data.index
            if default_fill_value is None:
                default_fill_value = data.fill_value
            if columns is None and hasattr(data, 'name'):
                columns = [data.name]
            if columns is None:
                raise Exception("cannot pass a series w/o a name or columns")
            # wrap the single series into a one-column dict
            data = {columns[0]: data}
        if default_fill_value is None:
            default_fill_value = np.nan
        if default_kind is None:
            default_kind = 'block'
        self._default_kind = default_kind
        self._default_fill_value = default_fill_value
        # dispatch to the appropriate BlockManager construction path
        # depending on the input type
        if is_scipy_sparse(data):
            mgr = self._init_spmatrix(data, index, columns, dtype=dtype,
                                      fill_value=default_fill_value)
        elif isinstance(data, dict):
            mgr = self._init_dict(data, index, columns, dtype=dtype)
        elif isinstance(data, (np.ndarray, list)):
            mgr = self._init_matrix(data, index, columns, dtype=dtype)
        elif isinstance(data, SparseDataFrame):
            mgr = self._init_mgr(data._data,
                                 dict(index=index, columns=columns),
                                 dtype=dtype, copy=copy)
        elif isinstance(data, DataFrame):
            mgr = self._init_dict(data, data.index, data.columns, dtype=dtype)
        elif isinstance(data, BlockManager):
            mgr = self._init_mgr(data, axes=dict(index=index, columns=columns),
                                 dtype=dtype, copy=copy)
        elif data is None:
            # empty frame: build all-NaN sparse columns for any given columns
            data = DataFrame()
            if index is None:
                index = Index([])
            else:
                index = _ensure_index(index)
            if columns is None:
                columns = Index([])
            else:
                for c in columns:
                    data[c] = SparseArray(np.nan, index=index,
                                          kind=self._default_kind,
                                          fill_value=self._default_fill_value)
            mgr = to_manager(data, columns, index)
        if dtype is not None:
            mgr = mgr.astype(dtype)
        generic.NDFrame.__init__(self, mgr)
    @property
    def _constructor(self):
        # constructor used by pandas internals when returning a new object
        return SparseDataFrame
    _constructor_sliced = SparseSeries
    def _init_dict(self, data, index, columns, dtype=None):
        # build a BlockManager from a dict of array-likes / Series
        # pre-filter out columns if we passed it
        if columns is not None:
            columns = _ensure_index(columns)
            data = dict((k, v) for k, v in compat.iteritems(data)
                        if k in columns)
        else:
            columns = Index(_try_sort(list(data.keys())))
        if index is None:
            index = extract_index(list(data.values()))
        # converter applying the frame-level sparse defaults
        sp_maker = lambda x: SparseArray(x, kind=self._default_kind,
                                         fill_value=self._default_fill_value,
                                         copy=True, dtype=dtype)
        sdict = {}
        for k, v in compat.iteritems(data):
            if isinstance(v, Series):
                # Force alignment, no copy necessary
                if not v.index.equals(index):
                    v = v.reindex(index)
                if not isinstance(v, SparseSeries):
                    v = sp_maker(v.values)
            elif isinstance(v, SparseArray):
                v = v.copy()
            else:
                if isinstance(v, dict):
                    v = [v.get(i, np.nan) for i in index]
                v = sp_maker(v)
            sdict[k] = v
        # TODO: figure out how to handle this case, all nan's?
        # add in any other columns we want to have (completeness)
        nan_arr = np.empty(len(index), dtype='float64')
        nan_arr.fill(np.nan)
        nan_arr = sp_maker(nan_arr)
        sdict.update((c, nan_arr) for c in columns if c not in sdict)
        return to_manager(sdict, columns, index)
    def _init_matrix(self, data, index, columns, dtype=None):
        """ Init self from ndarray or list of lists """
        data = _prep_ndarray(data, copy=False)
        index, columns = self._prep_index(data, index, columns)
        # split the 2D array into per-column 1D arrays and reuse the dict path
        data = dict([(idx, data[:, i]) for i, idx in enumerate(columns)])
        return self._init_dict(data, index, columns, dtype)
    def _init_spmatrix(self, data, index, columns, dtype=None,
                       fill_value=None):
        """ Init self from scipy.sparse matrix """
        index, columns = self._prep_index(data, index, columns)
        data = data.tocoo()
        N = len(index)
        # Construct a dict of SparseSeries
        sdict = {}
        values = Series(data.data, index=data.row, copy=False)
        for col, rowvals in values.groupby(data.col):
            # get_blocks expects int32 row indices in sorted order
            rowvals = rowvals.sort_index()
            rows = rowvals.index.values.astype(np.int32)
            blocs, blens = get_blocks(rows)
            sdict[columns[col]] = SparseSeries(
                rowvals.values, index=index,
                fill_value=fill_value,
                sparse_index=BlockIndex(N, blocs, blens))
        # Add any columns that were empty and thus not grouped on above
        sdict.update({column: SparseSeries(index=index,
                                           fill_value=fill_value,
                                           sparse_index=BlockIndex(N, [], []))
                      for column in columns
                      if column not in sdict})
        return self._init_dict(sdict, index, columns, dtype)
    def _prep_index(self, data, index, columns):
        # supply default RangeIndex axes and validate lengths against data
        N, K = data.shape
        if index is None:
            index = _default_index(N)
        if columns is None:
            columns = _default_index(K)
        if len(columns) != K:
            raise ValueError('Column length mismatch: {columns} vs. {K}'
                             .format(columns=len(columns), K=K))
        if len(index) != N:
            raise ValueError('Index length mismatch: {index} vs. {N}'
                             .format(index=len(index), N=N))
        return index, columns
    def to_coo(self):
        """
        Return the contents of the frame as a sparse SciPy COO matrix.
        .. versionadded:: 0.20.0
        Returns
        -------
        coo_matrix : scipy.sparse.spmatrix
            If the caller is heterogeneous and contains booleans or objects,
            the result will be of dtype=object. See Notes.
        Notes
        -----
        The dtype will be the lowest-common-denominator type (implicit
        upcasting); that is to say if the dtypes (even of numeric types)
        are mixed, the one that accommodates all will be chosen.
        e.g. If the dtypes are float16 and float32, dtype will be upcast to
        float32. By numpy.find_common_type convention, mixing int64 and
        and uint64 will result in a float64 dtype.
        """
        try:
            from scipy.sparse import coo_matrix
        except ImportError:
            raise ImportError('Scipy is not installed')
        dtype = find_common_type(self.dtypes)
        # collect (row, col, value) triplets column by column
        cols, rows, datas = [], [], []
        for col, name in enumerate(self):
            s = self[name]
            row = s.sp_index.to_int_index().indices
            cols.append(np.repeat(col, len(row)))
            rows.append(row)
            datas.append(s.sp_values.astype(dtype, copy=False))
        cols = np.concatenate(cols)
        rows = np.concatenate(rows)
        datas = np.concatenate(datas)
        return coo_matrix((datas, (rows, cols)), shape=self.shape)
    def __array_wrap__(self, result):
        # re-wrap the result of a NumPy ufunc, preserving the sparse defaults
        return self._constructor(
            result, index=self.index, columns=self.columns,
            default_kind=self._default_kind,
            default_fill_value=self._default_fill_value).__finalize__(self)
    def __getstate__(self):
        # pickling
        return dict(_typ=self._typ, _subtyp=self._subtyp, _data=self._data,
                    _default_fill_value=self._default_fill_value,
                    _default_kind=self._default_kind)
    def _unpickle_sparse_frame_compat(self, state):
        """ original pickle format """
        series, cols, idx, fv, kind = state
        if not isinstance(cols, Index):  # pragma: no cover
            from pandas.io.pickle import _unpickle_array
            columns = _unpickle_array(cols)
        else:
            columns = cols
        if not isinstance(idx, Index):  # pragma: no cover
            from pandas.io.pickle import _unpickle_array
            index = _unpickle_array(idx)
        else:
            index = idx
        series_dict = DataFrame()
        for col, (sp_index, sp_values) in compat.iteritems(series):
            series_dict[col] = SparseSeries(sp_values, sparse_index=sp_index,
                                            fill_value=fv)
        self._data = to_manager(series_dict, columns, index)
        self._default_fill_value = fv
        self._default_kind = kind
    def to_dense(self):
        """
        Convert to dense DataFrame
        Returns
        -------
        df : DataFrame
        """
        data = dict((k, v.to_dense()) for k, v in compat.iteritems(self))
        return DataFrame(data, index=self.index, columns=self.columns)
    def _apply_columns(self, func):
        """ get new SparseDataFrame applying func to each columns """
        new_data = {}
        for col, series in compat.iteritems(self):
            new_data[col] = func(series)
        return self._constructor(
            data=new_data, index=self.index, columns=self.columns,
            default_fill_value=self.default_fill_value).__finalize__(self)
    def astype(self, dtype):
        # cast each sparse column individually
        return self._apply_columns(lambda x: x.astype(dtype))
    def copy(self, deep=True):
        """
        Make a copy of this SparseDataFrame
        """
        result = super(SparseDataFrame, self).copy(deep=deep)
        # the base-class copy does not carry over the sparse defaults
        result._default_fill_value = self._default_fill_value
        result._default_kind = self._default_kind
        return result
    @property
    def default_fill_value(self):
        # fill value applied when converting dense input to SparseSeries
        return self._default_fill_value
    @property
    def default_kind(self):
        # sparse index kind ('block' or 'integer') for converted columns
        return self._default_kind
    @property
    def density(self):
        """
        Ratio of non-sparse points to total (dense) data points
        represented in the frame
        """
        tot_nonsparse = sum([ser.sp_index.npoints
                             for _, ser in compat.iteritems(self)])
        tot = len(self.index) * len(self.columns)
        return tot_nonsparse / float(tot)
    def fillna(self, value=None, method=None, axis=0, inplace=False,
               limit=None, downcast=None):
        new_self = super(SparseDataFrame,
                         self).fillna(value=value, method=method, axis=axis,
                                      inplace=inplace, limit=limit,
                                      downcast=downcast)
        if not inplace:
            self = new_self
        # set the fill value if we are filling as a scalar with nothing special
        # going on
        if (value is not None and value == value and method is None and
                limit is None):
            self._default_fill_value = value
        if not inplace:
            return self
    # ----------------------------------------------------------------------
    # Support different internal representation of SparseDataFrame
    def _sanitize_column(self, key, value, **kwargs):
        """
        Creates a new SparseArray from the input value.
        Parameters
        ----------
        key : object
        value : scalar, Series, or array-like
        kwargs : dict
        Returns
        -------
        sanitized_column : SparseArray
        """
        sp_maker = lambda x, index=None: SparseArray(
            x, index=index, fill_value=self._default_fill_value,
            kind=self._default_kind)
        if isinstance(value, SparseSeries):
            clean = value.reindex(self.index).as_sparse_array(
                fill_value=self._default_fill_value, kind=self._default_kind)
        elif isinstance(value, SparseArray):
            if len(value) != len(self.index):
                raise AssertionError('Length of values does not match '
                                     'length of index')
            clean = value
        elif hasattr(value, '__iter__'):
            if isinstance(value, Series):
                clean = value.reindex(self.index)
                if not isinstance(value, SparseSeries):
                    clean = sp_maker(clean)
            else:
                if len(value) != len(self.index):
                    raise AssertionError('Length of values does not match '
                                         'length of index')
                clean = sp_maker(value)
        # Scalar
        else:
            clean = sp_maker(value, self.index)
        # always return a SparseArray!
        return clean
    def __getitem__(self, key):
        """
        Retrieve column or slice from DataFrame
        """
        if isinstance(key, slice):
            date_rng = self.index[key]
            return self.reindex(date_rng)
        elif isinstance(key, (np.ndarray, list, Series)):
            return self._getitem_array(key)
        else:
            return self._get_item_cache(key)
    @Appender(DataFrame.get_value.__doc__, indents=0)
    def get_value(self, index, col, takeable=False):
        if takeable is True:
            series = self._iget_item_cache(col)
        else:
            series = self._get_item_cache(col)
        return series.get_value(index, takeable=takeable)
    def set_value(self, index, col, value, takeable=False):
        """
        Put single value at passed column and index
        Parameters
        ----------
        index : row label
        col : column label
        value : scalar value
        takeable : interpret the index/col as indexers, default False
        Notes
        -----
        This method *always* returns a new object. It is currently not
        particularly efficient (and potentially very expensive) but is provided
        for API compatibility with DataFrame
        Returns
        -------
        frame : DataFrame
        """
        # round-trip through a dense frame; single-cell mutation is not
        # supported on the sparse representation directly
        dense = self.to_dense().set_value(index, col, value, takeable=takeable)
        return dense.to_sparse(kind=self._default_kind,
                               fill_value=self._default_fill_value)
    def _slice(self, slobj, axis=0, kind=None):
        # slice along one axis and reindex to materialize the result
        if axis == 0:
            new_index = self.index[slobj]
            new_columns = self.columns
        else:
            new_index = self.index
            new_columns = self.columns[slobj]
        return self.reindex(index=new_index, columns=new_columns)
    def xs(self, key, axis=0, copy=False):
        """
        Returns a row (cross-section) from the SparseDataFrame as a Series
        object.
        Parameters
        ----------
        key : some index contained in the index
        Returns
        -------
        xs : Series
        """
        if axis == 1:
            data = self[key]
            return data
        i = self.index.get_loc(key)
        data = self.take([i]).get_values()[0]
        return Series(data, index=self.columns)
    # ----------------------------------------------------------------------
    # Arithmetic-related methods
    def _combine_frame(self, other, func, fill_value=None, level=None,
                       try_cast=True):
        this, other = self.align(other, join='outer', level=level, copy=False)
        new_index, new_columns = this.index, this.columns
        if level is not None:
            raise NotImplementedError("'level' argument is not supported")
        if self.empty and other.empty:
            return self._constructor(index=new_index).__finalize__(self)
        new_data = {}
        new_fill_value = None
        if fill_value is not None:
            # TODO: be a bit more intelligent here
            for col in new_columns:
                if col in this and col in other:
                    # operate on dense data to honor the fill_value, then
                    # convert back to sparse
                    dleft = this[col].to_dense()
                    dright = other[col].to_dense()
                    result = dleft._binop(dright, func, fill_value=fill_value)
                    result = result.to_sparse(fill_value=this[col].fill_value)
                    new_data[col] = result
        else:
            for col in new_columns:
                if col in this and col in other:
                    new_data[col] = func(this[col], other[col])
        # if the fill values are the same use them? or use a valid one
        other_fill_value = getattr(other, 'default_fill_value', np.nan)
        if self.default_fill_value == other_fill_value:
            new_fill_value = self.default_fill_value
        elif np.isnan(self.default_fill_value) and not np.isnan(
                other_fill_value):
            new_fill_value = other_fill_value
        elif not np.isnan(self.default_fill_value) and np.isnan(
                other_fill_value):
            new_fill_value = self.default_fill_value
        return self._constructor(data=new_data, index=new_index,
                                 columns=new_columns,
                                 default_fill_value=new_fill_value
                                 ).__finalize__(self)
    def _combine_match_index(self, other, func, level=None, fill_value=None,
                             try_cast=True):
        # combine with a Series aligned along the index axis
        new_data = {}
        if fill_value is not None:
            raise NotImplementedError("'fill_value' argument is not supported")
        if level is not None:
            raise NotImplementedError("'level' argument is not supported")
        new_index = self.index.union(other.index)
        this = self
        if self.index is not new_index:
            this = self.reindex(new_index)
        if other.index is not new_index:
            other = other.reindex(new_index)
        for col, series in compat.iteritems(this):
            new_data[col] = func(series.values, other.values)
        # fill_value is a function of our operator
        if isna(other.fill_value) or isna(self.default_fill_value):
            fill_value = np.nan
        else:
            fill_value = func(np.float64(self.default_fill_value),
                              np.float64(other.fill_value))
        return self._constructor(
            new_data, index=new_index, columns=self.columns,
            default_fill_value=fill_value).__finalize__(self)
    def _combine_match_columns(self, other, func, level=None, fill_value=None,
                               try_cast=True):
        # patched version of DataFrame._combine_match_columns to account for
        # NumPy circumventing __rsub__ with float64 types, e.g.: 3.0 - series,
        # where 3.0 is numpy.float64 and series is a SparseSeries. Still
        # possible for this to happen, which is bothersome
        if fill_value is not None:
            raise NotImplementedError("'fill_value' argument is not supported")
        if level is not None:
            raise NotImplementedError("'level' argument is not supported")
        new_data = {}
        union = intersection = self.columns
        if not union.equals(other.index):
            union = other.index.union(self.columns)
            intersection = other.index.intersection(self.columns)
        for col in intersection:
            new_data[col] = func(self[col], float(other[col]))
        return self._constructor(
            new_data, index=self.index, columns=union,
            default_fill_value=self.default_fill_value).__finalize__(self)
    def _combine_const(self, other, func, raise_on_error=True, try_cast=True):
        # combine with a scalar, column by column
        return self._apply_columns(lambda x: func(x, other))
    def _reindex_index(self, index, method, copy, level, fill_value=np.nan,
                       limit=None, takeable=False):
        if level is not None:
            raise TypeError('Reindex by level not supported for sparse')
        if self.index.equals(index):
            if copy:
                return self.copy()
            else:
                return self
        if len(self.index) == 0:
            return self._constructor(
                index=index, columns=self.columns).__finalize__(self)
        indexer = self.index.get_indexer(index, method, limit=limit)
        indexer = _ensure_platform_int(indexer)
        # -1 marks labels in the new index with no match in the old one
        mask = indexer == -1
        need_mask = mask.any()
        new_series = {}
        for col, series in self.iteritems():
            if mask.all():
                continue
            values = series.values
            # .take returns SparseArray
            new = values.take(indexer)
            if need_mask:
                new = new.values
                # convert integer to float if necessary. need to do a lot
                # more than that, handle boolean etc also
                new, fill_value = maybe_upcast(new, fill_value=fill_value)
                np.putmask(new, mask, fill_value)
            new_series[col] = new
        return self._constructor(
            new_series, index=index, columns=self.columns,
            default_fill_value=self._default_fill_value).__finalize__(self)
    def _reindex_columns(self, columns, method, copy, level, fill_value=None,
                         limit=None, takeable=False):
        if level is not None:
            raise TypeError('Reindex by level not supported for sparse')
        if notna(fill_value):
            raise NotImplementedError("'fill_value' argument is not supported")
        if limit:
            raise NotImplementedError("'limit' argument is not supported")
        if method is not None:
            raise NotImplementedError("'method' argument is not supported")
        # TODO: fill value handling
        sdict = dict((k, v) for k, v in compat.iteritems(self) if k in columns)
        return self._constructor(
            sdict, index=self.index, columns=columns,
            default_fill_value=self._default_fill_value).__finalize__(self)
    def _reindex_with_indexers(self, reindexers, method=None, fill_value=None,
                               limit=None, copy=False, allow_dups=False):
        if method is not None or limit is not None:
            raise NotImplementedError("cannot reindex with a method or limit "
                                      "with sparse")
        if fill_value is None:
            fill_value = np.nan
        index, row_indexer = reindexers.get(0, (None, None))
        columns, col_indexer = reindexers.get(1, (None, None))
        if columns is None:
            columns = self.columns
        new_arrays = {}
        for col in columns:
            if col not in self:
                continue
            if row_indexer is not None:
                new_arrays[col] = algos.take_1d(self[col].get_values(),
                                                row_indexer,
                                                fill_value=fill_value)
            else:
                new_arrays[col] = self[col]
        return self._constructor(new_arrays, index=index,
                                 columns=columns).__finalize__(self)
    def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',
                     sort=False):
        if on is not None:
            raise NotImplementedError("'on' keyword parameter is not yet "
                                      "implemented")
        return self._join_index(other, how, lsuffix, rsuffix)
    def _join_index(self, other, how, lsuffix, rsuffix):
        # join along the index; a Series is promoted to a one-column frame
        if isinstance(other, Series):
            if other.name is None:
                raise ValueError('Other Series must have a name')
            other = SparseDataFrame(
                {other.name: other},
                default_fill_value=self._default_fill_value)
        join_index = self.index.join(other.index, how=how)
        this = self.reindex(join_index)
        other = other.reindex(join_index)
        this, other = this._maybe_rename_join(other, lsuffix, rsuffix)
        from pandas import concat
        return concat([this, other], axis=1, verify_integrity=True)
    def _maybe_rename_join(self, other, lsuffix, rsuffix):
        # disambiguate overlapping column names with the given suffixes
        to_rename = self.columns.intersection(other.columns)
        if len(to_rename) > 0:
            if not lsuffix and not rsuffix:
                raise ValueError('columns overlap but no suffix specified: '
                                 '{to_rename}'.format(to_rename=to_rename))
            def lrenamer(x):
                if x in to_rename:
                    return '{x}{lsuffix}'.format(x=x, lsuffix=lsuffix)
                return x
            def rrenamer(x):
                if x in to_rename:
                    return '{x}{rsuffix}'.format(x=x, rsuffix=rsuffix)
                return x
            this = self.rename(columns=lrenamer)
            other = other.rename(columns=rrenamer)
        else:
            this = self
        return this, other
    def transpose(self, *args, **kwargs):
        """
        Returns a DataFrame with the rows/columns switched.
        """
        nv.validate_transpose(args, kwargs)
        return self._constructor(
            self.values.T, index=self.columns, columns=self.index,
            default_fill_value=self._default_fill_value,
            default_kind=self._default_kind).__finalize__(self)
    T = property(transpose)
    @Appender(DataFrame.count.__doc__)
    def count(self, axis=0, **kwds):
        if axis is None:
            axis = self._stat_axis_number
        return self.apply(lambda x: x.count(), axis=axis)
    def cumsum(self, axis=0, *args, **kwargs):
        """
        Return SparseDataFrame of cumulative sums over requested axis.
        Parameters
        ----------
        axis : {0, 1}
            0 for row-wise, 1 for column-wise
        Returns
        -------
        y : SparseDataFrame
        """
        nv.validate_cumsum(args, kwargs)
        if axis is None:
            axis = self._stat_axis_number
        return self.apply(lambda x: x.cumsum(), axis=axis)
    @Appender(generic._shared_docs['isna'])
    def isna(self):
        return self._apply_columns(lambda x: x.isna())
    isnull = isna
    @Appender(generic._shared_docs['notna'])
    def notna(self):
        return self._apply_columns(lambda x: x.notna())
    notnull = notna
    def apply(self, func, axis=0, broadcast=False, reduce=False):
        """
        Analogous to DataFrame.apply, for SparseDataFrame
        Parameters
        ----------
        func : function
            Function to apply to each column
        axis : {0, 1, 'index', 'columns'}
        broadcast : bool, default False
            For aggregation functions, return object of same size with values
            propagated
        Returns
        -------
        applied : Series or SparseDataFrame
        """
        if not len(self.columns):
            return self
        axis = self._get_axis_number(axis)
        if isinstance(func, np.ufunc):
            # fast path for ufuncs: apply to values and fill_value per column
            new_series = {}
            for k, v in compat.iteritems(self):
                applied = func(v)
                applied.fill_value = func(v.fill_value)
                new_series[k] = applied
            return self._constructor(
                new_series, index=self.index, columns=self.columns,
                default_fill_value=self._default_fill_value,
                default_kind=self._default_kind).__finalize__(self)
        else:
            if not broadcast:
                return self._apply_standard(func, axis, reduce=reduce)
            else:
                return self._apply_broadcast(func, axis)
    def applymap(self, func):
        """
        Apply a function to a DataFrame that is intended to operate
        elementwise, i.e. like doing map(func, series) for each series in the
        DataFrame
        Parameters
        ----------
        func : function
            Python function, returns a single value from a single value
        Returns
        -------
        applied : DataFrame
        """
        return self.apply(lambda x: lmap(func, x))
def to_manager(sdf, columns, index):
    """ create and return the block manager from a dataframe of series,
    columns, index
    """
    # from BlockManager perspective
    # BlockManager axes are ordered (columns, index)
    axes = [_ensure_index(columns), _ensure_index(index)]
    return create_block_manager_from_arrays(
        [sdf[c] for c in columns], columns, axes)
def stack_sparse_frame(frame):
    """
    Only makes sense when fill_value is NaN

    Stack the non-fill values of a sparse frame into a single-column
    DataFrame with a (index, column) MultiIndex.
    """
    # number of stored (non-fill) points per column
    lengths = [s.sp_index.npoints for _, s in compat.iteritems(frame)]
    nobs = sum(lengths)
    # this is pretty fast
    minor_labels = np.repeat(np.arange(len(frame.columns)), lengths)
    inds_to_concat = []
    vals_to_concat = []
    # TODO: Figure out whether this can be reached.
    # I think this currently can't be reached because you can't build a
    # SparseDataFrame with a non-np.NaN fill value (fails earlier).
    for _, series in compat.iteritems(frame):
        if not np.isnan(series.fill_value):
            raise TypeError('This routine assumes NaN fill value')
        # integer positions of the stored values within the column
        int_index = series.sp_index.to_int_index()
        inds_to_concat.append(int_index.indices)
        vals_to_concat.append(series.sp_values)
    major_labels = np.concatenate(inds_to_concat)
    stacked_values = np.concatenate(vals_to_concat)
    index = MultiIndex(levels=[frame.index, frame.columns],
                       labels=[major_labels, minor_labels],
                       verify_integrity=False)
    lp = DataFrame(stacked_values.reshape((nobs, 1)), index=index,
                   columns=['foo'])
    return lp.sort_index(level=0)
def homogenize(series_dict):
    """
    Conform a set of SparseSeries (with NaN fill_value) to a common SparseIndex
    corresponding to the locations where they all have data

    Parameters
    ----------
    series_dict : dict or DataFrame

    Notes
    -----
    Using the dumbest algorithm I could think of. Should put some more thought
    into this

    Returns
    -------
    homogenized : dict of SparseSeries
    """
    common_index = None
    must_reindex = False
    # First pass: intersect all sparse indexes to find the shared locations.
    for _, series in compat.iteritems(series_dict):
        if not np.isnan(series.fill_value):
            raise TypeError('this method is only valid with NaN fill values')
        if common_index is None:
            common_index = series.sp_index
        elif not series.sp_index.equals(common_index):
            must_reindex = True
            common_index = common_index.intersect(series.sp_index)
    if not must_reindex:
        # Every series already shares the same sparse index.
        return series_dict
    # Second pass: reindex anything that differs from the intersection.
    homogenized = {}
    for name, series in compat.iteritems(series_dict):
        if not series.sp_index.equals(common_index):
            series = series.sparse_reindex(common_index)
        homogenized[name] = series
    return homogenized
# use unaccelerated ops for sparse objects
# (use_numexpr=False keeps arithmetic on the plain element-wise paths —
# presumably numexpr acceleration is unsafe for sparse storage; TODO confirm)
ops.add_flex_arithmetic_methods(SparseDataFrame, use_numexpr=False,
                                **ops.frame_flex_funcs)
ops.add_special_arithmetic_methods(SparseDataFrame, use_numexpr=False,
                                   **ops.frame_special_funcs)
| |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
from frappe.utils.minify import JavascriptMinify
"""
Build the `public` folders and setup languages
"""
import os, frappe, json, shutil, re
# from cssmin import cssmin
# Populated by setup(): list of absolute base directories of every installed
# app's python package. None until setup() has run.
app_paths = None
def setup():
    """Populate the module-global `app_paths` with the base directory of
    every installed app's python package. Apps that fail to import are
    skipped silently (best-effort)."""
    global app_paths
    modules = []
    for app in frappe.get_all_apps(True):
        try:
            modules.append(frappe.get_module(app))
        except ImportError:
            pass
    app_paths = [os.path.dirname(module.__file__) for module in modules]
def bundle(no_compress, make_copy=False, verbose=False):
    """concat / minify js files

    no_compress -- skip JS minification when truthy
    make_copy -- copy app public/docs folders into assets instead of symlinking
    verbose -- print per-file minified sizes
    """
    # build js files
    setup()
    make_asset_dirs(make_copy=make_copy)
    build(no_compress, verbose)
def watch(no_compress):
    """watch and rebuild if necessary

    Polls every 3 seconds; recompiles less and rebuilds bundles when any
    source file's mtime changed.
    NOTE(review): the `no_compress` argument is ignored — build() is always
    called with no_compress=True below.
    """
    setup()
    import time
    compile_less()
    build(no_compress=True)
    while True:
        compile_less()
        if files_dirty():
            build(no_compress=True)
        time.sleep(3)
def make_asset_dirs(make_copy=False):
    """Ensure sites/assets/js and sites/assets/css exist, and expose every
    app's `public` and `docs` folders under sites/assets — as symlinks by
    default, or as full copies when `make_copy` is truthy. Existing targets
    are left untouched."""
    assets_path = os.path.join(frappe.local.sites_path, "assets")
    for dir_path in (os.path.join(assets_path, 'js'),
                     os.path.join(assets_path, 'css')):
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
    # symlink app/public > assets/app
    for app_name in frappe.get_all_apps(True):
        pymodule = frappe.get_module(app_name)
        app_base_path = os.path.abspath(os.path.dirname(pymodule.__file__))
        link_pairs = (
            (os.path.join(app_base_path, 'public'),
             os.path.join(assets_path, app_name)),
            (os.path.join(app_base_path, 'docs'),
             os.path.join(assets_path, app_name + '_docs')),
        )
        for source, target in link_pairs:
            source = os.path.abspath(source)
            if os.path.exists(source) and not os.path.exists(target):
                if make_copy:
                    shutil.copytree(source, target)
                else:
                    os.symlink(source, target)
def build(no_compress=False, verbose=False):
    """Concatenate/minify every bundle target declared in the apps'
    build.json files into the sites/assets folder."""
    assets_path = os.path.join(frappe.local.sites_path, "assets")
    for target, sources in get_build_maps().iteritems():
        pack(os.path.join(assets_path, target), sources, no_compress, verbose)
def get_build_maps():
    """get all build.jsons with absolute paths

    Returns a dict mapping bundle target name -> list of absolute source
    file paths, merged across every installed app.
    """
    # framework js and css files
    build_maps = {}
    for app_path in app_paths:
        path = os.path.join(app_path, 'public', 'build.json')
        if os.path.exists(path):
            with open(path) as f:
                try:
                    for target, sources in json.loads(f.read()).iteritems():
                        # update app path
                        source_paths = []
                        for source in sources:
                            if isinstance(source, list):
                                # [app_name, "path/in/app"]: resolve inside that app
                                s = frappe.get_pymodule_path(source[0], *source[1].split("/"))
                            else:
                                # plain relative path: resolve inside this app
                                s = os.path.join(app_path, source)
                            source_paths.append(s)
                        build_maps[target] = source_paths
                except ValueError, e:
                    # invalid build.json is reported but does not abort the build
                    print path
                    print 'JSON syntax error {0}'.format(str(e))
    return build_maps
# Maps absolute source path -> last os.path.getmtime() seen. Written by
# pack() and compile_less(), read by files_dirty() to detect changed files
# between rebuilds.
timestamps = {}
def pack(target, sources, no_compress, verbose):
    """Concatenate `sources` into the single bundle file `target`.

    JS sources are minified (unless no_compress, a ":concat" suffix, or a
    .min. filename opts out); HTML sources are wrapped as frappe.templates
    entries; everything else is appended verbatim with a banner comment.
    """
    from cStringIO import StringIO

    # Output type is taken from the target's extension (js / css / ...).
    outtype, outtxt = target.split(".")[-1], ''
    jsm = JavascriptMinify()

    for f in sources:
        suffix = None
        # A "path:flag" entry carries a per-file option (e.g. ":concat").
        if ':' in f: f, suffix = f.split(':')
        if not os.path.exists(f) or os.path.isdir(f):
            print "did not find " + f
            continue
        # Remember the mtime so files_dirty() can detect later edits.
        timestamps[f] = os.path.getmtime(f)
        try:
            with open(f, 'r') as sourcefile:
                data = unicode(sourcefile.read(), 'utf-8', errors='ignore')
                extn = f.rsplit(".", 1)[1]
                if outtype=="js" and extn=="js" and (not no_compress) and suffix!="concat" and (".min." not in f):
                    # Minify via in-memory streams; terminate with ';' so
                    # concatenated files can't swallow each other.
                    tmpin, tmpout = StringIO(data.encode('utf-8')), StringIO()
                    jsm.minify(tmpin, tmpout)
                    minified = tmpout.getvalue()
                    if minified:
                        outtxt += unicode(minified or '', 'utf-8').strip('\n') + ';'
                    if verbose:
                        print "{0}: {1}k".format(f, int(len(minified) / 1024))
                elif outtype=="js" and extn=="html":
                    # add to frappe.templates
                    outtxt += html_to_js_template(f, data)
                else:
                    # Verbatim append, preceded by a banner naming the source.
                    outtxt += ('\n/*\n *\t%s\n */' % f)
                    outtxt += '\n' + data + '\n'
        except Exception:
            # A broken source is reported but must not break the whole bundle.
            print "--Error in:" + f + "--"
            print frappe.get_traceback()
    if not no_compress and outtype == 'css':
        # CSS minification is currently disabled (cssmin import commented out).
        pass
        #outtxt = cssmin(outtxt)
    with open(target, 'w') as f:
        f.write(outtxt.encode("utf-8"))
    print "Wrote %s - %sk" % (target, str(int(os.path.getsize(target)/1024)))
def html_to_js_template(path, content):
    '''returns HTML template content as Javascript code, adding it to `frappe.templates`'''
    # Key is the bare filename without its ".html" extension.
    template_key = path.rsplit("/", 1)[-1][:-5]
    scrubbed = scrub_html_template(content)
    return """frappe.templates["{key}"] = '{content}';\n""".format(
        key=template_key, content=scrubbed)
def scrub_html_template(content):
    '''Return HTML content minified for embedding in a single-quoted JS string.

    Collapses whitespace runs to a single space, strips HTML comments, and
    backslash-escapes single quotes so the result is safe inside the
    single-quoted string literal built by html_to_js_template().
    '''
    # remove whitespace to a single space
    content = re.sub(r"\s+", " ", content)
    # strip comments
    content = re.sub(r"(<!--.*?-->)", "", content)
    # BUGFIX: the original used "\'", which Python reads as just "'", making
    # the replace a no-op — templates containing a quote produced broken JS.
    # "\\'" actually emits backslash+quote.
    return content.replace("'", "\\'")
def files_dirty():
    """Return True if any bundled source file's mtime differs from the one
    recorded in `timestamps` at the last build, else False."""
    for target, sources in get_build_maps().iteritems():
        for f in sources:
            # Strip any ":flag" suffix before touching the filesystem.
            if ':' in f: f, suffix = f.split(':')
            if not os.path.exists(f) or os.path.isdir(f): continue
            if os.path.getmtime(f) != timestamps.get(f):
                print f + ' dirty'
                return True
    else:
        # for/else: the loop always completes without break, so this is
        # simply "return False" once every file has been checked clean.
        return False
def compile_less():
    """Compile each app's public/less/*.less files to public/css/*.css with
    the `lessc` binary, skipping files unchanged since the last run. No-op
    when lessc is not installed."""
    from distutils.spawn import find_executable
    if not find_executable("lessc"):
        return

    for path in app_paths:
        less_path = os.path.join(path, "public", "less")
        if os.path.exists(less_path):
            for fname in os.listdir(less_path):
                # variables.less only holds shared definitions — never
                # compiled on its own.
                if fname.endswith(".less") and fname != "variables.less":
                    fpath = os.path.join(less_path, fname)
                    mtime = os.path.getmtime(fpath)
                    # Unchanged since last compile: skip.
                    if fpath in timestamps and mtime == timestamps[fpath]:
                        continue

                    timestamps[fpath] = mtime

                    print "compiling {0}".format(fpath)

                    css_path = os.path.join(path, "public", "css", fname.rsplit(".", 1)[0] + ".css")
                    os.system("lessc {0} > {1}".format(fpath, css_path))
| |
from .context import get_current_config, connections, log, run_task, metric
import time
import datetime
import gevent
import argparse
import random
import shlex
import traceback
from collections import defaultdict
from bson import ObjectId
try:
from redis.lock import LuaLock
except ImportError:
# Change name to avoid NameError raised when use of LuaLock at line 147
from redis.lock import Lock as LuaLock
from .processes import Process, ProcessPool
from .utils import MovingETA, normalize_command
from .queue import Queue
class Agent(Process):
    """ MRQ Agent manages its local worker pool and takes turns in orchestrating the others in its group. """

    def __init__(self, worker_group=None):
        # Background greenlets spawned in work(), keyed by role name.
        self.greenlets = {}
        # Unique agent identity; also exported to worker subprocesses via env.
        self.id = ObjectId()
        self.worker_group = worker_group or get_current_config()["worker_group"]
        self.pool = ProcessPool(extra_env={
            "MRQ_AGENT_ID": str(self.id),
            "MRQ_WORKER_GROUP": self.worker_group
        })
        self.config = get_current_config()
        # Lifecycle flag stored in the agent report: "started" until shutdown,
        # then "stop".
        self.status = "started"
        metric("agent", data={"worker_group": self.worker_group, "agent_id": self.id})
        # Reported to MongoDB but never written in this class — presumably
        # updated by the orchestration task; TODO confirm.
        self.dateorchestrated = None
        # global redis key used to ensure only one agent orchestrator runs at a time
        self.redis_queuestats_lock_key = "%s:queuestatslock" % (self.config["redis_prefix"])
        # global HSET redis key used to store queue stats
        self.redis_queuestats_key= "%s:queuestats" % (self.config["redis_prefix"])

    def work(self):
        """Run the agent: start the worker pool and background greenlets, then
        block until the pool exits and tear everything down."""
        self.install_signal_handlers()
        self.datestarted = datetime.datetime.utcnow()
        self.pool.start()
        # Report once right away so the agent is visible in MongoDB immediately.
        self.manage()
        # NOTE(review): gevent.spawn() already schedules the greenlet, so the
        # extra .start() calls below appear redundant (but harmless).
        self.greenlets["orchestrate"] = gevent.spawn(self.greenlet_orchestrate)
        self.greenlets["orchestrate"].start()
        self.greenlets["manage"] = gevent.spawn(self.greenlet_manage)
        self.greenlets["manage"].start()
        # Disabled for now
        # self.greenlets["queuestats"] = gevent.spawn(self.greenlet_queuestats)
        # self.greenlets["queuestats"].start()
        try:
            self.pool.wait()
        finally:
            # Always kill greenlets/subprocesses and publish a final "stop"
            # report, even if the pool died with an exception.
            self.shutdown_now()
            self.status = "stop"
            self.manage()

    def shutdown_now(self):
        """Hard stop: terminate the pooled worker processes and kill every greenlet."""
        self.pool.terminate()
        for g in self.greenlets.values():
            g.kill()

    def shutdown_graceful(self):
        """Soft stop: ask the pool to stop and wait without a timeout."""
        self.pool.stop(timeout=None)

    def greenlet_manage(self):
        """ This greenlet always runs in background to update current status
        in MongoDB every N seconds.
        """
        while True:
            try:
                self.manage()
            except Exception as e:  # pylint: disable=broad-except
                log.error("When reporting: %s" % e)
            finally:
                time.sleep(self.config["report_interval"])

    def manage(self):
        """Upsert this agent's report in MongoDB, then apply any change to the
        desired worker commands decided by an orchestrator."""
        report = self.get_agent_report()
        try:
            db = connections.mongodb_jobs.mrq_agents.find_and_modify({
                "_id": ObjectId(self.id)
            }, {"$set": report}, upsert=True)
            # find_and_modify returns the pre-update document by default; on
            # the very first upsert there is none, so there is nothing to apply.
            if not db:
                return
        except Exception as e:  # pylint: disable=broad-except
            # Reporting is best-effort: a Mongo hiccup must not kill the agent.
            log.debug("Agent report failed: %s" % e)
            return

        # If the desired_workers was changed by an orchestrator, apply the changes locally
        if self.status != "stop" and sorted(db.get("desired_workers", [])) != sorted(self.pool.desired_commands):
            group = self.fetch_worker_group_definition()
            process_termination_timeout = float(group.get("process_termination_timeout") or 60)
            self.pool.set_commands(db.get("desired_workers", []), timeout=process_termination_timeout)

    def get_agent_report(self):
        """Build the status document that manage() stores in MongoDB."""
        report = {
            "current_workers": [p["command"] for p in self.pool.processes],
            "total_cpu": get_current_config()["total_cpu"],
            "total_memory": get_current_config()["total_memory"],
            "worker_group": self.worker_group,
            "status": self.status,
            "dateorchestrated": self.dateorchestrated,
            "datestarted": self.datestarted,
            "datereported": datetime.datetime.utcnow(),
            # The report is considered expired after three missed report
            # intervals (plus a small grace period).
            "dateexpires": datetime.datetime.utcnow() + datetime.timedelta(seconds=(self.config["report_interval"] * 3) + 5)
        }
        metric("agent", data={"worker_group": self.worker_group, "agent_id": self.id, "worker_count": len(self.pool.processes)})
        return report

    def greenlet_orchestrate(self):
        """Periodically run the orchestration task; errors are logged, not fatal."""
        while True:
            try:
                self.orchestrate()
            except Exception as e:
                log.error("Orchestration error! %s" % e)
                traceback.print_exc()
            time.sleep(self.config["orchestrate_interval"])

    def orchestrate(self):
        """Delegate orchestration of the worker group to the Orchestrate task."""
        run_task("mrq.basetasks.orchestrator.Orchestrate", {})

    def greenlet_queuestats(self):
        """Compute queue stats in a loop while holding a global Redis lock, so
        only one agent in the cluster does this work at a time.
        (Currently disabled in work().)"""
        interval = min(self.config["orchestrate_interval"], 1 * 60)
        lock_timeout = 5 * 60 + (interval * 2)
        while True:
            # blocking=False: if another agent holds the lock, acquisition
            # fails immediately instead of waiting.
            lock = LuaLock(connections.redis, self.redis_queuestats_lock_key,
                           timeout=lock_timeout, thread_local=False, blocking=False)
            with lock:
                lock_expires = time.time() + lock_timeout
                # Fresh ETA trackers (window of 5 samples) each time we win
                # the lock.
                self.queue_etas = defaultdict(lambda: MovingETA(5))

                while True:
                    self.queuestats()

                    # Because queue stats can be expensive, we try to keep the lock on the same agent
                    lock_extend = (time.time() + lock_timeout) - lock_expires
                    lock_expires += lock_extend
                    lock.extend(lock_extend)
                    time.sleep(interval)
            time.sleep(interval)

    def queuestats(self):
        """ Compute ETAs for every known queue & subqueue """
        start_time = time.time()
        log.debug("Starting queue stats...")

        # Fetch all known queues
        queues = [Queue(q) for q in Queue.all_known()]
        new_queues = {queue.id for queue in queues}
        old_queues = set(self.queue_etas.keys())
        # Drop ETA trackers for queues that no longer exist.
        for deleted_queue in old_queues.difference(new_queues):
            self.queue_etas.pop(deleted_queue)

        t = time.time()
        stats = {}
        for queue in queues:
            cnt = queue.count_jobs_to_dequeue()
            eta = self.queue_etas[queue.id].next(cnt, t=t)
            # Number of jobs to dequeue, ETA, Time of stats
            stats[queue.id] = "%d %s %d" % (cnt, eta if eta is not None else "N", int(t))

        with connections.redis.pipeline(transaction=True) as pipe:
            # Occasionally (~1% of runs) or when empty, reset the hash so
            # stale queue entries don't linger forever.
            if random.randint(0, 100) == 0 or len(stats) == 0:
                pipe.delete(self.redis_queuestats_key)
            if len(stats) > 0:
                pipe.hmset(self.redis_queuestats_key, stats)
            pipe.execute()

        log.debug("... done queue stats in %0.4fs" % (time.time() - start_time))

    def fetch_worker_group_definition(self):
        """Load this agent's worker group document from MongoDB with its
        `commands` list expanded per worker profile/count."""
        definition = connections.mongodb_jobs.mrq_workergroups.find_one({"_id": self.worker_group})
        # Prepend all commands by their worker profile.
        # NOTE(review): if the group document is missing, find_one returns
        # None and the .get below raises AttributeError — confirm callers
        # guarantee the document exists.
        commands = []
        for command in definition.get("commands", []):
            simplified_command, worker_count = normalize_command(command, self.worker_group)
            commands.extend([simplified_command] * worker_count)
        definition["commands"] = commands
        return definition
| |
# Software License Agreement (BSD License)
#
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import unicode_literals
import os
import sys
from test.io_wrapper import StringIO
import wstool
import wstool.helpers
import wstool.wstool_cli
from wstool.wstool_cli import WstoolCLI
from wstool.wstool_cli import wstool_main
from test.scm_test_base import AbstractSCMTest, _add_to_file
from test.local.test_diff_functions_svn import create_svn_repo, modify_svn_repo
from test.local.test_diff_functions_git import create_git_repo, modify_git_repo
from test.local.test_diff_functions_hg import create_hg_repo, modify_hg_repo
from test.local.test_diff_functions_bzr import create_bzr_repo, modify_bzr_repo
class WstoolDiffMultiTest(AbstractSCMTest):
    """End-to-end tests of `wstool diff` / `wstool status` over one workspace
    mixing git, svn, hg and bzr checkouts (git used twice to catch overlaps).
    Each test asserts that per-repo outputs are newline-separated and do not
    overlap."""

    @classmethod
    def setUpClass(self):
        # NOTE(review): the first parameter of this classmethod is named
        # `self` but actually receives the class (conventionally `cls`).
        AbstractSCMTest.setUpClass()
        # One remote repository per VCS type.
        remote_path_svn = os.path.join(self.test_root_path, "remote_svn")
        remote_path_git = os.path.join(self.test_root_path, "remote_git")
        remote_path_bzr = os.path.join(self.test_root_path, "remote_bzr")
        remote_path_hg = os.path.join(self.test_root_path, "remote_hg")
        os.makedirs(remote_path_git)
        os.makedirs(remote_path_svn)
        os.makedirs(remote_path_hg)
        os.makedirs(remote_path_bzr)

        filler_path = os.path.join(self.test_root_path, "filler")
        svn_uri = "file://localhost"+remote_path_svn

        create_svn_repo(self.test_root_path, remote_path_svn, filler_path, svn_uri)
        create_git_repo(remote_path_git)
        create_hg_repo(remote_path_hg)
        create_bzr_repo(remote_path_bzr)

        # wstool the remote repo and fake ros (using git twice to check all overlaps)
        rosinstall_spec = """- other: {local-name: ../ros}
- git: {local-name: clone_git, uri: ../remote_git}
- svn: {local-name: clone_svn, uri: '%s'}
- hg: {local-name: clone_hg, uri: ../remote_hg}
- bzr: {local-name: clone_bzr, uri: ../remote_bzr}
- git: {local-name: clone_git2, uri: ../remote_git}""" % svn_uri

        _add_to_file(os.path.join(self.local_path, ".rosinstall"), rosinstall_spec)
        cmd = ["rosws", "update", "-t", "ws"]
        os.chdir(self.test_root_path)
        wstool_main(cmd)

        # Make local modifications in every checkout so diff/status produce output.
        clone_path_git = os.path.join(self.local_path, "clone_git")
        clone_path_git2 = os.path.join(self.local_path, "clone_git2")
        clone_path_svn = os.path.join(self.local_path, "clone_svn")
        clone_path_hg = os.path.join(self.local_path, "clone_hg")
        clone_path_bzr = os.path.join(self.local_path, "clone_bzr")

        modify_git_repo(clone_path_git2)
        modify_git_repo(clone_path_git)
        modify_svn_repo(clone_path_svn)
        modify_hg_repo(clone_path_hg)
        modify_bzr_repo(clone_path_bzr)

    def check_diff_output(self, output):
        # this tests that there are proper newlines between diff outputs
        # for svn, the order varies, so we check two known variants
        # NOTE(review): the next two assertions are identical — one of them
        # was probably meant to check a different svn output variant.
        self.assertTrue("\nIndex: clone_svn/added.txt" in output, output)
        self.assertTrue("\nIndex: clone_svn/added.txt" in output, output)
        self.assertTrue("\nIndex: clone_svn/modified.txt" in output, output)
        self.assertTrue("\ndiff --git clone_hg/added.txt" in output, output)
        self.assertTrue("\n=== added file 'added.txt'\n--- clone_bzr/added.txt" in output, output)
        self.assertTrue("\ndiff --git clone_git2/added.txt" in output, output)

    def test_multi_diff_rosinstall_outside(self):
        '''Test wstool diff output from outside workspace.
        In particular asserts that there are newlines between diffs, and no overlaps'''
        cmd = ["wstool", "diff", "-t", "ws"]
        os.chdir(self.test_root_path)
        # Capture the stdout produced by wstool_main.
        sys.stdout = output = StringIO()
        wstool_main(cmd)
        sys.stdout = sys.__stdout__
        output = output.getvalue()
        self.check_diff_output(output)

    def test_multi_diff_wstool_outside(self):
        '''Test wstool diff output from outside workspace.
        In particular asserts that there are newlines between diffs, and no overlaps'''
        cmd = ["wstool", "diff", "-t", "ws"]
        os.chdir(self.test_root_path)
        sys.stdout = output = StringIO()
        wstool_main(cmd)
        sys.stdout = sys.__stdout__
        output = output.getvalue()
        self.check_diff_output(output)
        # Also exercise the programmatic CLI entry point.
        cli = WstoolCLI()
        self.assertEqual(0, cli.cmd_diff(os.path.join(self.test_root_path, 'ws'), []))

    def test_multi_diff_rosinstall_inside(self):
        '''Test wstool diff output from inside workspace.
        In particular asserts that there are newlines between diffs, and no overlaps'''
        directory = self.test_root_path + "/ws"
        cmd = ["wstool", "diff"]
        os.chdir(directory)
        sys.stdout = output = StringIO()
        wstool_main(cmd)
        # NOTE(review): sys.stdout is never restored to sys.__stdout__ here,
        # unlike the sibling tests — later prints go into this StringIO.
        output = output.getvalue()
        self.check_diff_output(output)

    def test_multi_diff_wstool_inside(self):
        '''Test wstool diff output from inside workspace.
        In particular asserts that there are newlines between diffs, and no overlaps'''
        directory = self.test_root_path + "/ws"
        cmd = ["wstool", "diff"]
        os.chdir(directory)
        sys.stdout = output = StringIO()
        wstool_main(cmd)
        output = output.getvalue()
        sys.stdout = sys.__stdout__
        self.check_diff_output(output)
        cli = WstoolCLI()
        self.assertEqual(0, cli.cmd_diff(directory, []))

    def test_multi_status_rosinstall_inside(self):
        """Test wstool status output when run inside workspace.
        In particular asserts that there are newlines between statuses, and no overlaps"""
        directory = self.test_root_path + "/ws"
        cmd = ["wstool", "status"]
        os.chdir(directory)
        sys.stdout = output = StringIO()
        wstool_main(cmd)
        # NOTE(review): sys.stdout is never restored to sys.__stdout__ here,
        # unlike the sibling tests.
        output = output.getvalue()
        self.assertStatusListEqual('A clone_git/added.txt\n D clone_git/deleted-fs.txt\nD clone_git/deleted.txt\n M clone_git/modified-fs.txt\nM clone_git/modified.txt\nA clone_svn/added.txt\nD clone_svn/deleted.txt\n! clone_svn/deleted-fs.txt\nM clone_svn/modified.txt\nM clone_hg/modified-fs.txt\nM clone_hg/modified.txt\nA clone_hg/added.txt\nR clone_hg/deleted.txt\n! clone_hg/deleted-fs.txt\n+N clone_bzr/added.txt\n D clone_bzr/deleted-fs.txt\n-D clone_bzr/deleted.txt\n M clone_bzr/modified-fs.txt\n M clone_bzr/modified.txt\nA clone_git2/added.txt\n D clone_git2/deleted-fs.txt\nD clone_git2/deleted.txt\n M clone_git2/modified-fs.txt\nM clone_git2/modified.txt\n', output)

    def test_multi_status_wstool_inside(self):
        """Test wstool status output when run inside workspace.
        In particular asserts that there are newlines between statuses, and no overlaps"""
        directory = self.test_root_path + "/ws"
        cmd = ["wstool", "status"]
        os.chdir(directory)
        sys.stdout = output = StringIO()
        wstool_main(cmd)
        output = output.getvalue()
        sys.stdout = sys.__stdout__
        self.assertStatusListEqual('A clone_git/added.txt\n D clone_git/deleted-fs.txt\nD clone_git/deleted.txt\n M clone_git/modified-fs.txt\nM clone_git/modified.txt\nA clone_svn/added.txt\nD clone_svn/deleted.txt\n! clone_svn/deleted-fs.txt\nM clone_svn/modified.txt\nM clone_hg/modified-fs.txt\nM clone_hg/modified.txt\nA clone_hg/added.txt\nR clone_hg/deleted.txt\n! clone_hg/deleted-fs.txt\n+N clone_bzr/added.txt\n D clone_bzr/deleted-fs.txt\n-D clone_bzr/deleted.txt\n M clone_bzr/modified-fs.txt\n M clone_bzr/modified.txt\nA clone_git2/added.txt\n D clone_git2/deleted-fs.txt\nD clone_git2/deleted.txt\n M clone_git2/modified-fs.txt\nM clone_git2/modified.txt\n', output)
        cli = WstoolCLI()
        # NOTE(review): this is a status test but calls cmd_diff — looks like
        # a copy-paste from the diff tests; presumably cmd_status was intended.
        self.assertEqual(0, cli.cmd_diff(directory, []))

    def test_multi_status_rosinstall_outside(self):
        """Test wstool status output when run outside workspace.
        In particular asserts that there are newlines between statuses, and no overlaps"""
        cmd = ["rosinstall", "status", "-t", "ws"]
        os.chdir(self.test_root_path)
        # NOTE(review): the command is run twice; the first captured output is
        # discarded — unclear whether this re-run is intentional.
        sys.stdout = output = StringIO()
        wstool_main(cmd)
        sys.stdout = output = StringIO()
        wstool_main(cmd)
        sys.stdout = sys.__stdout__
        output = output.getvalue()
        self.assertStatusListEqual('A clone_git/added.txt\n D clone_git/deleted-fs.txt\nD clone_git/deleted.txt\n M clone_git/modified-fs.txt\nM clone_git/modified.txt\nA clone_svn/added.txt\nD clone_svn/deleted.txt\n! clone_svn/deleted-fs.txt\nM clone_svn/modified.txt\nM clone_hg/modified-fs.txt\nM clone_hg/modified.txt\nA clone_hg/added.txt\nR clone_hg/deleted.txt\n! clone_hg/deleted-fs.txt\n+N clone_bzr/added.txt\n D clone_bzr/deleted-fs.txt\n-D clone_bzr/deleted.txt\n M clone_bzr/modified-fs.txt\n M clone_bzr/modified.txt\nA clone_git2/added.txt\n D clone_git2/deleted-fs.txt\nD clone_git2/deleted.txt\n M clone_git2/modified-fs.txt\nM clone_git2/modified.txt\n', output)

    def test_multi_status_wstool_outside(self):
        """Test wstool status output when run outside workspace.
        In particular asserts that there are newlines between statuses, and no overlaps"""
        cmd = ["wstool", "status", "-t", "ws"]
        os.chdir(self.test_root_path)
        sys.stdout = output = StringIO()
        wstool_main(cmd)
        sys.stdout = sys.__stdout__
        output = output.getvalue()
        self.assertStatusListEqual('A clone_git/added.txt\n D clone_git/deleted-fs.txt\nD clone_git/deleted.txt\n M clone_git/modified-fs.txt\nM clone_git/modified.txt\nA clone_svn/added.txt\nD clone_svn/deleted.txt\n! clone_svn/deleted-fs.txt\nM clone_svn/modified.txt\nM clone_hg/modified-fs.txt\nM clone_hg/modified.txt\nA clone_hg/added.txt\nR clone_hg/deleted.txt\n! clone_hg/deleted-fs.txt\n+N clone_bzr/added.txt\n D clone_bzr/deleted-fs.txt\n-D clone_bzr/deleted.txt\n M clone_bzr/modified-fs.txt\n M clone_bzr/modified.txt\nA clone_git2/added.txt\n D clone_git2/deleted-fs.txt\nD clone_git2/deleted.txt\n M clone_git2/modified-fs.txt\nM clone_git2/modified.txt\n', output)
        cli = WstoolCLI()
        self.assertEqual(0, cli.cmd_status(os.path.join(self.test_root_path, 'ws'), []))

    def test_multi_status_untracked(self):
        '''tests status output for --untracked.
        In particular asserts that there are newlines between statuses, and no overlaps'''
        # NOTE(review): the whole capture/assert sequence is performed twice
        # back to back — unclear whether the repetition is intentional.
        cmd = ["wstool", "status", "-t", "ws", "--untracked"]
        os.chdir(self.test_root_path)
        sys.stdout = output = StringIO()
        wstool_main(cmd)
        sys.stdout = sys.__stdout__
        output = output.getvalue()
        self.assertStatusListEqual('A clone_git/added.txt\n D clone_git/deleted-fs.txt\nD clone_git/deleted.txt\n M clone_git/modified-fs.txt\nM clone_git/modified.txt\n?? clone_git/added-fs.txt\n? clone_svn/added-fs.txt\nA clone_svn/added.txt\nD clone_svn/deleted.txt\n! clone_svn/deleted-fs.txt\nM clone_svn/modified.txt\nM clone_hg/modified-fs.txt\nM clone_hg/modified.txt\nA clone_hg/added.txt\nR clone_hg/deleted.txt\n! clone_hg/deleted-fs.txt\n? clone_hg/added-fs.txt\n? clone_bzr/added-fs.txt\n+N clone_bzr/added.txt\n D clone_bzr/deleted-fs.txt\n-D clone_bzr/deleted.txt\n M clone_bzr/modified-fs.txt\n M clone_bzr/modified.txt\nA clone_git2/added.txt\n D clone_git2/deleted-fs.txt\nD clone_git2/deleted.txt\n M clone_git2/modified-fs.txt\nM clone_git2/modified.txt\n?? clone_git2/added-fs.txt\n', output)

        cmd = ["wstool", "status", "-t", "ws", "--untracked"]
        os.chdir(self.test_root_path)
        sys.stdout = output = StringIO()
        wstool_main(cmd)
        sys.stdout = sys.__stdout__
        output = output.getvalue()
        self.assertStatusListEqual('A clone_git/added.txt\n D clone_git/deleted-fs.txt\nD clone_git/deleted.txt\n M clone_git/modified-fs.txt\nM clone_git/modified.txt\n?? clone_git/added-fs.txt\n? clone_svn/added-fs.txt\nA clone_svn/added.txt\nD clone_svn/deleted.txt\n! clone_svn/deleted-fs.txt\nM clone_svn/modified.txt\nM clone_hg/modified-fs.txt\nM clone_hg/modified.txt\nA clone_hg/added.txt\nR clone_hg/deleted.txt\n! clone_hg/deleted-fs.txt\n? clone_hg/added-fs.txt\n? clone_bzr/added-fs.txt\n+N clone_bzr/added.txt\n D clone_bzr/deleted-fs.txt\n-D clone_bzr/deleted.txt\n M clone_bzr/modified-fs.txt\n M clone_bzr/modified.txt\nA clone_git2/added.txt\n D clone_git2/deleted-fs.txt\nD clone_git2/deleted.txt\n M clone_git2/modified-fs.txt\nM clone_git2/modified.txt\n?? clone_git2/added-fs.txt\n', output)
        cli = WstoolCLI()
        self.assertEqual(0, cli.cmd_status(os.path.join(self.test_root_path, 'ws'), ["--untracked"]))
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.mgmt.core import AsyncARMPipelineClient
from azure.profiles import KnownProfiles, ProfileDefinition
from azure.profiles.multiapiclient import MultiApiClientMixin
from msrest import Deserializer, Serializer
from ._configuration import ResourceManagementClientConfiguration
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
from azure.core.credentials_async import AsyncTokenCredential
class _SDKClient(object):
def __init__(self, *args, **kwargs):
"""This is a fake class to support current implemetation of MultiApiClientMixin."
Will be removed in final version of multiapi azure-core based client
"""
pass
class ResourceManagementClient(MultiApiClientMixin, _SDKClient):
"""Provides operations for working with resources and resource groups.
This ready contains multiple API versions, to help you deal with all of the Azure clouds
(Azure Stack, Azure Government, Azure China, etc.).
By default, it uses the latest API version available on public Azure.
For production, you should stick to a particular api-version and/or profile.
The profile sets a mapping between an operation group and its API version.
The api-version parameter sets the default API version if the operation
group is not described in the profile.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The Microsoft Azure subscription ID.
:type subscription_id: str
:param api_version: API version to use if no profile is provided, or if missing in profile.
:type api_version: str
:param base_url: Service URL
:type base_url: str
:param profile: A profile definition, from KnownProfiles to dict.
:type profile: azure.profiles.KnownProfiles
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
DEFAULT_API_VERSION = '2021-04-01'
_PROFILE_TAG = "azure.mgmt.resource.resources.ResourceManagementClient"
LATEST_PROFILE = ProfileDefinition({
_PROFILE_TAG: {
None: DEFAULT_API_VERSION,
}},
_PROFILE_TAG + " latest"
)
    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        api_version: Optional[str] = None,
        base_url: str = "https://management.azure.com",
        profile: KnownProfiles = KnownProfiles.default,
        **kwargs  # type: Any
    ) -> None:
        # Per-client configuration (credential, subscription, policies).
        self._config = ResourceManagementClientConfiguration(credential, subscription_id, **kwargs)
        # Async ARM pipeline shared by every versioned operation group.
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
        # MultiApiClientMixin resolves the effective API version from
        # api_version / profile (see the class-level LATEST_PROFILE).
        super(ResourceManagementClient, self).__init__(
            api_version=api_version,
            profile=profile
        )
@classmethod
def _models_dict(cls, api_version):
return {k: v for k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)}
    @classmethod
    def models(cls, api_version=DEFAULT_API_VERSION):
        """Module depends on the API version:

           * 2016-02-01: :mod:`v2016_02_01.models<azure.mgmt.resource.resources.v2016_02_01.models>`
           * 2016-09-01: :mod:`v2016_09_01.models<azure.mgmt.resource.resources.v2016_09_01.models>`
           * 2017-05-10: :mod:`v2017_05_10.models<azure.mgmt.resource.resources.v2017_05_10.models>`
           * 2018-02-01: :mod:`v2018_02_01.models<azure.mgmt.resource.resources.v2018_02_01.models>`
           * 2018-05-01: :mod:`v2018_05_01.models<azure.mgmt.resource.resources.v2018_05_01.models>`
           * 2019-03-01: :mod:`v2019_03_01.models<azure.mgmt.resource.resources.v2019_03_01.models>`
           * 2019-05-01: :mod:`v2019_05_01.models<azure.mgmt.resource.resources.v2019_05_01.models>`
           * 2019-05-10: :mod:`v2019_05_10.models<azure.mgmt.resource.resources.v2019_05_10.models>`
           * 2019-07-01: :mod:`v2019_07_01.models<azure.mgmt.resource.resources.v2019_07_01.models>`
           * 2019-08-01: :mod:`v2019_08_01.models<azure.mgmt.resource.resources.v2019_08_01.models>`
           * 2019-10-01: :mod:`v2019_10_01.models<azure.mgmt.resource.resources.v2019_10_01.models>`
           * 2020-06-01: :mod:`v2020_06_01.models<azure.mgmt.resource.resources.v2020_06_01.models>`
           * 2020-10-01: :mod:`v2020_10_01.models<azure.mgmt.resource.resources.v2020_10_01.models>`
           * 2021-01-01: :mod:`v2021_01_01.models<azure.mgmt.resource.resources.v2021_01_01.models>`
           * 2021-04-01: :mod:`v2021_04_01.models<azure.mgmt.resource.resources.v2021_04_01.models>`

        Raises ValueError when *api_version* matches none of the above.
        """
        # Lazy imports: only the requested version's models module is loaded.
        if api_version == '2016-02-01':
            from ..v2016_02_01 import models
            return models
        elif api_version == '2016-09-01':
            from ..v2016_09_01 import models
            return models
        elif api_version == '2017-05-10':
            from ..v2017_05_10 import models
            return models
        elif api_version == '2018-02-01':
            from ..v2018_02_01 import models
            return models
        elif api_version == '2018-05-01':
            from ..v2018_05_01 import models
            return models
        elif api_version == '2019-03-01':
            from ..v2019_03_01 import models
            return models
        elif api_version == '2019-05-01':
            from ..v2019_05_01 import models
            return models
        elif api_version == '2019-05-10':
            from ..v2019_05_10 import models
            return models
        elif api_version == '2019-07-01':
            from ..v2019_07_01 import models
            return models
        elif api_version == '2019-08-01':
            from ..v2019_08_01 import models
            return models
        elif api_version == '2019-10-01':
            from ..v2019_10_01 import models
            return models
        elif api_version == '2020-06-01':
            from ..v2020_06_01 import models
            return models
        elif api_version == '2020-10-01':
            from ..v2020_10_01 import models
            return models
        elif api_version == '2021-01-01':
            from ..v2021_01_01 import models
            return models
        elif api_version == '2021-04-01':
            from ..v2021_04_01 import models
            return models
        raise ValueError("API version {} is not available".format(api_version))
    @property
    def deployment_operations(self):
        """Instance depends on the API version:
        * 2016-02-01: :class:`DeploymentOperationsOperations<azure.mgmt.resource.resources.v2016_02_01.aio.operations.DeploymentOperationsOperations>`
        * 2016-09-01: :class:`DeploymentOperationsOperations<azure.mgmt.resource.resources.v2016_09_01.aio.operations.DeploymentOperationsOperations>`
        * 2017-05-10: :class:`DeploymentOperationsOperations<azure.mgmt.resource.resources.v2017_05_10.aio.operations.DeploymentOperationsOperations>`
        * 2018-02-01: :class:`DeploymentOperationsOperations<azure.mgmt.resource.resources.v2018_02_01.aio.operations.DeploymentOperationsOperations>`
        * 2018-05-01: :class:`DeploymentOperationsOperations<azure.mgmt.resource.resources.v2018_05_01.aio.operations.DeploymentOperationsOperations>`
        * 2019-03-01: :class:`DeploymentOperationsOperations<azure.mgmt.resource.resources.v2019_03_01.aio.operations.DeploymentOperationsOperations>`
        * 2019-05-01: :class:`DeploymentOperationsOperations<azure.mgmt.resource.resources.v2019_05_01.aio.operations.DeploymentOperationsOperations>`
        * 2019-05-10: :class:`DeploymentOperationsOperations<azure.mgmt.resource.resources.v2019_05_10.aio.operations.DeploymentOperationsOperations>`
        * 2019-07-01: :class:`DeploymentOperationsOperations<azure.mgmt.resource.resources.v2019_07_01.aio.operations.DeploymentOperationsOperations>`
        * 2019-08-01: :class:`DeploymentOperationsOperations<azure.mgmt.resource.resources.v2019_08_01.aio.operations.DeploymentOperationsOperations>`
        * 2019-10-01: :class:`DeploymentOperationsOperations<azure.mgmt.resource.resources.v2019_10_01.aio.operations.DeploymentOperationsOperations>`
        * 2020-06-01: :class:`DeploymentOperationsOperations<azure.mgmt.resource.resources.v2020_06_01.aio.operations.DeploymentOperationsOperations>`
        * 2020-10-01: :class:`DeploymentOperationsOperations<azure.mgmt.resource.resources.v2020_10_01.aio.operations.DeploymentOperationsOperations>`
        * 2021-01-01: :class:`DeploymentOperationsOperations<azure.mgmt.resource.resources.v2021_01_01.aio.operations.DeploymentOperationsOperations>`
        * 2021-04-01: :class:`DeploymentOperationsOperations<azure.mgmt.resource.resources.v2021_04_01.aio.operations.DeploymentOperationsOperations>`
        """
        # Resolve the API version negotiated for this operation group, then
        # lazily import only the matching generated module so unused API
        # versions are never imported.
        api_version = self._get_api_version('deployment_operations')
        if api_version == '2016-02-01':
            from ..v2016_02_01.aio.operations import DeploymentOperationsOperations as OperationClass
        elif api_version == '2016-09-01':
            from ..v2016_09_01.aio.operations import DeploymentOperationsOperations as OperationClass
        elif api_version == '2017-05-10':
            from ..v2017_05_10.aio.operations import DeploymentOperationsOperations as OperationClass
        elif api_version == '2018-02-01':
            from ..v2018_02_01.aio.operations import DeploymentOperationsOperations as OperationClass
        elif api_version == '2018-05-01':
            from ..v2018_05_01.aio.operations import DeploymentOperationsOperations as OperationClass
        elif api_version == '2019-03-01':
            from ..v2019_03_01.aio.operations import DeploymentOperationsOperations as OperationClass
        elif api_version == '2019-05-01':
            from ..v2019_05_01.aio.operations import DeploymentOperationsOperations as OperationClass
        elif api_version == '2019-05-10':
            from ..v2019_05_10.aio.operations import DeploymentOperationsOperations as OperationClass
        elif api_version == '2019-07-01':
            from ..v2019_07_01.aio.operations import DeploymentOperationsOperations as OperationClass
        elif api_version == '2019-08-01':
            from ..v2019_08_01.aio.operations import DeploymentOperationsOperations as OperationClass
        elif api_version == '2019-10-01':
            from ..v2019_10_01.aio.operations import DeploymentOperationsOperations as OperationClass
        elif api_version == '2020-06-01':
            from ..v2020_06_01.aio.operations import DeploymentOperationsOperations as OperationClass
        elif api_version == '2020-10-01':
            from ..v2020_10_01.aio.operations import DeploymentOperationsOperations as OperationClass
        elif api_version == '2021-01-01':
            from ..v2021_01_01.aio.operations import DeploymentOperationsOperations as OperationClass
        elif api_version == '2021-04-01':
            from ..v2021_04_01.aio.operations import DeploymentOperationsOperations as OperationClass
        else:
            raise ValueError("API version {} does not have operation group 'deployment_operations'".format(api_version))
        # Serializer/Deserializer are built from the models of the selected
        # version so wire (de)serialization matches the chosen API.
        return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
    @property
    def deployments(self):
        """Instance depends on the API version:
        * 2016-02-01: :class:`DeploymentsOperations<azure.mgmt.resource.resources.v2016_02_01.aio.operations.DeploymentsOperations>`
        * 2016-09-01: :class:`DeploymentsOperations<azure.mgmt.resource.resources.v2016_09_01.aio.operations.DeploymentsOperations>`
        * 2017-05-10: :class:`DeploymentsOperations<azure.mgmt.resource.resources.v2017_05_10.aio.operations.DeploymentsOperations>`
        * 2018-02-01: :class:`DeploymentsOperations<azure.mgmt.resource.resources.v2018_02_01.aio.operations.DeploymentsOperations>`
        * 2018-05-01: :class:`DeploymentsOperations<azure.mgmt.resource.resources.v2018_05_01.aio.operations.DeploymentsOperations>`
        * 2019-03-01: :class:`DeploymentsOperations<azure.mgmt.resource.resources.v2019_03_01.aio.operations.DeploymentsOperations>`
        * 2019-05-01: :class:`DeploymentsOperations<azure.mgmt.resource.resources.v2019_05_01.aio.operations.DeploymentsOperations>`
        * 2019-05-10: :class:`DeploymentsOperations<azure.mgmt.resource.resources.v2019_05_10.aio.operations.DeploymentsOperations>`
        * 2019-07-01: :class:`DeploymentsOperations<azure.mgmt.resource.resources.v2019_07_01.aio.operations.DeploymentsOperations>`
        * 2019-08-01: :class:`DeploymentsOperations<azure.mgmt.resource.resources.v2019_08_01.aio.operations.DeploymentsOperations>`
        * 2019-10-01: :class:`DeploymentsOperations<azure.mgmt.resource.resources.v2019_10_01.aio.operations.DeploymentsOperations>`
        * 2020-06-01: :class:`DeploymentsOperations<azure.mgmt.resource.resources.v2020_06_01.aio.operations.DeploymentsOperations>`
        * 2020-10-01: :class:`DeploymentsOperations<azure.mgmt.resource.resources.v2020_10_01.aio.operations.DeploymentsOperations>`
        * 2021-01-01: :class:`DeploymentsOperations<azure.mgmt.resource.resources.v2021_01_01.aio.operations.DeploymentsOperations>`
        * 2021-04-01: :class:`DeploymentsOperations<azure.mgmt.resource.resources.v2021_04_01.aio.operations.DeploymentsOperations>`
        """
        # Lazily import only the generated module that matches the API
        # version negotiated for the 'deployments' operation group.
        api_version = self._get_api_version('deployments')
        if api_version == '2016-02-01':
            from ..v2016_02_01.aio.operations import DeploymentsOperations as OperationClass
        elif api_version == '2016-09-01':
            from ..v2016_09_01.aio.operations import DeploymentsOperations as OperationClass
        elif api_version == '2017-05-10':
            from ..v2017_05_10.aio.operations import DeploymentsOperations as OperationClass
        elif api_version == '2018-02-01':
            from ..v2018_02_01.aio.operations import DeploymentsOperations as OperationClass
        elif api_version == '2018-05-01':
            from ..v2018_05_01.aio.operations import DeploymentsOperations as OperationClass
        elif api_version == '2019-03-01':
            from ..v2019_03_01.aio.operations import DeploymentsOperations as OperationClass
        elif api_version == '2019-05-01':
            from ..v2019_05_01.aio.operations import DeploymentsOperations as OperationClass
        elif api_version == '2019-05-10':
            from ..v2019_05_10.aio.operations import DeploymentsOperations as OperationClass
        elif api_version == '2019-07-01':
            from ..v2019_07_01.aio.operations import DeploymentsOperations as OperationClass
        elif api_version == '2019-08-01':
            from ..v2019_08_01.aio.operations import DeploymentsOperations as OperationClass
        elif api_version == '2019-10-01':
            from ..v2019_10_01.aio.operations import DeploymentsOperations as OperationClass
        elif api_version == '2020-06-01':
            from ..v2020_06_01.aio.operations import DeploymentsOperations as OperationClass
        elif api_version == '2020-10-01':
            from ..v2020_10_01.aio.operations import DeploymentsOperations as OperationClass
        elif api_version == '2021-01-01':
            from ..v2021_01_01.aio.operations import DeploymentsOperations as OperationClass
        elif api_version == '2021-04-01':
            from ..v2021_04_01.aio.operations import DeploymentsOperations as OperationClass
        else:
            raise ValueError("API version {} does not have operation group 'deployments'".format(api_version))
        # Bind (de)serialization to the models of the selected version.
        return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
    @property
    def operations(self):
        """Instance depends on the API version:
        * 2018-05-01: :class:`Operations<azure.mgmt.resource.resources.v2018_05_01.aio.operations.Operations>`
        * 2019-03-01: :class:`Operations<azure.mgmt.resource.resources.v2019_03_01.aio.operations.Operations>`
        * 2019-05-01: :class:`Operations<azure.mgmt.resource.resources.v2019_05_01.aio.operations.Operations>`
        * 2019-05-10: :class:`Operations<azure.mgmt.resource.resources.v2019_05_10.aio.operations.Operations>`
        * 2019-07-01: :class:`Operations<azure.mgmt.resource.resources.v2019_07_01.aio.operations.Operations>`
        * 2019-08-01: :class:`Operations<azure.mgmt.resource.resources.v2019_08_01.aio.operations.Operations>`
        * 2019-10-01: :class:`Operations<azure.mgmt.resource.resources.v2019_10_01.aio.operations.Operations>`
        * 2020-06-01: :class:`Operations<azure.mgmt.resource.resources.v2020_06_01.aio.operations.Operations>`
        * 2020-10-01: :class:`Operations<azure.mgmt.resource.resources.v2020_10_01.aio.operations.Operations>`
        * 2021-01-01: :class:`Operations<azure.mgmt.resource.resources.v2021_01_01.aio.operations.Operations>`
        * 2021-04-01: :class:`Operations<azure.mgmt.resource.resources.v2021_04_01.aio.operations.Operations>`
        """
        # NOTE: this group only exists from API version 2018-05-01 onwards,
        # hence the shorter dispatch chain below.
        api_version = self._get_api_version('operations')
        if api_version == '2018-05-01':
            from ..v2018_05_01.aio.operations import Operations as OperationClass
        elif api_version == '2019-03-01':
            from ..v2019_03_01.aio.operations import Operations as OperationClass
        elif api_version == '2019-05-01':
            from ..v2019_05_01.aio.operations import Operations as OperationClass
        elif api_version == '2019-05-10':
            from ..v2019_05_10.aio.operations import Operations as OperationClass
        elif api_version == '2019-07-01':
            from ..v2019_07_01.aio.operations import Operations as OperationClass
        elif api_version == '2019-08-01':
            from ..v2019_08_01.aio.operations import Operations as OperationClass
        elif api_version == '2019-10-01':
            from ..v2019_10_01.aio.operations import Operations as OperationClass
        elif api_version == '2020-06-01':
            from ..v2020_06_01.aio.operations import Operations as OperationClass
        elif api_version == '2020-10-01':
            from ..v2020_10_01.aio.operations import Operations as OperationClass
        elif api_version == '2021-01-01':
            from ..v2021_01_01.aio.operations import Operations as OperationClass
        elif api_version == '2021-04-01':
            from ..v2021_04_01.aio.operations import Operations as OperationClass
        else:
            raise ValueError("API version {} does not have operation group 'operations'".format(api_version))
        # Bind (de)serialization to the models of the selected version.
        return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
    @property
    def provider_resource_types(self):
        """Instance depends on the API version:
        * 2020-10-01: :class:`ProviderResourceTypesOperations<azure.mgmt.resource.resources.v2020_10_01.aio.operations.ProviderResourceTypesOperations>`
        * 2021-01-01: :class:`ProviderResourceTypesOperations<azure.mgmt.resource.resources.v2021_01_01.aio.operations.ProviderResourceTypesOperations>`
        * 2021-04-01: :class:`ProviderResourceTypesOperations<azure.mgmt.resource.resources.v2021_04_01.aio.operations.ProviderResourceTypesOperations>`
        """
        # NOTE: this group only exists from API version 2020-10-01 onwards.
        api_version = self._get_api_version('provider_resource_types')
        if api_version == '2020-10-01':
            from ..v2020_10_01.aio.operations import ProviderResourceTypesOperations as OperationClass
        elif api_version == '2021-01-01':
            from ..v2021_01_01.aio.operations import ProviderResourceTypesOperations as OperationClass
        elif api_version == '2021-04-01':
            from ..v2021_04_01.aio.operations import ProviderResourceTypesOperations as OperationClass
        else:
            raise ValueError("API version {} does not have operation group 'provider_resource_types'".format(api_version))
        # Bind (de)serialization to the models of the selected version.
        return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
    @property
    def providers(self):
        """Instance depends on the API version:
        * 2016-02-01: :class:`ProvidersOperations<azure.mgmt.resource.resources.v2016_02_01.aio.operations.ProvidersOperations>`
        * 2016-09-01: :class:`ProvidersOperations<azure.mgmt.resource.resources.v2016_09_01.aio.operations.ProvidersOperations>`
        * 2017-05-10: :class:`ProvidersOperations<azure.mgmt.resource.resources.v2017_05_10.aio.operations.ProvidersOperations>`
        * 2018-02-01: :class:`ProvidersOperations<azure.mgmt.resource.resources.v2018_02_01.aio.operations.ProvidersOperations>`
        * 2018-05-01: :class:`ProvidersOperations<azure.mgmt.resource.resources.v2018_05_01.aio.operations.ProvidersOperations>`
        * 2019-03-01: :class:`ProvidersOperations<azure.mgmt.resource.resources.v2019_03_01.aio.operations.ProvidersOperations>`
        * 2019-05-01: :class:`ProvidersOperations<azure.mgmt.resource.resources.v2019_05_01.aio.operations.ProvidersOperations>`
        * 2019-05-10: :class:`ProvidersOperations<azure.mgmt.resource.resources.v2019_05_10.aio.operations.ProvidersOperations>`
        * 2019-07-01: :class:`ProvidersOperations<azure.mgmt.resource.resources.v2019_07_01.aio.operations.ProvidersOperations>`
        * 2019-08-01: :class:`ProvidersOperations<azure.mgmt.resource.resources.v2019_08_01.aio.operations.ProvidersOperations>`
        * 2019-10-01: :class:`ProvidersOperations<azure.mgmt.resource.resources.v2019_10_01.aio.operations.ProvidersOperations>`
        * 2020-06-01: :class:`ProvidersOperations<azure.mgmt.resource.resources.v2020_06_01.aio.operations.ProvidersOperations>`
        * 2020-10-01: :class:`ProvidersOperations<azure.mgmt.resource.resources.v2020_10_01.aio.operations.ProvidersOperations>`
        * 2021-01-01: :class:`ProvidersOperations<azure.mgmt.resource.resources.v2021_01_01.aio.operations.ProvidersOperations>`
        * 2021-04-01: :class:`ProvidersOperations<azure.mgmt.resource.resources.v2021_04_01.aio.operations.ProvidersOperations>`
        """
        # Lazily import only the generated module that matches the API
        # version negotiated for the 'providers' operation group.
        api_version = self._get_api_version('providers')
        if api_version == '2016-02-01':
            from ..v2016_02_01.aio.operations import ProvidersOperations as OperationClass
        elif api_version == '2016-09-01':
            from ..v2016_09_01.aio.operations import ProvidersOperations as OperationClass
        elif api_version == '2017-05-10':
            from ..v2017_05_10.aio.operations import ProvidersOperations as OperationClass
        elif api_version == '2018-02-01':
            from ..v2018_02_01.aio.operations import ProvidersOperations as OperationClass
        elif api_version == '2018-05-01':
            from ..v2018_05_01.aio.operations import ProvidersOperations as OperationClass
        elif api_version == '2019-03-01':
            from ..v2019_03_01.aio.operations import ProvidersOperations as OperationClass
        elif api_version == '2019-05-01':
            from ..v2019_05_01.aio.operations import ProvidersOperations as OperationClass
        elif api_version == '2019-05-10':
            from ..v2019_05_10.aio.operations import ProvidersOperations as OperationClass
        elif api_version == '2019-07-01':
            from ..v2019_07_01.aio.operations import ProvidersOperations as OperationClass
        elif api_version == '2019-08-01':
            from ..v2019_08_01.aio.operations import ProvidersOperations as OperationClass
        elif api_version == '2019-10-01':
            from ..v2019_10_01.aio.operations import ProvidersOperations as OperationClass
        elif api_version == '2020-06-01':
            from ..v2020_06_01.aio.operations import ProvidersOperations as OperationClass
        elif api_version == '2020-10-01':
            from ..v2020_10_01.aio.operations import ProvidersOperations as OperationClass
        elif api_version == '2021-01-01':
            from ..v2021_01_01.aio.operations import ProvidersOperations as OperationClass
        elif api_version == '2021-04-01':
            from ..v2021_04_01.aio.operations import ProvidersOperations as OperationClass
        else:
            raise ValueError("API version {} does not have operation group 'providers'".format(api_version))
        # Bind (de)serialization to the models of the selected version.
        return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
    @property
    def resource_groups(self):
        """Instance depends on the API version:
        * 2016-02-01: :class:`ResourceGroupsOperations<azure.mgmt.resource.resources.v2016_02_01.aio.operations.ResourceGroupsOperations>`
        * 2016-09-01: :class:`ResourceGroupsOperations<azure.mgmt.resource.resources.v2016_09_01.aio.operations.ResourceGroupsOperations>`
        * 2017-05-10: :class:`ResourceGroupsOperations<azure.mgmt.resource.resources.v2017_05_10.aio.operations.ResourceGroupsOperations>`
        * 2018-02-01: :class:`ResourceGroupsOperations<azure.mgmt.resource.resources.v2018_02_01.aio.operations.ResourceGroupsOperations>`
        * 2018-05-01: :class:`ResourceGroupsOperations<azure.mgmt.resource.resources.v2018_05_01.aio.operations.ResourceGroupsOperations>`
        * 2019-03-01: :class:`ResourceGroupsOperations<azure.mgmt.resource.resources.v2019_03_01.aio.operations.ResourceGroupsOperations>`
        * 2019-05-01: :class:`ResourceGroupsOperations<azure.mgmt.resource.resources.v2019_05_01.aio.operations.ResourceGroupsOperations>`
        * 2019-05-10: :class:`ResourceGroupsOperations<azure.mgmt.resource.resources.v2019_05_10.aio.operations.ResourceGroupsOperations>`
        * 2019-07-01: :class:`ResourceGroupsOperations<azure.mgmt.resource.resources.v2019_07_01.aio.operations.ResourceGroupsOperations>`
        * 2019-08-01: :class:`ResourceGroupsOperations<azure.mgmt.resource.resources.v2019_08_01.aio.operations.ResourceGroupsOperations>`
        * 2019-10-01: :class:`ResourceGroupsOperations<azure.mgmt.resource.resources.v2019_10_01.aio.operations.ResourceGroupsOperations>`
        * 2020-06-01: :class:`ResourceGroupsOperations<azure.mgmt.resource.resources.v2020_06_01.aio.operations.ResourceGroupsOperations>`
        * 2020-10-01: :class:`ResourceGroupsOperations<azure.mgmt.resource.resources.v2020_10_01.aio.operations.ResourceGroupsOperations>`
        * 2021-01-01: :class:`ResourceGroupsOperations<azure.mgmt.resource.resources.v2021_01_01.aio.operations.ResourceGroupsOperations>`
        * 2021-04-01: :class:`ResourceGroupsOperations<azure.mgmt.resource.resources.v2021_04_01.aio.operations.ResourceGroupsOperations>`
        """
        # Lazily import only the generated module that matches the API
        # version negotiated for the 'resource_groups' operation group.
        api_version = self._get_api_version('resource_groups')
        if api_version == '2016-02-01':
            from ..v2016_02_01.aio.operations import ResourceGroupsOperations as OperationClass
        elif api_version == '2016-09-01':
            from ..v2016_09_01.aio.operations import ResourceGroupsOperations as OperationClass
        elif api_version == '2017-05-10':
            from ..v2017_05_10.aio.operations import ResourceGroupsOperations as OperationClass
        elif api_version == '2018-02-01':
            from ..v2018_02_01.aio.operations import ResourceGroupsOperations as OperationClass
        elif api_version == '2018-05-01':
            from ..v2018_05_01.aio.operations import ResourceGroupsOperations as OperationClass
        elif api_version == '2019-03-01':
            from ..v2019_03_01.aio.operations import ResourceGroupsOperations as OperationClass
        elif api_version == '2019-05-01':
            from ..v2019_05_01.aio.operations import ResourceGroupsOperations as OperationClass
        elif api_version == '2019-05-10':
            from ..v2019_05_10.aio.operations import ResourceGroupsOperations as OperationClass
        elif api_version == '2019-07-01':
            from ..v2019_07_01.aio.operations import ResourceGroupsOperations as OperationClass
        elif api_version == '2019-08-01':
            from ..v2019_08_01.aio.operations import ResourceGroupsOperations as OperationClass
        elif api_version == '2019-10-01':
            from ..v2019_10_01.aio.operations import ResourceGroupsOperations as OperationClass
        elif api_version == '2020-06-01':
            from ..v2020_06_01.aio.operations import ResourceGroupsOperations as OperationClass
        elif api_version == '2020-10-01':
            from ..v2020_10_01.aio.operations import ResourceGroupsOperations as OperationClass
        elif api_version == '2021-01-01':
            from ..v2021_01_01.aio.operations import ResourceGroupsOperations as OperationClass
        elif api_version == '2021-04-01':
            from ..v2021_04_01.aio.operations import ResourceGroupsOperations as OperationClass
        else:
            raise ValueError("API version {} does not have operation group 'resource_groups'".format(api_version))
        # Bind (de)serialization to the models of the selected version.
        return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
    @property
    def resources(self):
        """Instance depends on the API version:
        * 2016-02-01: :class:`ResourcesOperations<azure.mgmt.resource.resources.v2016_02_01.aio.operations.ResourcesOperations>`
        * 2016-09-01: :class:`ResourcesOperations<azure.mgmt.resource.resources.v2016_09_01.aio.operations.ResourcesOperations>`
        * 2017-05-10: :class:`ResourcesOperations<azure.mgmt.resource.resources.v2017_05_10.aio.operations.ResourcesOperations>`
        * 2018-02-01: :class:`ResourcesOperations<azure.mgmt.resource.resources.v2018_02_01.aio.operations.ResourcesOperations>`
        * 2018-05-01: :class:`ResourcesOperations<azure.mgmt.resource.resources.v2018_05_01.aio.operations.ResourcesOperations>`
        * 2019-03-01: :class:`ResourcesOperations<azure.mgmt.resource.resources.v2019_03_01.aio.operations.ResourcesOperations>`
        * 2019-05-01: :class:`ResourcesOperations<azure.mgmt.resource.resources.v2019_05_01.aio.operations.ResourcesOperations>`
        * 2019-05-10: :class:`ResourcesOperations<azure.mgmt.resource.resources.v2019_05_10.aio.operations.ResourcesOperations>`
        * 2019-07-01: :class:`ResourcesOperations<azure.mgmt.resource.resources.v2019_07_01.aio.operations.ResourcesOperations>`
        * 2019-08-01: :class:`ResourcesOperations<azure.mgmt.resource.resources.v2019_08_01.aio.operations.ResourcesOperations>`
        * 2019-10-01: :class:`ResourcesOperations<azure.mgmt.resource.resources.v2019_10_01.aio.operations.ResourcesOperations>`
        * 2020-06-01: :class:`ResourcesOperations<azure.mgmt.resource.resources.v2020_06_01.aio.operations.ResourcesOperations>`
        * 2020-10-01: :class:`ResourcesOperations<azure.mgmt.resource.resources.v2020_10_01.aio.operations.ResourcesOperations>`
        * 2021-01-01: :class:`ResourcesOperations<azure.mgmt.resource.resources.v2021_01_01.aio.operations.ResourcesOperations>`
        * 2021-04-01: :class:`ResourcesOperations<azure.mgmt.resource.resources.v2021_04_01.aio.operations.ResourcesOperations>`
        """
        # Lazily import only the generated module that matches the API
        # version negotiated for the 'resources' operation group.
        api_version = self._get_api_version('resources')
        if api_version == '2016-02-01':
            from ..v2016_02_01.aio.operations import ResourcesOperations as OperationClass
        elif api_version == '2016-09-01':
            from ..v2016_09_01.aio.operations import ResourcesOperations as OperationClass
        elif api_version == '2017-05-10':
            from ..v2017_05_10.aio.operations import ResourcesOperations as OperationClass
        elif api_version == '2018-02-01':
            from ..v2018_02_01.aio.operations import ResourcesOperations as OperationClass
        elif api_version == '2018-05-01':
            from ..v2018_05_01.aio.operations import ResourcesOperations as OperationClass
        elif api_version == '2019-03-01':
            from ..v2019_03_01.aio.operations import ResourcesOperations as OperationClass
        elif api_version == '2019-05-01':
            from ..v2019_05_01.aio.operations import ResourcesOperations as OperationClass
        elif api_version == '2019-05-10':
            from ..v2019_05_10.aio.operations import ResourcesOperations as OperationClass
        elif api_version == '2019-07-01':
            from ..v2019_07_01.aio.operations import ResourcesOperations as OperationClass
        elif api_version == '2019-08-01':
            from ..v2019_08_01.aio.operations import ResourcesOperations as OperationClass
        elif api_version == '2019-10-01':
            from ..v2019_10_01.aio.operations import ResourcesOperations as OperationClass
        elif api_version == '2020-06-01':
            from ..v2020_06_01.aio.operations import ResourcesOperations as OperationClass
        elif api_version == '2020-10-01':
            from ..v2020_10_01.aio.operations import ResourcesOperations as OperationClass
        elif api_version == '2021-01-01':
            from ..v2021_01_01.aio.operations import ResourcesOperations as OperationClass
        elif api_version == '2021-04-01':
            from ..v2021_04_01.aio.operations import ResourcesOperations as OperationClass
        else:
            raise ValueError("API version {} does not have operation group 'resources'".format(api_version))
        # Bind (de)serialization to the models of the selected version.
        return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
    @property
    def tags(self):
        """Instance depends on the API version:
        * 2016-02-01: :class:`TagsOperations<azure.mgmt.resource.resources.v2016_02_01.aio.operations.TagsOperations>`
        * 2016-09-01: :class:`TagsOperations<azure.mgmt.resource.resources.v2016_09_01.aio.operations.TagsOperations>`
        * 2017-05-10: :class:`TagsOperations<azure.mgmt.resource.resources.v2017_05_10.aio.operations.TagsOperations>`
        * 2018-02-01: :class:`TagsOperations<azure.mgmt.resource.resources.v2018_02_01.aio.operations.TagsOperations>`
        * 2018-05-01: :class:`TagsOperations<azure.mgmt.resource.resources.v2018_05_01.aio.operations.TagsOperations>`
        * 2019-03-01: :class:`TagsOperations<azure.mgmt.resource.resources.v2019_03_01.aio.operations.TagsOperations>`
        * 2019-05-01: :class:`TagsOperations<azure.mgmt.resource.resources.v2019_05_01.aio.operations.TagsOperations>`
        * 2019-05-10: :class:`TagsOperations<azure.mgmt.resource.resources.v2019_05_10.aio.operations.TagsOperations>`
        * 2019-07-01: :class:`TagsOperations<azure.mgmt.resource.resources.v2019_07_01.aio.operations.TagsOperations>`
        * 2019-08-01: :class:`TagsOperations<azure.mgmt.resource.resources.v2019_08_01.aio.operations.TagsOperations>`
        * 2019-10-01: :class:`TagsOperations<azure.mgmt.resource.resources.v2019_10_01.aio.operations.TagsOperations>`
        * 2020-06-01: :class:`TagsOperations<azure.mgmt.resource.resources.v2020_06_01.aio.operations.TagsOperations>`
        * 2020-10-01: :class:`TagsOperations<azure.mgmt.resource.resources.v2020_10_01.aio.operations.TagsOperations>`
        * 2021-01-01: :class:`TagsOperations<azure.mgmt.resource.resources.v2021_01_01.aio.operations.TagsOperations>`
        * 2021-04-01: :class:`TagsOperations<azure.mgmt.resource.resources.v2021_04_01.aio.operations.TagsOperations>`
        """
        # Lazily import only the generated module that matches the API
        # version negotiated for the 'tags' operation group.
        api_version = self._get_api_version('tags')
        if api_version == '2016-02-01':
            from ..v2016_02_01.aio.operations import TagsOperations as OperationClass
        elif api_version == '2016-09-01':
            from ..v2016_09_01.aio.operations import TagsOperations as OperationClass
        elif api_version == '2017-05-10':
            from ..v2017_05_10.aio.operations import TagsOperations as OperationClass
        elif api_version == '2018-02-01':
            from ..v2018_02_01.aio.operations import TagsOperations as OperationClass
        elif api_version == '2018-05-01':
            from ..v2018_05_01.aio.operations import TagsOperations as OperationClass
        elif api_version == '2019-03-01':
            from ..v2019_03_01.aio.operations import TagsOperations as OperationClass
        elif api_version == '2019-05-01':
            from ..v2019_05_01.aio.operations import TagsOperations as OperationClass
        elif api_version == '2019-05-10':
            from ..v2019_05_10.aio.operations import TagsOperations as OperationClass
        elif api_version == '2019-07-01':
            from ..v2019_07_01.aio.operations import TagsOperations as OperationClass
        elif api_version == '2019-08-01':
            from ..v2019_08_01.aio.operations import TagsOperations as OperationClass
        elif api_version == '2019-10-01':
            from ..v2019_10_01.aio.operations import TagsOperations as OperationClass
        elif api_version == '2020-06-01':
            from ..v2020_06_01.aio.operations import TagsOperations as OperationClass
        elif api_version == '2020-10-01':
            from ..v2020_10_01.aio.operations import TagsOperations as OperationClass
        elif api_version == '2021-01-01':
            from ..v2021_01_01.aio.operations import TagsOperations as OperationClass
        elif api_version == '2021-04-01':
            from ..v2021_04_01.aio.operations import TagsOperations as OperationClass
        else:
            raise ValueError("API version {} does not have operation group 'tags'".format(api_version))
        # Bind (de)serialization to the models of the selected version.
        return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
    async def close(self):
        """Close the underlying HTTP pipeline/session held by the client."""
        await self._client.close()
    async def __aenter__(self):
        """Enter the async context manager; delegates to the inner client."""
        await self._client.__aenter__()
        return self
    async def __aexit__(self, *exc_details):
        """Exit the async context manager; closes the inner client pipeline."""
        await self._client.__aexit__(*exc_details)
| |
# -*- coding: utf-8 -*-
# Copyright (C) 2011 by Researchstudio iSpace
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import re
import serial
import threading
import time
class FormatError(Exception):
    """Raised when a line read from the USB-WDE1 does not match the
    expected wire format.

    Derives from Exception (not BaseException): BaseException is reserved
    for interpreter-exit signals such as KeyboardInterrupt/SystemExit, and
    subclassing it lets the error escape generic ``except Exception``
    handlers.
    """

    def __init__(self, msg):
        # Call the base initializer so args/pickling work as expected.
        super(FormatError, self).__init__(msg)
        self._msg = msg

    def __str__(self):
        return ("FormatError: {0}".format(self._msg))
class Event(object):
    """Event represents a notification event that holds all relevant
    information of a sensor update.

    Attributes:
      - adr:         The address of the sensor represented as an integer.
                     Ranging from 0 to 7 if the sensor is not a kombi
                     sensor, 8 otherwise.
      - timestamp:   The Unix timestamp of the measurement.
      - kombi:       True if this sensor represents a kombi sensor, False
                     otherwise.
      - changed:     True if anything changed regarding the last state of
                     the sensor.
      - event_type:  The type of the event. Represents one of
                     WDE1.SENSOR_*.
      - temperature: The current temperature of the weather sensor.
      - humidity:    The current humidity of the weather sensor.
      - windspeed:   The current wind speed if the event represents a
                     kombi sensor.
      - raincycles:  The current rain cycles if the event represents a
                     kombi sensor.
      - rain:        True if the sensor is a kombi one and it is raining,
                     False otherwise.
    """
    # Plain attribute container: fields are set dynamically by
    # _Sensor.get_event().
    pass
class _Sensor(object):
    """Internal state holder for a single sensor address.

    Tracks the latest measurement values, whether anything changed since
    the last emitted event, and the event type describing the most recent
    transition (available / update / unreachable).
    """

    # Attribute names that are routed into self._values by __setattr__.
    _MEASUREMENT_KEYS = ("temperature", "humidity", "windspeed",
                         "raincycles", "rain")

    def __init__(self, adr, kombi=False):
        """Create sensor state for address *adr*.

        :param adr:   sensor address (0-7 plain, 8 for the kombi sensor)
        :param kombi: True for the combined wind/rain sensor, which carries
                      three extra measurement fields.
        """
        values = {"temperature": None, "humidity": None}
        if kombi:
            values.update(windspeed=None, raincycles=None, rain=None)
        self._values = values
        self._kombi = kombi
        self._adr = adr
        self._changed = False
        self._timestamp = int(time.time())
        self._event_type = WDE1.SENSOR_UNREACHABLE

    def __setattr__(self, key, value):
        """Intercept measurement attributes and track state transitions.

        Non-measurement attributes are stored normally; measurement
        attributes update self._values, the event type, the timestamp and
        the changed flag.
        """
        if key not in self._MEASUREMENT_KEYS:
            object.__setattr__(self, key, value)
            return
        previous = self._values[key]
        # Derive the event type from the None/non-None transition.
        if previous is None and value is not None:
            self._event_type = WDE1.SENSOR_AVAILABLE
        elif previous is not None and value is None:
            self._event_type = WDE1.SENSOR_UNREACHABLE
        elif previous is not None and value is not None:
            self._event_type = WDE1.SENSOR_UPDATE
        self._timestamp = int(time.time())
        if previous != value:
            self._changed = True
        self._values[key] = value

    def get_event(self):
        """Build an Event snapshot of the current state and reset the
        changed flag."""
        e = Event()
        for key in self._values:
            setattr(e, key, self._values[key])
        e.timestamp = self._timestamp
        e.adr = self._adr
        e.changed = self._changed
        e.event_type = self._event_type
        e.kombi = self._kombi
        # A consumed event resets change tracking until the next update.
        self._changed = False
        return e
class WDE1(threading.Thread):
ADR_KOMBI = 8
SENSOR_AVAILABLE = "SENSOR_AVAILABLE"
SENSOR_UPDATE = "SENSOR_UPDATE"
SENSOR_UNREACHABLE = "SENSOR_UNREACHABLE"
NOTIFY_CHANGE = 1
NOTIFY_ALL = 2
    def __init__(self, port):
        """Create a reader thread for the USB-WDE1 on serial *port*.

        :param port: serial device name (e.g. "/dev/ttyUSB0") passed
                     straight to serial.Serial.
        """
        threading.Thread.__init__(self)
        # Daemon thread: does not keep the interpreter alive at shutdown.
        self.daemon = True
        self._observers = []      # observers notified only on value changes
        self._observers_all = []  # observers notified on every update
        # Indices 0-7 are plain temperature/humidity sensors; index 8
        # (ADR_KOMBI) is the combined wind/rain sensor.
        self._sensors2 = [_Sensor(i) for i in range(8)]
        self._sensors2.append(_Sensor(8, kombi=True))
        self._version = None      # cached lazily by the `version` property
        # NOTE(review): opens the serial port immediately; presumably may
        # raise serial.SerialException when the device is absent — callers
        # should be prepared for that.
        self.ser = serial.Serial(port)
def add_observer(self, fn, adr=None, notify=NOTIFY_CHANGE):
if notify == WDE1.NOTIFY_CHANGE:
if not (fn,adr) in self._observers:
self._observers.append((fn,adr))
else:
if not (fn,adr) in self._observers_all:
self._observers_all.append((fn,adr))
@property
def observers_change(self):
return self._observers
@property
def observers_all(self):
return self._observers_all
@property
def version(self):
if not self._version:
self._version = self._get_version()
return self._version
def _get_version(self):
if not self.ser.isOpen():
self.ser.open()
self.ser.flushInput
self.ser.flushOutput()
self.ser.write("?")
self.ser.readline() # empty line
line2 = self.ser.readline()
self.ser.readline() # baud rate
#TODO: error handling
matches = re.match("^ELV USB-WDE1 v([0-9.]+)", line2)
return matches.group(1)
def _parse_line(self, raw):
matches = re.search(
"^\$1;1;;" +
8 * "([-0-9,]*);" +
8 * "([0-9]{0,2});" +
"([-0-9,]*);([0-9]{0,2});([0-9]{0,3},?[0-9]?);([0-9]{0,4});([01]{0,1});0\r\n$", raw
)
if not matches:
# currently only OpenLog format is supported
# TODO: should switch format here
raise FormatError("currently only OpenLog format is supported")
# substitute "" by None
values = [x if x != "" else None for x in matches.groups()]
# first 8 values are floats per spec (temperature)
values[0:8] = [float(x.replace(",", ".")) if x else None for x in
values[0:8]]
# second 8 values are ints per spec (humidity)
values[8:16] = [int(x) if x else None for x in values[8:16]]
# rest is float,int,float,int,bool per spec (kombi sensor)
values[16] = float(values[16].replace(",", ".")) if values[16] else None
values[17] = int(values[17]) if values[17] else None
values[18] = float(values[18].replace(",",".")) if values[18] else None
values[19] = int(values[19]) if values[19] else None
values[20] = True if values[20] == "1" else False
return values
def _update_state(self, values):
for i in range(0, 8):
self._sensors2[i].temperature = values[i]
self._sensors2[i].humidity = values[i+8]
self._sensors2[WDE1.ADR_KOMBI].temperature = values[16]
self._sensors2[WDE1.ADR_KOMBI].humidity = values[17]
self._sensors2[WDE1.ADR_KOMBI].windspeed = values[18]
self._sensors2[WDE1.ADR_KOMBI].raincycles = values[19]
self._sensors2[WDE1.ADR_KOMBI].rain = values[20]
def _notify(self):
for key in range(0, 9):
e = self._sensors2[key].get_event()
if e.changed:
for (obs,adr) in self._observers:
if adr == None or adr == key:
t= threading.Thread(target=obs,
args=(self,e))
t.start()
for (obs,adr) in self._observers_all:
if adr == None or adr == key:
t= threading.Thread(target=obs,
args=(self,e))
t.start()
def start_reading(self,blocking=True):
self._run = True
if blocking:
self._do_run()
else:
self.start()
def _do_run(self):
self.ser.open()
line = self.ser.readline()
while self._run:
values = self._parse_line(line)
self._update_state(values)
self._notify()
line = self.ser.readline()
def run(self):
self._do_run()
def close(self):
self._run = False
self.ser.close()
| |
import json
from semNets.Topology import Topology
from semNets.Primitives import Node, Relation, RelationAttributeType, RelationType, Attribute
def parseLog(file, parent):
    """Parse a Hearthstone power log into a semantic-network Topology.

    Sets up the Game/Turn/Player scaffolding, replays the initial card
    distribution (everything up to "The Coin"), then dispatches every
    TRANSITIONING / ATTACK / DAMAGE line to the handlers registered in
    ``functions`` until a CREATE_GAME marker or end of file.

    :param file: iterable of log lines (e.g. an open log file)
    :param parent: static Topology providing the type nodes (Game, Turn, ...)
    :return: the populated per-game Topology
    """
    counter = 0
    current_linenumber = 0
    test = ""
    top = Topology()
    top.setParent(parent)
    #### utilities ####
    # Mutable parser state threaded through all ProcessPhrase_* handlers.
    utilities = {
        # counters
        "counters" : {
            "friendlyPlayerCounter": 0,
            "hostilePlayerCounter": 0,
            "minionCount": 0,
            "hearthstone_internal_id_count": 0,
            "Location_Friendly_Deck_count": 0,
            "Location_Friendly_Hand_count": 0,
            "Location_Friendly_Board_count": 0,
            "Location_Friendly_Graveyard_count": 0,
            "Location_Hostile_Deck_count": 0,
            "Location_Hostile_Hand_count": 0,
            "Location_Hostile_Board_count": 0,
            "Location_Hostile_Graveyard_count": 0,
            "Location_Friendly_Play_Weapon_count": 0,
            "Location_Hostile_Play_Weapon_count": 0,
            "Location_Hostile_Secrets_count": 0,
            "Location_Friendly_Secrets_count": 0
        },
        # heroes
        "heroes" : {
            "Jaina Proudmoore": "Mage",
            "Rexxar": "Hunter",
            "Uther Lightbringer": "Paladin",
            "Garrosh Hellscream": "Warrior",
            "Malfurion Stormrage": "Druid",
            "Gul'dan": "Warlock",
            "Thrall": "Shaman",
            "Anduin Wrynn": "Priest",
            "Valeera Sanguinar": "Rogue"
        },
        # zones: maps the zone text in the log to the static node name
        "zones" : {
            "FRIENDLY DECK": "Location_Friendly_Deck",
            "FRIENDLY HAND": "Location_Friendly_Hand",
            "FRIENDLY PLAY": "Location_Friendly_Board",
            "FRIENDLY GRAVEYARD": "Location_Friendly_Graveyard",
            "OPPOSING DECK": "Location_Hostile_Deck",
            "OPPOSING HAND": "Location_Hostile_Hand",
            "OPPOSING PLAY": "Location_Hostile_Board",
            "OPPOSING GRAVEYARD": "Location_Hostile_Graveyard",
            "FRIENDLY PLAY (Hero)": "Location_Friendly_Play_Hero",
            "FRIENDLY PLAY (Hero Power)": "Location_Friendly_Play_Hero_Power",
            "FRIENDLY PLAY (Weapon)": "Location_Friendly_Play_Weapon",
            "FRIENDLY SECRET": "Location_Friendly_Secrets",
            "OPPOSING PLAY (Hero)": "Location_Hostile_Play_Hero",
            "OPPOSING PLAY (Hero Power)": "Location_Hostile_Play_Hero_Power",
            "OPPOSING PLAY (Weapon)": "Location_Hostile_Play_Weapon",
            "OPPOSING SECRET": "Location_Hostile_Secrets"
        },
        # newest node instance per tracked location (None until created)
        "current_locations": {
            "Location_Friendly_Deck": None,
            "Location_Friendly_Hand": None,
            "Location_Friendly_Board": None,
            "Location_Friendly_Graveyard": None,
            "Location_Hostile_Deck": None,
            "Location_Hostile_Hand": None,
            "Location_Hostile_Board": None,
            "Location_Hostile_Graveyard": None
        },
        # minion nodes currently contained in each tracked location
        "current_locations_filled" : {
            "Location_Friendly_Deck": [],
            "Location_Friendly_Hand": [],
            "Location_Friendly_Board": [],
            "Location_Friendly_Graveyard": [],
            "Location_Hostile_Deck": [],
            "Location_Hostile_Hand": [],
            "Location_Hostile_Board": [],
            "Location_Hostile_Graveyard": []
        },
        # this dictionary maps a minion_id to an actual minion node, which may change every turn
        # a was_a relation is created if the minion changes
        "minion_history" : {},
        # this dictionary maps a minion_id to its type
        "minion_type" : {},
        # this dictionary maps a minion to their current health in order to calculate it more easily
        "minion_health" : {},
        "minion_damage" : {},
        ### utilities ###
        "current_player" : -1,
        "zones_used_in_this_turn" : {},
        "current_turn" : 1,
        "current_turn_node" : None,
        "current_subturn" : 0,
        "current_subturn_node" : None
    }
    # initialization
    game0 = createNode(top, "Game0")
    createRelation(top, game0, "is_a", getNode(top, "Game"))
    #this is the initial turn
    gameSetup = createNode(top, "GameSetup")
    createRelation(top, gameSetup, "is_a", getNode(top, "Turn"))
    utilities["current_turn_node"] = gameSetup
    subturn0 = createNode(top, "SubTurn_0")
    createRelation(top, subturn0, "is_a", getNode(top, "Subturn"))
    createRelation(top, gameSetup, "has", subturn0)
    utilities["current_subturn_node"] = subturn0
    utilities["current_subturn"] = 1
    friendlyPlayer = createNode(top, "FriendlyPlayer_{0}".format(utilities["counters"]["friendlyPlayerCounter"]))
    hostilePlayer = createNode(top, "HostilePlayer_{0}".format(utilities["counters"]["hostilePlayerCounter"]))
    friendly_player_node = getNode(top, "Friendly_Player")
    hostile_player_node = getNode(top, "Hostile_Player")
    createRelation(top, friendlyPlayer, "is_a", friendly_player_node)
    createRelation(top, hostilePlayer, "is_a", hostile_player_node)
    createRelation(top, game0, "has", friendlyPlayer)
    createRelation(top, game0, "has", hostilePlayer)
    nextLine, phrase, current_linenumber = goToNextPhrases(file, ["TRANSITIONING"], current_linenumber)
    minion_type = ""
    # set up of initial game state
    # detect initialization end: when "The Coin" is transitioned the initialization is over
    while nextLine != -1 and minion_type != "The Coin":
        minionCount = utilities["counters"]["minionCount"]
        nextLine = nextLine.split("TRANSITIONING card [", 1)[1]
        lineEnd = nextLine.split("] to ", 1)[1][:-1]
        valuePairs = parseValues_Transitioning(nextLine)
        minion_id = valuePairs["id"]
        minion_node_name = "Minion_{0}".format(minionCount)
        minion_node = getOrCreateNode(top,minion_node_name)
        hearthstone_internal_card_id_node = createNode(top, "hearthstone_internal_card_id_{0}".format(minion_id))
        createRelation(top, minion_node, "has", hearthstone_internal_card_id_node)
        #TODO link to static network -> hearthstone_internal_card_id_XX - is_a - hearthstone_internal_card_id
        #add minion to the minion_history dict
        utilities["minion_history"][minion_id] = minion_node
        if(top.existsNodeByName("{0}_0".format(utilities["zones"][lineEnd]))):
            targetZone = top.getNodeByName("{0}_0".format(utilities["zones"][lineEnd]))
        else:
            targetZone = createNode(top, "{0}_0".format(utilities["zones"][lineEnd]))
            targetZone_node = getNode(parent, utilities["zones"][lineEnd])
            createRelation(top, targetZone, "is_a", targetZone_node)
            #update counter
            utilities["counters"][utilities["zones"][lineEnd]+"_count"] = 1
        createRelation(top, targetZone, "has", minion_node)
        if "name" in valuePairs.keys():
            minion_type = valuePairs["name"]
            minion_type_node = getNode(parent, minion_type)
            if minion_type_node is None:
                # Not a known card name -> must be a hero; heroes start
                # with 30 health.
                minion_type_node = getNode(parent, utilities["heroes"][minion_type])
                minion_health = 30
                utilities["minion_health"][minion_id] = minion_health
                utilities["minion_damage"][minion_id] = 0
                minion_curhealth_rel = createRelation(top, minion_node, "has", getNode(parent, "CurHealth"))
                minion_curhealth_rel.createAttribute(RelationAttributeType("amount"), minion_health)
            else:
                minion_health_rel = getRelation(parent, minion_type, "has", "MaxHealth")
                if minion_health_rel is None:
                    #TODO: make it a spell
                    print("Minion '{}' has no maxhealth. -> is no minion".format(minion_type))
                else:
                    # NOTE(review): assert on a tuple is always true;
                    # probably meant `assert minion_health_rel is not
                    # None, "..."` (and it is redundant in this branch).
                    assert (minion_health_rel is not None, "{0} has no attribute 'MaxHealth'!")
                    minion_health = minion_health_rel.getAttributeValue(RelationAttributeType("amount"))
                    utilities["minion_health"][minion_id] = minion_health
                    utilities["minion_damage"][minion_id] = 0
                    # NOTE(review): passes a RelationAttributeType here,
                    # while other call sites pass the plain string
                    # "amount"; createRelation wraps its argument in
                    # RelationAttributeType again -- confirm idempotence.
                    createRelation(top, minion_node, "has", getNode(parent, "CurHealth"), RelationAttributeType("amount"), minion_health)
            utilities["minion_type"][minion_id] = minion_type_node
            createRelation(top, minion_node, "is_a", minion_type_node)
        else:
            utilities["minion_type"][minion_id] = None
        minionCount = minionCount + 1
        utilities["counters"]["minionCount"] = minionCount
        if minion_type == "The Coin":
            print("The Coin passed")
        nextLine, phrase, current_linenumber = goToNextPhrases(file, ["TRANSITIONING"], current_linenumber)
    nextLine, phrase, current_linenumber = goToNextPhrases(file, ["TRANSITIONING", "BlockType=ATTACK", "tag=DAMAGE"], current_linenumber)
    #normal game play starts here
    while nextLine != -1 and "CREATE_GAME" not in nextLine:
        # Dispatch to the handler registered for the matched phrase.
        utilities, top = functions[phrase](nextLine, utilities, top)
        nextLine, phrase, current_linenumber = goToNextPhrases(file, ["TRANSITIONING", "BlockType=ATTACK", "tag=DAMAGE", "CREATE_GAME"], current_linenumber)
    return top
def goToNextPhrases(file, phrases, current_linenumber):
    """Advance ``file`` to the next line containing one of ``phrases``.

    Lines containing "PowerTaskList.DebugPrintPower()" are skipped
    entirely (they duplicate the GameState log entries).

    BUG FIX: removed the leftover per-line debug ``print`` (it spammed
    one line of output per log line) and hoisted the PowerTaskList check
    out of the phrase loop, where it was evaluated per phrase although it
    only depends on the line.

    :param file: iterable of log lines
    :param phrases: substrings to search for
    :param current_linenumber: running line counter
    :return: (line, phrase, updated_linenumber) for the first match, or
        (-1, "", updated_linenumber) when the input is exhausted
    """
    for line in file:
        current_linenumber += 1
        # Skip duplicated PowerTaskList output; only GameState lines are
        # of interest here.
        if "PowerTaskList.DebugPrintPower()" in line:
            continue
        for phrase in phrases:
            if phrase in line:
                return line, phrase, current_linenumber
    return -1, "", current_linenumber
def ProcessPhrase_Transition(org_line, utilities, topology):
    """Handle a "TRANSITIONING card [...] to ZONE" log line.

    Moves (or creates) the minion mentioned in the line into the target
    zone, rolling minion and zone nodes forward via "was" relations, and
    records the move as a new subturn.

    BUG FIXES: the "was" relation to the previous subturn used the
    misspelled key "current_subtturn_node" (KeyError whenever a previous
    subturn existed); ``utilities["current_player"]`` is now updated after
    a player change (it was never assigned, so every handled line started
    a new turn); the one-shot ``while`` guard is now a plain ``if``.
    """
    line = org_line.split("TRANSITIONING card [", 1)[1]
    lineEnd = line.split("] to ", 1)[1][:-1]
    # Lines without a target zone carry no transition information.
    if lineEnd == "":
        return utilities, topology
    zone = utilities["zones"][lineEnd]
    valuePairs = parseValues_Transitioning(line)
    new_player = valuePairs["player"]
    # check if a new Turn is needed
    if new_player != utilities["current_player"]:
        utilities, topology = startNewTurn(utilities, topology)
        utilities["current_player"] = new_player
    # since transitions are also subturns, create a new subturn
    newSubturn = createNode(topology, "SubTurn_{}".format(utilities["current_subturn"]))
    createRelation(topology, newSubturn, "is_a", getNode(topology, "Subturn"))
    if utilities["current_subturn_node"] is not None:
        createRelation(topology, newSubturn, "was", utilities["current_subturn_node"])
    utilities["current_subturn"] += 1
    utilities["current_subturn_node"] = newSubturn
    minionCount = utilities["counters"]["minionCount"]
    utilities["counters"]["minionCount"] = minionCount + 1
    minion_id = valuePairs["id"]
    minion_node_name = "Minion_{0}".format(minionCount)
    minion_node = getNode(topology, minion_node_name)
    if minion_node is None:
        minion_node = createNode(topology, minion_node_name)
    hearthstone_internal_card_id_node = getOrCreateNode(topology, "hearthstone_internal_card_id_{0}".format(minion_id))
    createRelation(topology, minion_node, "has", hearthstone_internal_card_id_node)
    # create relation to previous state of the minion if it existed before
    if minion_id in utilities["minion_history"].keys():
        previous = utilities["minion_history"][minion_id]
        createRelation(topology, minion_node, "was", previous)
        utilities["minion_history"][minion_id] = minion_node
    else:
        utilities["minion_history"][minion_id] = minion_node
    # create a new zone instance if necessary (one per zone and turn)
    current_zone_name = "{0}_{1}".format(zone, utilities["counters"][zone + "_count"])
    if current_zone_name in utilities["zones_used_in_this_turn"].keys():
        targetZone = utilities["zones_used_in_this_turn"][current_zone_name]
    else:
        targetZone = createNode(topology, current_zone_name)
        pastZone = getNode(topology, "{0}_{1}".format(zone, utilities["counters"][zone + "_count"] - 1))
        if pastZone is None:
            parentZone = getNode(topology.parent, utilities["zones"][lineEnd])
            createRelation(topology, targetZone, "is_a", parentZone)
        else:
            createRelation(topology, targetZone, "was", pastZone)
        # update counter
        utilities["counters"][zone + "_count"] = utilities["counters"][zone + "_count"] + 1
        utilities["zones_used_in_this_turn"][current_zone_name] = targetZone
    createRelation(topology, targetZone, "has", minion_node)
    # associate targetZone with the current subturn
    createRelation(topology, newSubturn, "has", targetZone)
    # update the current_locations dicts (only plain board/hand/deck/
    # graveyard locations are tracked per turn)
    if not zone in ["Location_Friendly_Play_Hero", "Location_Hostile_Play_Hero", "Location_Hostile_Play_Hero_Power", "Location_Friendly_Play_Hero_Power", "Location_Friendly_Play_Weapon", "Location_Hostile_Play_Weapon", "Location_Friendly_Secrets", "Location_Hostile_Secrets"] :
        utilities["current_locations_filled"][zone].append(minion_node)
        utilities["current_locations"][zone] = targetZone
    if "name" in valuePairs.keys():
        minion_type = valuePairs["name"]
        minion_type_node = getNode(topology.parent, minion_type)
        if minion_type_node is None:
            # unknown card name -> must be a hero; map it to its class
            minion_type_node = getNode(topology.parent, utilities["heroes"][minion_type])
        createRelation(topology, minion_node, "is_a", minion_type_node)
        # when the minion is not in the dict "minion_type", then it must be
        # a new one that hasn't been in the game yet
        if minion_id not in utilities["minion_type"].keys() or utilities["minion_type"][minion_id] is None:
            utilities["minion_type"][minion_id] = minion_type_node
            # get the relation that indicates how much maxhealth a minion
            # has (from the static network)
            minion_maxhealth_rel = getRelation(topology.parent, minion_type, "has", "MaxHealth")
            if minion_maxhealth_rel is None:
                # no MaxHealth -> not a minion (e.g. a spell)
                print("minion '{}' has no maxhealth -> is no minion".format(minion_type))
            else:
                # initialise health/damage bookkeeping from MaxHealth
                minion_health = minion_maxhealth_rel.getAttributeValue(RelationAttributeType("amount"))
                utilities["minion_health"][minion_id] = minion_health
                utilities["minion_damage"][minion_id] = 0
                # attach the current health (== maxhealth at this point)
                minion_curhealth_rel = createRelation(topology, minion_node, "has", getNode(topology.parent, "CurHealth"))
                minion_curhealth_rel.createAttribute(RelationAttributeType("amount"), minion_health)
    return utilities, topology
def ProcessPhrase_Attack(org_line, utilities, topology):
    """Handle a "BLOCK_START BlockType=ATTACK" log line.

    Creates fresh minion nodes for attacker and target (linked via "was"
    to their previous incarnations) and records the attack as a new
    subturn.

    BUG FIX: ``utilities["current_player"]`` is now updated after a player
    change; it was never assigned anywhere, so every handled line started
    a new turn.
    """
    line = org_line.split("BLOCK_START BlockType=ATTACK ", 1)[1]
    valuePairs = parseValues_Nested(line)
    source_minion_id = valuePairs["Entity"]["id"]
    target_minion_id = valuePairs["Target"]["id"]
    if source_minion_id not in utilities["minion_history"].keys():
        # TODO: what about Lord Jaraxxus? The minion_id doesn't show up,
        # because it changes mid game in an attack block (e.g. from 35 to 98)
        print("source_minion_id {0} not in minion_history!".format(source_minion_id))
        return utilities, topology
    source_minion_node_old = utilities["minion_history"][source_minion_id]
    target_minion_node_old = utilities["minion_history"][target_minion_id]
    # roll both minions forward to fresh nodes for this attack
    currentMinion_count = utilities["counters"]["minionCount"]
    source_minion_node_new = createNode(topology, "Minion_{}".format(currentMinion_count))
    currentMinion_count += 1
    target_minion_node_new = createNode(topology, "Minion_{}".format(currentMinion_count))
    utilities["counters"]["minionCount"] = currentMinion_count + 1
    createRelation(topology, source_minion_node_new, "was", source_minion_node_old)
    createRelation(topology, target_minion_node_new, "was", target_minion_node_old)
    utilities["minion_history"][source_minion_id] = source_minion_node_new
    utilities["minion_history"][target_minion_id] = target_minion_node_new
    new_player = valuePairs["Entity"]["player"]
    if new_player != utilities["current_player"]:
        utilities, topology = startNewTurn(utilities, topology)
        utilities["current_player"] = new_player
    # record the attack as its own subturn
    newSubturn = createNode(topology, "SubTurn_{}".format(utilities["current_subturn"]))
    utilities["current_subturn"] += 1
    createRelation(topology, newSubturn, "is_a", getNode(topology.parent, "Subturn"))
    if utilities["current_subturn_node"] is not None:
        createRelation(topology, newSubturn, "was", utilities["current_subturn_node"])
    utilities["current_subturn_node"] = newSubturn
    createRelation(topology, newSubturn, "has", getNode(topology.parent, "Action_MinionAttack"))
    createRelation(topology, newSubturn, "has", source_minion_node_new)
    createRelation(topology, source_minion_node_new, "is_a", getNode(topology.parent, "Action_MinionAttack_Source"))
    createRelation(topology, newSubturn, "has", target_minion_node_new)
    createRelation(topology, target_minion_node_new, "is_a", getNode(topology.parent, "Action_MinionAttack_Target"))
    return utilities, topology
def ProcessPhrase_Damage(org_line, utilities, topology):
    """Handle a "TAG_CHANGE ... tag=DAMAGE" log line.

    Updates the damage bookkeeping for the affected minion and attaches a
    CurHealth relation carrying the remaining health.

    BUG FIXES: missing-Entity and unknown-minion cases used to print a
    diagnostic and then crash with a guaranteed KeyError on the following
    line -- they now return early; message typos fixed ("WARINING",
    "nonexistend"); ``utilities["current_player"]`` is now updated after a
    player change (it was never assigned anywhere).
    """
    # "processing" lines are progress output, not actual tag changes
    if "processing" in org_line:
        return utilities, topology
    line = org_line.split("TAG_CHANGE ", 1)[1]
    valuePairs = parseValues_Nested(line)
    if "Entity" not in valuePairs.keys():
        print("ERROR: TAG_CHANGE without Entity: {}".format(org_line))
        return utilities, topology
    new_player = valuePairs["Entity"]["player"]
    if new_player != utilities["current_player"]:
        utilities, topology = startNewTurn(utilities, topology)
        utilities["current_player"] = new_player
    minion_id = valuePairs["Entity"]["id"]
    minion_type = valuePairs["Entity"]["name"]
    if minion_id not in utilities["minion_history"].keys():
        print("WARNING: tag change on nonexistent minion {}".format(minion_type))
        return utilities, topology
    minion_node = utilities["minion_history"][minion_id]
    damage = int(valuePairs["value"])
    health = utilities["minion_health"][minion_id]
    utilities["minion_damage"][minion_id] = damage
    # remaining health = initial health minus total damage
    createRelation(topology, minion_node, "has",
                   getNode(topology.parent, "CurHealth"), "amount",
                   health - damage)
    return utilities, topology
def ProcessPhrase_Health(org_line, utlities, topology):
    """Placeholder for "tag=HEALTH" lines; currently a no-op.

    BUG FIX: returns (utilities, topology) like every other
    ProcessPhrase_* handler instead of None, so it can be registered in
    the ``functions`` dispatch table without breaking parseLog's
    ``utilities, top = functions[phrase](...)`` unpacking.
    """
    return utlities, topology
def startNewTurn(utilities, topology):
    """Open a new Turn node and roll every tracked location forward.

    Each tracked location gets a fresh instance node, linked via "was" to
    its predecessor and via "has" to every minion currently in it; the new
    turn node collects all of these fresh locations.  Resets the per-turn
    subturn/zone bookkeeping.
    """
    turn_node = createNode(topology, "Turn_{}".format(utilities["current_turn"]))
    utilities["current_turn"] += 1
    createRelation(topology, turn_node, "is_a", getNode(topology, "Turn"))
    createRelation(topology, turn_node, "was", utilities["current_turn_node"])
    utilities["current_turn_node"] = turn_node
    for loc_name, prev_loc_node in utilities["current_locations"].items():
        # Fresh per-turn copy of the location: logically the same place,
        # but a new node so per-turn membership can be expressed.
        counter_key = "{0}_count".format(loc_name)
        seq = utilities["counters"][counter_key]
        fresh_loc = createNode(topology, "{0}_{1}".format(loc_name, seq))
        utilities["counters"][counter_key] = seq + 1
        if prev_loc_node is not None:
            createRelation(topology, fresh_loc, "was", prev_loc_node)
        # Future subturns should reference this fresh location.
        utilities["current_locations"][loc_name] = fresh_loc
        for occupant in utilities["current_locations_filled"][loc_name]:
            createRelation(topology, fresh_loc, "has", occupant)
        createRelation(topology, turn_node, "has", fresh_loc)
    utilities["current_subturn_node"] = None
    utilities["zones_used_in_this_turn"].clear()
    return utilities, topology
def parseValues_segment(segment):
    """Parse a flat "key1=value1 key2=value2 ..." segment into a dict.

    Values consisting only of digits (with an optional leading '-') are
    converted to int.  Keys never contain spaces; values may, except for
    their last whitespace-separated token, which becomes the next key.

    BUG FIX: the original compared strings with ``is`` (identity, not
    equality), which only works by accident for interned one-character
    strings in CPython; all comparisons now use ``==``/``!=``.

    :param segment: raw "k=v" text, e.g. "id=33 name=The Coin"
    :return: dict mapping keys to str or int values
    """
    dictionary = {}
    value = ""
    key = ""
    keybuffer = ""
    for char in segment:
        if char == "=":
            # Walk backwards over the accumulated text: everything after
            # the last space is the NEXT key, everything before it is the
            # value belonging to the PREVIOUS key.
            for c in reversed(value):
                if c == " ":
                    value = value[:-1]
                    break
                keybuffer = c + keybuffer
                value = value[:-1]
            if not value == "":
                if value.lstrip("-").isdigit():
                    value = int(value)
                dictionary[key] = value
            key = keybuffer
            keybuffer = ""
            value = ""
        else:
            value += char
    # Flush the trailing key/value pair.
    if value.lstrip("-").isdigit():
        value = int(value)
    if key != "" or value != "":
        dictionary[key] = value
    return dictionary
def parseValues_Transitioning(segment):
    """Parse the key=value pairs of a TRANSITIONING log line.

    Thin alias for parseValues_segment; kept as its own name so callers
    document which log phrase they are parsing.
    """
    return parseValues_segment(segment)
def parseValues_Nested(segment):
    """Parse a segment that may contain bracketed sub-entities.

    Text between '[' and ']' is parsed with parseValues_segment and stored
    as a nested dict under the key preceding the bracket; everything else
    is parsed like a flat segment (digit-only values become int).

    BUG FIX: string comparisons now use ``==`` instead of the identity
    check ``is`` (which only works by accident for interned strings in
    CPython); the unused locals ``index``, ``valuedict`` and ``peekchar``
    were removed.
    """
    dictionary = {}
    value = ""
    key = ""
    keybuffer = ""
    peeking = False   # True while inside a [...] sub-entity
    peekbuffer = ""   # collects the sub-entity text
    for char in segment:
        if peeking:
            if char == "]":
                # End of the bracketed block: parse it as a flat segment
                # and attach it under the pending key.
                dictionary[key] = parseValues_segment(peekbuffer)
                peekbuffer = ""
                key = ""
                peeking = False
            else:
                peekbuffer += char
        elif char == "[":
            peeking = True
        elif char == "=":
            # Same backwards scan as parseValues_segment: split the
            # accumulated text into previous value and next key.
            for c in reversed(value):
                if c == " ":
                    value = value[:-1]
                    break
                keybuffer = c + keybuffer
                value = value[:-1]
            if not value == "":
                if value.lstrip("-").isdigit():
                    value = int(value)
                dictionary[key] = value
            key = keybuffer
            keybuffer = ""
            value = ""
        else:
            value += char
    # Flush the trailing key/value pair.
    if value.lstrip("-").isdigit():
        value = int(value)
    if key != "" or value != "":
        dictionary[key] = value
    return dictionary
def createNode(topology, nodeName):
    """Create a Node with the given name and insert it into the topology.

    :param topology: Topology to insert into
    :param nodeName: unique name for the new node
    :return: the inserted Node
    :raises AssertionError: if a node of that name already exists in the
        topology or its parent
    """
    n = Node(nodeName)
    if topology.existsNode(n):
        # BUG FIX: the exception object was constructed but never raised,
        # so duplicate nodes were silently inserted.
        raise AssertionError(
            "Node {0} is already existing in the topology or its parent".format(n.name))
    topology.insertNode(n)
    return n
def getNode(topology, nodeName):
    """Look up a node by name; return None when it does not exist."""
    if topology.existsNodeByName(nodeName):
        return topology.getNodeByName(nodeName)
    return None
def getRelation(topology, source, relationtype, target):
    """Return the first relation matching (source, type, target), else None.

    ``source``/``target`` are node names and ``relationtype`` a type name;
    probe objects are built once and compared against every relation in
    the topology.
    """
    probe_source = Node(source)
    probe_target = Node(target)
    probe_type = RelationType(relationtype)
    for rel in topology.relations:
        if (rel.source == probe_source and rel.target == probe_target
                and rel.type == probe_type):
            return rel
    return None
def getOrCreateNode(topology, nodeName):
    """Return the existing node of that name, creating it when absent."""
    existing = getNode(topology, nodeName)
    if existing is not None:
        return existing
    return createNode(topology, nodeName)
def createRelation(topology, source, relationType, target, attributeType = None, attributeValue = None):
    """Create a relation source -[relationType]-> target and insert it.

    When ``attributeType`` is given, an attribute of that type carrying
    ``attributeValue`` is attached before insertion.

    :return: the inserted Relation
    """
    rel = Relation(RelationType(relationType), source, target)
    if attributeType is not None:
        rel.createAttribute(RelationAttributeType(attributeType), attributeValue)
    topology.insertRelation(rel)
    return rel
# Dispatch table mapping a log phrase (as returned by goToNextPhrases) to
# the handler that processes the matching line; used by parseLog's main
# loop.  (ProcessPhrase_Health is currently not registered.)
functions = {
    "TRANSITIONING" : ProcessPhrase_Transition,
    "BlockType=ATTACK" : ProcessPhrase_Attack,
    "tag=DAMAGE" : ProcessPhrase_Damage
}
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for supervisor.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import shutil
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
def _summary_iterator(test_dir):
  """Reads events from test_dir/events.

  Args:
    test_dir: Name of the test directory.

  Returns:
    A summary_iterator over the latest events file.
  """
  # BUG FIX: glob() returns paths in arbitrary filesystem order, so [-1]
  # was nondeterministic; sort so it deterministically picks the
  # lexicographically largest (i.e. highest-timestamp) events file.
  event_paths = sorted(glob.glob(os.path.join(test_dir, "event*")))
  return tf.train.summary_iterator(event_paths[-1])
class SupervisorTest(tf.test.TestCase):
def _TestDir(self, test_name):
test_dir = os.path.join(tf.test.get_temp_dir(), test_name)
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
return test_dir
# This test does not test much.
def testBasics(self):
logdir = self._TestDir("basics")
with tf.Graph().as_default():
my_op = tf.constant(1.0)
sv = tf.train.Supervisor(logdir=logdir)
sess = sv.prepare_or_wait_for_session("")
for _ in xrange(10):
sess.run(my_op)
sess.close()
sv.stop()
def testManagedSession(self):
logdir = self._TestDir("managed_session")
with tf.Graph().as_default():
my_op = tf.constant(1.0)
sv = tf.train.Supervisor(logdir=logdir)
with sv.managed_session("") as sess:
for _ in xrange(10):
sess.run(my_op)
# Supervisor has been stopped.
self.assertTrue(sv.should_stop())
  def testManagedSessionUserError(self):
    """A user exception inside managed_session propagates and stops sv."""
    logdir = self._TestDir("managed_user_error")
    with tf.Graph().as_default():
      my_op = tf.constant(1.0)
      sv = tf.train.Supervisor(logdir=logdir)
      last_step = None
      with self.assertRaisesRegexp(RuntimeError, "failing here"):
        with sv.managed_session("") as sess:
          for step in xrange(10):
            last_step = step
            if step == 1:
              raise RuntimeError("failing here")
            else:
              sess.run(my_op)
      # Supervisor has been stopped.
      self.assertTrue(sv.should_stop())
      self.assertEqual(1, last_step)
  def testManagedSessionIgnoreOutOfRangeError(self):
    """OutOfRangeError is swallowed as a normal end-of-input signal."""
    logdir = self._TestDir("managed_out_of_range")
    with tf.Graph().as_default():
      my_op = tf.constant(1.0)
      sv = tf.train.Supervisor(logdir=logdir)
      last_step = None
      with sv.managed_session("") as sess:
        for step in xrange(10):
          last_step = step
          if step == 3:
            raise tf.errors.OutOfRangeError(my_op.op.node_def, my_op.op,
                                            "all done")
          else:
            sess.run(my_op)
      # Supervisor has been stopped. OutOfRangeError was not thrown.
      self.assertTrue(sv.should_stop())
      self.assertEqual(3, last_step)
  def testManagedSessionDoNotKeepSummaryWriter(self):
    """Closing the writer between sessions yields one summary per file."""
    logdir = self._TestDir("managed_not_keep_summary_writer")
    with tf.Graph().as_default():
      summ = tf.scalar_summary(["c1", "c2", "c3"], tf.constant([1.0, 2.0, 3.0]))
      sv = tf.train.Supervisor(logdir=logdir, summary_op=None)
      with sv.managed_session("", close_summary_writer=True,
                              start_standard_services=False) as sess:
        sv.summary_computed(sess, sess.run(summ))
      with sv.managed_session("", close_summary_writer=True,
                              start_standard_services=False) as sess:
        sv.summary_computed(sess, sess.run(summ))
      # The summary iterator should report the summary once as we closed
      # the summary writer across the 2 sessions.
      rr = _summary_iterator(logdir)
      # The first event should list the file_version.
      ev = next(rr)
      self.assertEquals("brain.Event:2", ev.file_version)
      # The next one has the graph.
      ev = next(rr)
      self.assertTrue(ev.graph_def)
      # The next one should have the values from the summary.
      # But only once.
      ev = next(rr)
      self.assertProtoEquals("""
        value { tag: 'c1' simple_value: 1.0 }
        value { tag: 'c2' simple_value: 2.0 }
        value { tag: 'c3' simple_value: 3.0 }
        """, ev.summary)
      # The next one should be a stop message if we closed cleanly.
      ev = next(rr)
      self.assertEquals(tf.SessionLog.STOP, ev.session_log.status)
      # We should be done.
      self.assertRaises(StopIteration, lambda: next(rr))
  def testManagedSessionKeepSummaryWriter(self):
    """Keeping the writer across sessions records the summary twice."""
    logdir = self._TestDir("managed_keep_summary_writer")
    with tf.Graph().as_default():
      summ = tf.scalar_summary(["c1", "c2", "c3"], tf.constant([1.0, 2.0, 3.0]))
      sv = tf.train.Supervisor(logdir=logdir)
      with sv.managed_session("", close_summary_writer=False,
                              start_standard_services=False) as sess:
        sv.summary_computed(sess, sess.run(summ))
      with sv.managed_session("", close_summary_writer=False,
                              start_standard_services=False) as sess:
        sv.summary_computed(sess, sess.run(summ))
      # Now close the summary writer to flush the events.
      sv.summary_writer.close()
      # The summary iterator should report the summary twice as we reused
      # the same summary writer across the 2 sessions.
      rr = _summary_iterator(logdir)
      # The first event should list the file_version.
      ev = next(rr)
      self.assertEquals("brain.Event:2", ev.file_version)
      # The next one has the graph.
      ev = next(rr)
      self.assertTrue(ev.graph_def)
      # The next one should have the values from the summary.
      ev = next(rr)
      self.assertProtoEquals("""
        value { tag: 'c1' simple_value: 1.0 }
        value { tag: 'c2' simple_value: 2.0 }
        value { tag: 'c3' simple_value: 3.0 }
        """, ev.summary)
      # The next one should also have the values from the summary.
      ev = next(rr)
      self.assertProtoEquals("""
        value { tag: 'c1' simple_value: 1.0 }
        value { tag: 'c2' simple_value: 2.0 }
        value { tag: 'c3' simple_value: 3.0 }
        """, ev.summary)
      # We should be done.
      self.assertRaises(StopIteration, lambda: next(rr))
def _csv_data(self, logdir):
# Create a small data file with 3 CSV records.
data_path = os.path.join(logdir, "data.csv")
with open(data_path, "w") as f:
f.write("1,2,3\n")
f.write("4,5,6\n")
f.write("7,8,9\n")
return data_path
  def testManagedEndOfInputOneQueue(self):
    """Fixed-epoch reading from a single queue ends without error."""
    # Tests that the supervisor finishes without an error when using
    # a fixed number of epochs, reading from a single queue.
    logdir = self._TestDir("managed_end_of_input_one_queue")
    os.makedirs(logdir)
    data_path = self._csv_data(logdir)
    with tf.Graph().as_default():
      # Create an input pipeline that reads the file 3 times.
      filename_queue = tf.train.string_input_producer([data_path], num_epochs=3)
      reader = tf.TextLineReader()
      _, csv = reader.read(filename_queue)
      rec = tf.decode_csv(csv, record_defaults=[[1], [1], [1]])
      sv = tf.train.Supervisor(logdir=logdir)
      with sv.managed_session("") as sess:
        while not sv.should_stop():
          sess.run(rec)
  def testManagedEndOfInputTwoQueues(self):
    """Fixed-epoch reading through chained queues ends without error."""
    # Tests that the supervisor finishes without an error when using
    # a fixed number of epochs, reading from two queues, the second
    # one producing a batch from the first one.
    logdir = self._TestDir("managed_end_of_input_two_queues")
    os.makedirs(logdir)
    data_path = self._csv_data(logdir)
    with tf.Graph().as_default():
      # Create an input pipeline that reads the file 3 times.
      filename_queue = tf.train.string_input_producer([data_path], num_epochs=3)
      reader = tf.TextLineReader()
      _, csv = reader.read(filename_queue)
      rec = tf.decode_csv(csv, record_defaults=[[1], [1], [1]])
      shuff_rec = tf.train.shuffle_batch(rec, 1, 6, 4)
      sv = tf.train.Supervisor(logdir=logdir)
      with sv.managed_session("") as sess:
        while not sv.should_stop():
          sess.run(shuff_rec)
  def testManagedMainErrorTwoQueues(self):
    """A main-loop error propagates even with multiple input queues."""
    # Tests that the supervisor correctly raises a main loop
    # error even when using multiple queues for input.
    logdir = self._TestDir("managed_main_error_two_queues")
    os.makedirs(logdir)
    data_path = self._csv_data(logdir)
    with self.assertRaisesRegexp(RuntimeError, "fail at step 3"):
      with tf.Graph().as_default():
        # Create an input pipeline that reads the file 3 times.
        filename_queue = tf.train.string_input_producer([data_path],
                                                        num_epochs=3)
        reader = tf.TextLineReader()
        _, csv = reader.read(filename_queue)
        rec = tf.decode_csv(csv, record_defaults=[[1], [1], [1]])
        shuff_rec = tf.train.shuffle_batch(rec, 1, 6, 4)
        sv = tf.train.Supervisor(logdir=logdir)
        with sv.managed_session("") as sess:
          for step in range(9):
            if sv.should_stop():
              break
            elif step == 3:
              raise RuntimeError("fail at step 3")
            else:
              sess.run(shuff_rec)
def testSessionConfig(self):
logdir = self._TestDir("session_config")
with tf.Graph().as_default():
with tf.device("/cpu:1"):
my_op = tf.constant([1.0])
sv = tf.train.Supervisor(logdir=logdir)
sess = sv.prepare_or_wait_for_session(
"", config=tf.ConfigProto(device_count={"CPU": 2}))
for _ in xrange(10):
sess.run(my_op)
sess.close()
sv.stop()
  def testChiefCanWriteEvents(self):
    """A chief supervisor writes version, graph, summary and stop events."""
    logdir = self._TestDir("can_write")
    with tf.Graph().as_default():
      summ = tf.scalar_summary(["c1", "c2", "c3"], tf.constant([1.0, 2.0, 3.0]))
      sv = tf.train.Supervisor(is_chief=True, logdir=logdir, summary_op=None)
      sess = sv.prepare_or_wait_for_session("")
      sv.summary_computed(sess, sess.run(summ))
      sess.close()
      # Wait to make sure everything is written to file before stopping.
      time.sleep(1)
      sv.stop()
    rr = _summary_iterator(logdir)
    # The first event should list the file_version.
    ev = next(rr)
    self.assertEquals("brain.Event:2", ev.file_version)
    # The next one has the graph.
    ev = next(rr)
    ev_graph = tf.GraphDef()
    ev_graph.ParseFromString(ev.graph_def)
    self.assertProtoEquals(sess.graph.as_graph_def(add_shapes=True), ev_graph)
    # The next one should have the values from the summary.
    ev = next(rr)
    self.assertProtoEquals("""
      value { tag: 'c1' simple_value: 1.0 }
      value { tag: 'c2' simple_value: 2.0 }
      value { tag: 'c3' simple_value: 3.0 }
      """, ev.summary)
    # The next one should be a stop message if we closed cleanly.
    ev = next(rr)
    self.assertEquals(tf.SessionLog.STOP, ev.session_log.status)
    # We should be done.
    self.assertRaises(StopIteration, lambda: next(rr))
def testNonChiefCannotWriteEvents(self):
  # Both summary_computed() and start_standard_services() are chief-only
  # operations; a non-chief Supervisor must reject them with RuntimeError.

  def _summary_computed():
    with tf.Graph().as_default():
      sv = tf.train.Supervisor(is_chief=False)
      sess = sv.prepare_or_wait_for_session("")
      summ = tf.scalar_summary(["c1", "c2"], tf.constant([1.0, 2.0]))
      sv.summary_computed(sess, sess.run(summ))

  def _start_standard_services():
    with tf.Graph().as_default():
      sv = tf.train.Supervisor(is_chief=False)
      sess = sv.prepare_or_wait_for_session("")
      sv.start_standard_services(sess)

  self.assertRaises(RuntimeError, _summary_computed)
  self.assertRaises(RuntimeError, _start_standard_services)
def testNoLogdirButWantSummary(self):
  # Without a logdir there is no summary writer, so handing the Supervisor
  # a computed summary must fail with a clear error.
  with tf.Graph().as_default():
    values = tf.constant([1.0, 2.0, 3.0])
    summary_op = tf.scalar_summary(["c1", "c2", "c3"], values)
    supervisor = tf.train.Supervisor(logdir="", summary_op=None)
    session = supervisor.prepare_or_wait_for_session("")
    with self.assertRaisesRegexp(RuntimeError, "requires a summary writer"):
      supervisor.summary_computed(session, session.run(summary_op))
def testLogdirButExplicitlyNoSummaryWriter(self):
  # Passing summary_writer=None disables summary writing, but checkpoints
  # should still be saved to logdir.
  logdir = self._TestDir("explicit_no_summary_writer")
  with tf.Graph().as_default():
    tf.Variable([1.0], name="foo")
    const = tf.constant([1.0, 2.0, 3.0])
    summ = tf.scalar_summary(["c1", "c2", "c3"], const)
    sv = tf.train.Supervisor(logdir=logdir, summary_writer=None)
    sess = sv.prepare_or_wait_for_session("")
    # Check that a checkpoint is still generated.
    self._wait_for_glob(sv.save_path, 3.0)
    # Check that we cannot write a summary.
    with self.assertRaisesRegexp(RuntimeError, "requires a summary writer"):
      sv.summary_computed(sess, sess.run(summ))
def testNoLogdirButExplicitSummaryWriter(self):
  # Even with logdir="", an explicitly supplied SummaryWriter must receive
  # the events; the expected record order mirrors testChiefCanWriteEvents.
  logdir = self._TestDir("explicit_summary_writer")
  with tf.Graph().as_default():
    const = tf.constant([1.0, 2.0, 3.0])
    summ = tf.scalar_summary(["c1", "c2", "c3"], const)
    sw = tf.train.SummaryWriter(logdir)
    sv = tf.train.Supervisor(logdir="", summary_op=None, summary_writer=sw)
    sess = sv.prepare_or_wait_for_session("")
    sv.summary_computed(sess, sess.run(summ))
    sess.close()
    # Wait to make sure everything is written to file before stopping.
    time.sleep(1)
    sv.stop()

  # Check the summary was written to 'logdir'
  rr = _summary_iterator(logdir)

  # The first event should list the file_version.
  ev = next(rr)
  self.assertEquals("brain.Event:2", ev.file_version)

  # The next one has the graph.
  ev = next(rr)
  ev_graph = tf.GraphDef()
  ev_graph.ParseFromString(ev.graph_def)
  self.assertProtoEquals(sess.graph.as_graph_def(add_shapes=True), ev_graph)

  # The next one should have the values from the summary.
  ev = next(rr)
  self.assertProtoEquals("""
    value { tag: 'c1' simple_value: 1.0 }
    value { tag: 'c2' simple_value: 2.0 }
    value { tag: 'c3' simple_value: 3.0 }
    """, ev.summary)

  # The next one should be a stop message if we closed cleanly.
  ev = next(rr)
  self.assertEquals(tf.SessionLog.STOP, ev.session_log.status)

  # We should be done.
  self.assertRaises(StopIteration, lambda: next(rr))
def testNoLogdirSucceeds(self):
  # A Supervisor with an empty logdir should still hand out a working
  # session and shut down cleanly.
  with tf.Graph().as_default():
    tf.Variable([1.0, 2.0, 3.0])
    supervisor = tf.train.Supervisor(logdir="", summary_op=None)
    session = supervisor.prepare_or_wait_for_session("")
    session.close()
    supervisor.stop()
def testUseSessionManager(self):
  # Session creation should be delegated to an explicitly provided
  # SessionManager.
  with tf.Graph().as_default():
    tf.Variable([1.0, 2.0, 3.0])
    manager = tf.train.SessionManager()
    # Pass in session_manager. The additional init_op is ignored.
    supervisor = tf.train.Supervisor(logdir="", session_manager=manager)
    supervisor.prepare_or_wait_for_session("")
def testInitOp(self):
  # The default init op must leave variables at their declared initial
  # values.
  logdir = self._TestDir("default_init_op")
  with tf.Graph().as_default():
    var = tf.Variable([1.0, 2.0, 3.0])
    supervisor = tf.train.Supervisor(logdir=logdir)
    session = supervisor.prepare_or_wait_for_session("")
    self.assertAllClose([1.0, 2.0, 3.0], session.run(var))
    supervisor.stop()
def testInitFn(self):
  # When init_op is suppressed, a caller-provided init_fn must run and
  # initialize the variables itself.
  logdir = self._TestDir("default_init_op")
  with tf.Graph().as_default():
    var = tf.Variable([1.0, 2.0, 3.0])

    def _initialize(session):
      session.run(var.initializer)

    supervisor = tf.train.Supervisor(logdir=logdir, init_op=None,
                                     init_fn=_initialize)
    session = supervisor.prepare_or_wait_for_session("")
    self.assertAllClose([1.0, 2.0, 3.0], session.run(var))
    supervisor.stop()
def testInitOpWithFeedDict(self):
  # init_feed_dict must be fed when the init op runs, so a variable whose
  # initial value is a placeholder picks up the fed values.
  logdir = self._TestDir("feed_dict_init_op")
  with tf.Graph().as_default():
    placeholder = tf.placeholder(tf.float32, shape=(3,))
    var = tf.Variable(placeholder, name="v")
    supervisor = tf.train.Supervisor(
        logdir=logdir,
        init_op=tf.initialize_all_variables(),
        init_feed_dict={placeholder: [1.0, 2.0, 3.0]})
    session = supervisor.prepare_or_wait_for_session("")
    self.assertAllClose([1.0, 2.0, 3.0], session.run(var))
    supervisor.stop()
def testLocalInitOp(self):
  # Local variables and table initializers must be initialized by the
  # default local_init_op even when the regular init_op is suppressed.
  logdir = self._TestDir("default_local_init_op")
  with tf.Graph().as_default():
    # A local variable.
    v = tf.Variable([1.0, 2.0, 3.0],
                    trainable=False,
                    collections=[tf.GraphKeys.LOCAL_VARIABLES])

    # An entity which is initialized through a TABLE_INITIALIZER.
    w = tf.Variable([4, 5, 6], trainable=False, collections=[])
    tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, w.initializer)

    # This shouldn't add a variable to the VARIABLES collection responsible
    # for variables that are saved/restored from checkpoints.
    self.assertEquals(len(tf.all_variables()), 0)

    # Suppress normal variable inits to make sure the local one is
    # initialized via local_init_op.
    sv = tf.train.Supervisor(logdir=logdir, init_op=None)
    sess = sv.prepare_or_wait_for_session("")
    self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
    self.assertAllClose([4, 5, 6], sess.run(w))
    sv.stop()
def testLocalInitOpForNonChief(self):
  # A non-chief Supervisor must still run local_init_op, since local
  # variables are never restored from a checkpoint by the chief.
  logdir = self._TestDir("default_local_init_op_non_chief")
  with tf.Graph().as_default():
    with tf.device("/job:localhost"):
      # A local variable.
      v = tf.Variable([1.0, 2.0, 3.0],
                      trainable=False,
                      collections=[tf.GraphKeys.LOCAL_VARIABLES])
      # This shouldn't add a variable to the VARIABLES collection responsible
      # for variables that are saved/restored from checkpoints.
      self.assertEquals(len(tf.all_variables()), 0)

    # Suppress normal variable inits to make sure the local one is
    # initialized via local_init_op.
    sv = tf.train.Supervisor(logdir=logdir, init_op=None, is_chief=False)
    sess = sv.prepare_or_wait_for_session("")
    self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
    sv.stop()
def testInitOpFails(self):
  # prepare_or_wait_for_session must report variables that the custom
  # init_op left uninitialized ("w" here is never initialized).
  server = tf.train.Server.create_local_server()
  logdir = self._TestDir("default_init_op_fails")
  with tf.Graph().as_default():
    v = tf.Variable([1.0, 2.0, 3.0], name="v")
    tf.Variable([4.0, 5.0, 6.0], name="w")
    # w will not be initialized.
    sv = tf.train.Supervisor(logdir=logdir, init_op=v.initializer)
    with self.assertRaisesRegexp(RuntimeError,
                                 "Variables not initialized: w"):
      sv.prepare_or_wait_for_session(server.target)
def testInitOpFailsForTransientVariable(self):
  # Same uninitialized-variable check, but for LOCAL_VARIABLES initialized
  # through local_init_op instead of the regular init_op.
  server = tf.train.Server.create_local_server()
  logdir = self._TestDir("default_init_op_fails_for_local_variable")
  with tf.Graph().as_default():
    v = tf.Variable([1.0, 2.0, 3.0], name="v",
                    collections=[tf.GraphKeys.LOCAL_VARIABLES])
    tf.Variable([1.0, 2.0, 3.0], name="w",
                collections=[tf.GraphKeys.LOCAL_VARIABLES])
    # w will not be initialized.
    sv = tf.train.Supervisor(logdir=logdir, local_init_op=v.initializer)
    with self.assertRaisesRegexp(
        RuntimeError, "Variables not initialized: w"):
      sv.prepare_or_wait_for_session(server.target)
def testSetupFail(self):
  # A non-chief Supervisor requires every variable to have an explicit
  # device assignment; the same graph succeeds once a device is set.
  logdir = self._TestDir("setup_fail")
  with tf.Graph().as_default():
    tf.Variable([1.0, 2.0, 3.0], name="v")
    with self.assertRaisesRegexp(ValueError, "must have their device set"):
      tf.train.Supervisor(logdir=logdir, is_chief=False)
  with tf.Graph().as_default(), tf.device("/job:ps"):
    tf.Variable([1.0, 2.0, 3.0], name="v")
    tf.train.Supervisor(logdir=logdir, is_chief=False)
def testDefaultGlobalStep(self):
  # A variable named "global_step" should be picked up by the Supervisor
  # automatically.
  logdir = self._TestDir("default_global_step")
  with tf.Graph().as_default():
    tf.Variable(287, name="global_step")
    supervisor = tf.train.Supervisor(logdir=logdir)
    session = supervisor.prepare_or_wait_for_session("")
    self.assertEquals(287, session.run(supervisor.global_step))
    supervisor.stop()
def testRestoreFromMetaGraph(self):
  # Phase 1: save a checkpoint (and its MetaGraph). Phase 2: rebuild the
  # graph from the MetaGraph, restore, and verify the value round-trips.
  logdir = self._TestDir("restore_from_meta_graph")
  with tf.Graph().as_default():
    tf.Variable(1, name="v0")
    sv = tf.train.Supervisor(logdir=logdir)
    sess = sv.prepare_or_wait_for_session("")
    filename = sv.saver.save(sess, sv.save_path)
    sv.stop()
  # Create a new Graph and Supervisor and recover.
  with tf.Graph().as_default():
    new_saver = tf.train.import_meta_graph(".".join([filename, "meta"]))
    self.assertIsNotNone(new_saver)
    sv2 = tf.train.Supervisor(logdir=logdir, saver=new_saver)
    sess = sv2.prepare_or_wait_for_session("")
    self.assertEquals(1, sess.run("v0:0"))
    sv2.saver.save(sess, sv2.save_path)
    sv2.stop()
def _wait_for_glob(self, pattern, timeout_secs):
  """Wait for a checkpoint file to appear.

  Polls the filesystem every 50ms until at least one file matches
  `pattern`, and fails the test if none appears within `timeout_secs`.

  Args:
    pattern: A string.
    timeout_secs: How long to wait for in seconds.
  """
  end_time = time.time() + timeout_secs
  while time.time() < end_time:
    # Any match at all is enough; an empty glob result is falsy.
    if tf.gfile.Glob(pattern):
      return
    time.sleep(0.05)
  # self.fail() is the idiomatic unconditional failure, replacing the
  # original assertFalse(True, ...).
  self.fail("Glob never matched any file: %s" % pattern)
# This test is based on the fact that the standard services start
# right away and get to run once before sv.stop() returns.
# We still sleep a bit to make the test robust.
def testStandardServicesWithoutGlobalStep(self):
  # Standard services (checkpointing + summaries) must run for a chief even
  # when the graph has no global_step variable.
  logdir = self._TestDir("standard_services_without_global_step")
  # Create a checkpoint.
  with tf.Graph().as_default():
    v = tf.Variable([1.0], name="foo")
    tf.scalar_summary(["v"], v)
    sv = tf.train.Supervisor(logdir=logdir)
    sess = sv.prepare_or_wait_for_session("")
    save_path = sv.save_path
    self._wait_for_glob(save_path, 3.0)
    self._wait_for_glob(os.path.join(logdir, "*events*"), 3.0)
    # Wait to make sure everything is written to file before stopping.
    time.sleep(1)
    sv.stop()

  # There should be an event file with a version number.
  rr = _summary_iterator(logdir)
  ev = next(rr)
  self.assertEquals("brain.Event:2", ev.file_version)
  ev = next(rr)
  ev_graph = tf.GraphDef()
  ev_graph.ParseFromString(ev.graph_def)
  self.assertProtoEquals(sess.graph.as_graph_def(add_shapes=True), ev_graph)
  ev = next(rr)
  self.assertProtoEquals("value { tag: 'v' simple_value: 1.0 }", ev.summary)
  ev = next(rr)
  self.assertEquals(tf.SessionLog.STOP, ev.session_log.status)
  self.assertRaises(StopIteration, lambda: next(rr))

  # There should be a checkpoint file with the variable "foo"
  with tf.Graph().as_default(), self.test_session() as sess:
    v = tf.Variable([10.10], name="foo")
    sav = tf.train.Saver([v])
    sav.restore(sess, save_path)
    # Restoring must overwrite the bogus 10.10 with the checkpointed 1.0.
    self.assertEqual(1.0, v.eval()[0])
# Same as testStandardServicesNoGlobalStep but with a global step.
# We should get a summary about the step time.
def testStandardServicesWithGlobalStep(self):
  logdir = self._TestDir("standard_services_with_global_step")
  # Create a checkpoint.
  with tf.Graph().as_default():
    v = tf.Variable([123], name="global_step")
    sv = tf.train.Supervisor(logdir=logdir)
    sess = sv.prepare_or_wait_for_session("")
    # This is where the checkpoint will appear, with step number 123.
    save_path = "%s-123" % sv.save_path
    self._wait_for_glob(save_path, 3.0)
    self._wait_for_glob(os.path.join(logdir, "*events*"), 3.0)
    # Wait to make sure everything is written to file before stopping.
    time.sleep(1)
    sv.stop()

  # There should be an event file with a version number.
  rr = _summary_iterator(logdir)
  ev = next(rr)
  self.assertEquals("brain.Event:2", ev.file_version)
  ev = next(rr)
  ev_graph = tf.GraphDef()
  ev_graph.ParseFromString(ev.graph_def)
  self.assertProtoEquals(sess.graph.as_graph_def(add_shapes=True), ev_graph)
  ev = next(rr)
  # It is actually undeterministic whether SessionLog.START gets written
  # before the summary or the checkpoint, but this works when run 10000 times.
  self.assertEquals(123, ev.step)
  self.assertEquals(tf.SessionLog.START, ev.session_log.status)
  first = next(rr)
  second = next(rr)
  # It is undeterministic whether the value gets written before the checkpoint
  # since they are on separate threads, so we check for both conditions.
  if first.HasField("summary"):
    self.assertProtoEquals("""value { tag: 'global_step/sec'
                              simple_value: 0.0 }""",
                           first.summary)
    self.assertEquals(123, second.step)
    self.assertEquals(tf.SessionLog.CHECKPOINT, second.session_log.status)
  else:
    self.assertEquals(123, first.step)
    self.assertEquals(tf.SessionLog.CHECKPOINT, first.session_log.status)
    self.assertProtoEquals("""value { tag: 'global_step/sec'
                              simple_value: 0.0 }""",
                           second.summary)
  ev = next(rr)
  self.assertEquals(tf.SessionLog.STOP, ev.session_log.status)
  self.assertRaises(StopIteration, lambda: next(rr))

  # There should be a checkpoint file with the variable "foo"
  with tf.Graph().as_default(), self.test_session() as sess:
    v = tf.Variable([-12], name="global_step")
    sav = tf.train.Saver([v])
    sav.restore(sess, save_path)
    # Restoring must overwrite the bogus -12 with the checkpointed 123.
    self.assertEqual(123, v.eval()[0])
def testNoQueueRunners(self):
  # With no queue runners in the graph, start_queue_runners() should
  # return an empty list of threads.
  with tf.Graph().as_default(), self.test_session() as session:
    supervisor = tf.train.Supervisor(logdir=self._TestDir("no_queue_runners"))
    self.assertEqual(0, len(supervisor.start_queue_runners(session)))
    supervisor.stop()
def testPrepareSessionAfterStopForChief(self):
  # A stopped chief Supervisor must be reusable: preparing a new session
  # clears the stop condition until stop() is called again.
  logdir = self._TestDir("prepare_after_stop_chief")
  with tf.Graph().as_default():
    sv = tf.train.Supervisor(logdir=logdir, is_chief=True)

    # Create a first session and then stop.
    sess = sv.prepare_or_wait_for_session("")
    sv.stop()
    sess.close()
    self.assertTrue(sv.should_stop())

    # Now create a second session and test that we don't stay stopped, until
    # we ask to stop again.
    sess2 = sv.prepare_or_wait_for_session("")
    self.assertFalse(sv.should_stop())
    sv.stop()
    sess2.close()
    self.assertTrue(sv.should_stop())
def testPrepareSessionAfterStopForNonChief(self):
  # Same restart-after-stop behavior as the chief case, but for a
  # non-chief Supervisor.
  logdir = self._TestDir("prepare_after_stop_nonchief")
  with tf.Graph().as_default():
    sv = tf.train.Supervisor(logdir=logdir, is_chief=False)

    # Create a first session and then stop.
    sess = sv.prepare_or_wait_for_session("")
    sv.stop()
    sess.close()
    self.assertTrue(sv.should_stop())

    # Now create a second session and test that we don't stay stopped, until
    # we ask to stop again.
    sess2 = sv.prepare_or_wait_for_session("")
    self.assertFalse(sv.should_stop())
    sv.stop()
    sess2.close()
    self.assertTrue(sv.should_stop())
# Run all test cases in this module when executed as a script.
if __name__ == "__main__":
  tf.test.main()
| |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 25 10:46:06 2015
@author: Paco
"""
"""Copyleft 2010 Forrest Sheng Bao http://fsbao.net
PyEEG, a Python module to extract EEG features, v 0.02_r2
Project homepage: http://pyeeg.org
**Data structure**
PyEEG only uses standard Python and numpy data structures,
so you need to import numpy before using it.
For numpy, please visit http://numpy.scipy.org
**Naming convention**
I follow "Style Guide for Python Code" to code my program
http://www.python.org/dev/peps/pep-0008/
Constants: UPPER_CASE_WITH_UNDERSCORES, e.g., SAMPLING_RATE, LENGTH_SIGNAL.
Function names: lower_case_with_underscores, e.g., spectrum_entropy.
Variables (global and local): CapitalizedWords or CapWords, e.g., Power.
If a variable name consists of one letter, I may use lower case, e.g., x, y.
Functions listed alphabetically
--------------------------------------------------
"""
from numpy.fft import fft
from numpy import zeros, floor, log10, log, mean, array, sqrt, vstack, cumsum, \
ones, log2, std
from numpy.linalg import svd, lstsq
import time
######################## Functions contributed by Xin Liu #################
def hurst(X):
    """Compute the Hurst exponent of X via rescaled-range (R/S) analysis.

    If the output H=0.5, the behavior of the time-series is similar to a
    random walk. If H<0.5, the time-series covers less "distance" than a
    random walk, and vice versa.

    Parameters
    ----------
    X
        list
        a time series

    Returns
    -------
    H
        float
        Hurst exponent

    Examples
    --------
    >>> import pyeeg
    >>> from numpy.random import randn
    >>> a = randn(4096)
    >>> pyeeg.hurst(a)
    >>> 0.5057444
    """
    N = len(X)
    T = array([float(i) for i in range(1, N + 1)])
    Y = cumsum(X)
    Ave_T = Y / T  # running mean of the first i+1 samples
    S_T = zeros((N))
    R_T = zeros((N))
    for i in range(N):
        S_T[i] = std(X[:i + 1])
        X_T = Y - T * Ave_T[i]
        R_T[i] = max(X_T[:i + 1]) - min(X_T[:i + 1])
    # NOTE: S_T[0] is 0 (std of a single sample), so R_S[0] is nan/inf; it
    # is deliberately excluded from the fit by the [1:] slices below.
    R_S = R_T / S_T
    R_S = log(R_S)
    n = log(T).reshape(N, 1)
    # Slope of log(R/S) vs log(n) is the Hurst exponent.
    H = lstsq(n[1:], R_S[1:])[0]
    return H[0]
######################## Begin function definitions #######################
def embed_seq(X, Tau, D):
    """Build a set of embedding sequences from given time series X with lag Tau
    and embedding dimension D.

    Let X = [x(1), x(2), ... , x(N)], then for each i such that
    1 < i < N - (D - 1) * Tau, we build an embedding sequence,
    Y(i) = [x(i), x(i + Tau), ... , x(i + (D - 1) * Tau)]. All embedding
    sequences are placed in a matrix Y.

    Parameters
    ----------
    X
        list
        a time series
    Tau
        integer
        the lag or delay when building embedding sequence
    D
        integer
        the embedding dimension

    Returns
    -------
    Y
        2-D list
        embedding matrix built

    Raises
    ------
    ValueError
        if D * Tau exceeds the series length, or Tau < 1.

    Examples
    ---------------
    >>> import pyeeg
    >>> a = range(0, 9)
    >>> pyeeg.embed_seq(a, 1, 4)
    array([[ 0.,  1.,  2.,  3.],
           [ 1.,  2.,  3.,  4.],
           [ 2.,  3.,  4.,  5.],
           [ 3.,  4.,  5.,  6.],
           [ 4.,  5.,  6.,  7.],
           [ 5.,  6.,  7.,  8.]])
    >>> pyeeg.embed_seq(a, 2, 3)
    array([[ 0.,  2.,  4.],
           [ 1.,  3.,  5.],
           [ 2.,  4.,  6.],
           [ 3.,  5.,  7.],
           [ 4.,  6.,  8.]])
    >>> pyeeg.embed_seq(a, 4, 1)
    array([[ 0.],
           [ 1.],
           [ 2.],
           [ 3.],
           [ 4.],
           [ 5.],
           [ 6.],
           [ 7.],
           [ 8.]])
    """
    N = len(X)
    # Raise instead of print+exit(): a library must not kill the host
    # process on bad arguments.
    if D * Tau > N:
        raise ValueError("Cannot build such a matrix, because D * Tau > N")
    if Tau < 1:
        raise ValueError("Tau has to be at least 1")
    Y = zeros((N - (D - 1) * Tau, D))
    for i in range(0, N - (D - 1) * Tau):
        for j in range(0, D):
            Y[i][j] = X[i + j * Tau]
    return Y
def in_range(Template, Scroll, Distance):
    """Determine whether one vector is within range of another vector.

    The two vectors should have equal length; they match when every
    componentwise absolute difference is at most Distance (Chebyshev
    distance <= Distance).

    Parameters
    -----------------
    Template
        list
        The template vector, one of two vectors being compared
    Scroll
        list
        The scroll vector, one of the two vectors being compared
    Distance
        float
        Two vectors match if their distance is less than Distance

    Notes
    -------
    The distance between two vectors can be defined as Euclidean distance
    according to some publications. The two vectors should be of equal
    length.
    """
    return all(abs(Template[k] - Scroll[k]) <= Distance
               for k in range(len(Template)))
""" Desperate code, but do not delete
def bit_in_range(Index):
if abs(Scroll[Index] - Template[Bit]) <= Distance :
print "Bit=", Bit, "Scroll[Index]", Scroll[Index], "Template[Bit]",\
Template[Bit], "abs(Scroll[Index] - Template[Bit])",\
abs(Scroll[Index] - Template[Bit])
return Index + 1 # move
Match_No_Tail = range(0, len(Scroll) - 1) # except the last one
# print Match_No_Tail
# first compare Template[:-2] and Scroll[:-2]
for Bit in xrange(0, len(Template) - 1): # every bit of Template is in range of Scroll
Match_No_Tail = filter(bit_in_range, Match_No_Tail)
print Match_No_Tail
# second and last, check whether Template[-1] is in range of Scroll and
# Scroll[-1] in range of Template
# 2.1 Check whether Template[-1] is in the range of Scroll
Bit = - 1
Match_All = filter(bit_in_range, Match_No_Tail)
# 2.2 Check whether Scroll[-1] is in the range of Template
# I just write a loop for this.
for i in Match_All:
if abs(Scroll[-1] - Template[i] ) <= Distance:
Match_All.remove(i)
return len(Match_All), len(Match_No_Tail)
"""
def bin_power(X, Band, Fs):
    """Compute power in each frequency bin specified by Band from FFT result
    of X. By default, X is a real signal.

    Note
    -----
    A real signal can be synthesized, thus not real.

    Parameters
    -----------
    Band
        list
        boundary frequencies (in Hz) of bins. They can be unequal bins, e.g.
        [0.5,4,7,12,30] which are delta, theta, alpha and beta respectively.
        You can also use range() function of Python to generate equal bins and
        pass the generated list to this function.
        Each element of Band is a physical frequency and shall not exceed the
        Nyquist frequency, i.e., half of sampling frequency.
    X
        list
        a 1-D real time series.
    Fs
        integer
        the sampling rate in physical frequency

    Returns
    -------
    Power
        list
        spectral power in each frequency bin.
    Power_Ratio
        list
        spectral power in each frequency bin normalized by total power in ALL
        frequency bins.
    """
    C = fft(X)
    C = abs(C)  # power is taken from FFT magnitudes
    Power = zeros(len(Band) - 1)
    for Freq_Index in range(0, len(Band) - 1):
        Freq = float(Band[Freq_Index])
        Next_Freq = float(Band[Freq_Index + 1])
        # Map physical frequencies [Freq, Next_Freq) onto FFT bin indices.
        Power[Freq_Index] = sum(
            C[int(floor(Freq / Fs * len(X))):int(floor(Next_Freq / Fs * len(X)))])
    Power_Ratio = Power / sum(Power)
    return Power, Power_Ratio
def first_order_diff(X):
    """Compute the first order difference of a time series.

    For a time series X = [x(1), x(2), ... , x(N)], its first order
    difference is:
    Y = [x(2) - x(1) , x(3) - x(2), ..., x(N) - x(N-1)]
    """
    # Comprehension over range() (py2 xrange replaced for py3 compat).
    return [X[i] - X[i - 1] for i in range(1, len(X))]
def pfd(X, D=None):
    """Compute Petrosian Fractal Dimension of a time series from either two
    cases below:
        1. X, the time series of type list (default)
        2. D, the first order differential sequence of X (if D is provided,
           recommended to speed up)

    In case 1, D is computed by first_order_diff(X) function of pyeeg.

    To speed up, it is recommended to compute D before calling this function
    because D may also be used by other functions whereas computing it here
    again will slow down.
    """
    if D is None:
        D = first_order_diff(X)
    N_delta = 0  # number of sign changes in derivative of the signal
    for i in range(1, len(D)):
        if D[i] * D[i - 1] < 0:
            N_delta += 1
    n = len(X)
    # BUG FIX: the original computed log10(n/n + 0.4*N_delta), i.e.
    # log10(1 + 0.4*N_delta). Petrosian's formula is
    # log10(n) / (log10(n) + log10(n / (n + 0.4 * N_delta))).
    return log10(n) / (log10(n) + log10(n / (n + 0.4 * N_delta)))
def hfd(X, Kmax):
    """Compute Higuchi Fractal Dimension of a time series X.

    Kmax is an HFD parameter: curve lengths are computed for every
    k in 1..Kmax-1 and the dimension is the slope of log(L(k)) vs log(1/k).
    """
    L = []
    x = []
    N = len(X)
    for k in range(1, Kmax):
        Lk = []
        # For each phase offset m, measure the length of the subsampled curve.
        for m in range(0, k):
            Lmk = 0
            for i in range(1, int(floor((N - m) / k))):
                Lmk += abs(X[m + i * k] - X[m + i * k - k])
            # Normalization factor from Higuchi's definition.
            Lmk = Lmk * (N - 1) / floor((N - m) / float(k)) / k
            Lk.append(Lmk)
        L.append(log(mean(Lk)))
        x.append([log(float(1) / k), 1])
    # Least-squares slope of log(L) against log(1/k) is the dimension.
    (p, r1, r2, s) = lstsq(x, L)
    return p[0]
def hjorth(X, D=None):
    """Compute Hjorth mobility and complexity of a time series from either two
    cases below:
        1. X, the time series of type list (default)
        2. D, a first order differential sequence of X (if D is provided,
           recommended to speed up)

    In case 1, D is computed by first_order_diff(X) function of pyeeg.

    Notes
    -----
    To speed up, it is recommended to compute D before calling this function
    because D may also be used by other functions whereas computing it here
    again will slow down.

    Parameters
    ----------
    X
        list
        a time series
    D
        list
        first order differential sequence of a time series

    Returns
    -------
    As indicated in return line
    Hjorth mobility and complexity
    """
    if D is None:
        D = first_order_diff(X)
    else:
        # Copy so the caller's list is not mutated by the insert below
        # (the original modified the argument in place).
        D = list(D)
    D.insert(0, X[0])  # pad the first difference
    D = array(D)
    n = len(X)
    M2 = float(sum(D ** 2)) / n
    TP = sum(array(X) ** 2)
    M4 = 0
    for i in range(1, len(D)):
        M4 += (D[i] - D[i - 1]) ** 2
    M4 = M4 / n
    # Hjorth Mobility and Complexity
    return sqrt(M2 / TP), sqrt(float(M4) * TP / M2 / M2)
def spectral_entropy(X, Band, Fs, Power_Ratio=None):
    """Compute spectral entropy of a time series from either two cases below:
    1. X, the time series (default)
    2. Power_Ratio, a list of normalized signal power in a set of frequency
    bins defined in Band (if Power_Ratio is provided, recommended to speed up)

    In case 1, Power_Ratio is computed by bin_power() function.

    Notes
    -----
    To speed up, it is recommended to compute Power_Ratio before calling this
    function because it may also be used by other functions whereas computing
    it here again will slow down.

    Parameters
    ----------
    Band
        list
        boundary frequencies (in Hz) of bins. They can be unequal bins, e.g.
        [0.5,4,7,12,30] which are delta, theta, alpha and beta respectively.
        Each element of Band is a physical frequency and shall not exceed the
        Nyquist frequency, i.e., half of sampling frequency.
    X
        list
        a 1-D real time series.
    Fs
        integer
        the sampling rate in physical frequency

    Returns
    -------
    As indicated in return line

    See Also
    --------
    bin_power: pyeeg function that computes spectral power in frequency bins
    """
    if Power_Ratio is None:
        Power, Power_Ratio = bin_power(X, Band, Fs)
    Spectral_Entropy = 0
    # NOTE(review): range(len - 1) skips the last bin, mirroring upstream
    # PyEEG; confirm against the reference implementation before changing.
    for i in range(0, len(Power_Ratio) - 1):
        Spectral_Entropy += Power_Ratio[i] * log(Power_Ratio[i])
    Spectral_Entropy /= log(len(Power_Ratio))  # to save time, minus one is omitted
    return -1 * Spectral_Entropy
def svd_entropy(X, Tau, DE, W=None):
    """Compute SVD Entropy from either two cases below:
    1. a time series X, with lag Tau and embedding dimension DE (default)
    2. a list, W, of normalized singular values of a matrix (if W is
       provided, recommended to speed up.)

    If W is None, the singular spectrum is prepared by embedding X with
    embed_seq(X, Tau, DE), decomposing the result with scipy/numpy svd
    (compute_uv=0), and normalizing the singular values to sum to one.

    Notes
    -------------
    To speed up, it is recommended to compute W before calling this function
    because W may also be used by other functions whereas computing it here
    again will slow down.
    """
    if W is None:
        embedding = embed_seq(X, Tau, DE)
        singular_values = svd(embedding, compute_uv=0)
        # Normalize so the spectrum forms a probability distribution.
        W = singular_values / sum(singular_values)
    # Shannon entropy of the normalized singular spectrum.
    return -sum(W * log(W))
def fisher_info(X, Tau, DE, W=None):
    """Compute Fisher information of a time series from either two cases below:
    1. X, a time series, with lag Tau and embedding dimension DE (default)
    2. W, a list of normalized singular values, i.e., singular spectrum (if W
       is provided, recommended to speed up.)

    If W is None, the function prepares the singular spectrum by embedding X
    with embed_seq(X, Tau, DE), decomposing with svd (compute_uv=0), and
    normalizing W to sum to one.

    Parameters
    ----------
    X
        list
        a time series. X will be used to build embedding matrix and compute
        singular values if W or M is not provided.
    Tau
        integer
        the lag or delay when building a embedding sequence. Tau will be used
        to build embedding matrix and compute singular values if W or M is not
        provided.
    DE
        integer
        the embedding dimension to build an embedding matrix from a given
        series. DE will be used to build embedding matrix and compute
        singular values if W or M is not provided.
    W
        list or array
        the set of singular values, i.e., the singular spectrum

    Returns
    -------
    FI
        integer
        Fisher information

    Notes
    -----
    To speed up, it is recommended to compute W before calling this function
    because W may also be used by other functions whereas computing it here
    again will slow down.

    See Also
    --------
    embed_seq : embed a time series into a matrix
    """
    if W is None:
        M = embed_seq(X, Tau, DE)
        W = svd(M, compute_uv=0)
        W /= sum(W)
    FI = 0
    # Discrete Fisher information over consecutive spectrum values.
    for i in range(0, len(W) - 1):
        FI += ((W[i + 1] - W[i]) ** 2) / (W[i])
    return FI
def ap_entropy(X, M, R):
    """Computer approximate entropy (ApEN) of series X, specified by M and R.

    Suppose given time series is X = [x(1), x(2), ... , x(N)]. We first build
    embedding matrix Em, of dimension (N-M+1)-by-M, such that the i-th row of Em
    is x(i),x(i+1), ... , x(i+M-1). Hence, the embedding lag and dimension are
    1 and M-1 respectively. Such a matrix can be built by calling pyeeg function
    as Em = embed_seq(X, 1, M). Then we build matrix Emp, whose only
    difference with Em is that the length of each embedding sequence is M + 1

    Denote the i-th and j-th row of Em as Em[i] and Em[j]. Their k-th elments
    are Em[i][k] and Em[j][k] respectively. The distance between Em[i] and Em[j]
    is defined as 1) the maximum difference of their corresponding scalar
    components, thus, max(Em[i]-Em[j]), or 2) Euclidean distance. We say two 1-D
    vectors Em[i] and Em[j] *match* in *tolerance* R, if the distance between them
    is no greater than R, thus, max(Em[i]-Em[j]) <= R. Mostly, the value of R is
    defined as 20% - 30% of standard deviation of X.

    Pick Em[i] as a template, for all j such that 0 < j < N - M + 1, we can
    check whether Em[j] matches with Em[i]. Denote the number of Em[j],
    which is in the range of Em[i], as k[i], which is the i-th element of the
    vector k. The probability that a random row in Em matches Em[i] is
    \simga_1^{N-M+1} k[i] / (N - M + 1), thus sum(k)/ (N - M + 1),
    denoted as Cm[i].

    We repeat the same process on Emp and obtained Cmp[i], but here 0<i<N-M
    since the length of each sequence in Emp is M + 1.

    The probability that any two embedding sequences in Em match is then
    sum(Cm)/ (N - M +1 ). We define Phi_m = sum(log(Cm)) / (N - M + 1) and
    Phi_mp = sum(log(Cmp)) / (N - M ).

    And the ApEn is defined as Phi_m - Phi_mp.

    Notes
    -----
    #. Please be aware that self-match is also counted in ApEn.
    #. This function now runs very slow. We are still trying to speed it up.

    References
    ----------
    Costa M, Goldberger AL, Peng CK, Multiscale entropy analysis of biolgical
    signals, Physical Review E, 71:021906, 2005

    See also
    --------
    samp_entropy: sample entropy of a time series

    Notes
    -----
    Extremely slow implementation. Do NOT use if your dataset is not small.
    """
    N = len(X)

    Em = embed_seq(X, 1, M)
    Emp = embed_seq(X, 1, M + 1)  # try to only build Emp to save time

    Cm, Cmp = zeros(N - M + 1), zeros(N - M)
    # in case there is 0 after counting. Log(0) is undefined.

    for i in xrange(0, N - M):
        # print i
        for j in xrange(i, N - M):  # start from i, self-match counts in ApEn
            # if max(abs(Em[i]-Em[j])) <= R:  # compare N-M scalars in each subseq v 0.01b_r1
            if in_range(Em[i], Em[j], R):
                # Count the match symmetrically for both templates.
                Cm[i] += 1  ### Xin Liu
                Cm[j] += 1
                # NOTE(review): the (M+1)-length match is only tested when the
                # M-length prefix already matched — mirrors upstream PyEEG.
                if abs(Emp[i][-1] - Emp[j][-1]) <= R:  # check last one
                    Cmp[i] += 1
                    Cmp[j] += 1
        # Em has one more row than Emp; compare each template against it.
        if in_range(Em[i], Em[N - M], R):
            Cm[i] += 1
            Cm[N - M] += 1
        # try to count Cm[j] and Cmp[j] as well here

    # if max(abs(Em[N-M]-Em[N-M])) <= R:  # index from 0, so N-M+1 is N-M  v 0.01b_r1
    # if in_range(Em[i], Em[N - M], R):  # for Cm, there is one more iteration than Cmp
    #     Cm[N - M] += 1  # cross-matches on Cm[N - M]

    Cm[N - M] += 1  # Cm[N - M] self-matches
    # import code;code.interact(local=locals())
    Cm /= (N - M + 1)
    Cmp /= (N - M)
    # import code;code.interact(local=locals())
    Phi_m, Phi_mp = sum(log(Cm)), sum(log(Cmp))

    Ap_En = (Phi_m - Phi_mp) / (N - M)

    return Ap_En
def samp_entropy(X, M, R):
    """Computer sample entropy (SampEn) of series X, specified by M and R.

    SampEn is very close to ApEn.

    Suppose given time series is X = [x(1), x(2), ... , x(N)]. We first build
    embedding matrix Em, of dimension (N-M+1)-by-M, such that the i-th row of Em
    is x(i),x(i+1), ... , x(i+M-1). Hence, the embedding lag and dimension are
    1 and M-1 respectively. Such a matrix can be built by calling pyeeg function
    as Em = embed_seq(X, 1, M). Then we build matrix Emp, whose only
    difference with Em is that the length of each embedding sequence is M + 1

    Denote the i-th and j-th row of Em as Em[i] and Em[j]. Their k-th elments
    are Em[i][k] and Em[j][k] respectively. The distance between Em[i] and Em[j]
    is defined as 1) the maximum difference of their corresponding scalar
    components, thus, max(Em[i]-Em[j]), or 2) Euclidean distance. We say two 1-D
    vectors Em[i] and Em[j] *match* in *tolerance* R, if the distance between them
    is no greater than R, thus, max(Em[i]-Em[j]) <= R. Mostly, the value of R is
    defined as 20% - 30% of standard deviation of X.

    Pick Em[i] as a template, for all j such that 0 < j < N - M , we can
    check whether Em[j] matches with Em[i]. Denote the number of Em[j],
    which is in the range of Em[i], as k[i], which is the i-th element of the
    vector k.

    We repeat the same process on Emp and obtained Cmp[i], 0 < i < N - M.

    The SampEn is defined as log(sum(Cm)/sum(Cmp))

    References
    ----------
    Costa M, Goldberger AL, Peng C-K, Multiscale entropy analysis of biolgical
    signals, Physical Review E, 71:021906, 2005

    See also
    --------
    ap_entropy: approximate entropy of a time series

    Notes
    -----
    Extremely slow computation. Do NOT use if your dataset is not small and you
    are not patient enough.
    """
    N = len(X)

    Em = embed_seq(X, 1, M)
    Emp = embed_seq(X, 1, M + 1)

    # Seed with a tiny epsilon in case there is 0 after counting;
    # log(0) is undefined.
    Cm, Cmp = zeros(N - M - 1) + 1e-100, zeros(N - M - 1) + 1e-100

    for i in range(0, N - M):
        for j in range(i + 1, N - M):  # no self-match
            # if max(abs(Em[i]-Em[j])) <= R:  # v 0.01_b_r1
            if in_range(Em[i], Em[j], R):
                Cm[i] += 1
                # NOTE(review): the (M+1)-length match is only tested when the
                # M-length prefix matched — mirrors upstream PyEEG; confirm
                # before changing.
                # if max(abs(Emp[i] - Emp[j])) <= R:  # v 0.01_b_r1
                if abs(Emp[i][-1] - Emp[j][-1]) <= R:  # check last one
                    Cmp[i] += 1

    Samp_En = log(sum(Cm) / sum(Cmp))

    return Samp_En
def dfa(X, Ave = None, L = None):
    """Compute Detrended Fluctuation Analysis from a time series X and length of
    boxes L.

    The first step to compute DFA is to integrate the signal. Let the original
    series be X = [x(1), x(2), ..., x(N)].
    The integrated signal Y = [y(1), y(2), ..., y(N)] is obtained as follows

        y(k) = \\sum_{i=1}^{k}{x(i)-Ave}

    where Ave is the mean of X.

    The second step is to partition/slice/segment the integrated sequence Y
    into boxes. At least two boxes are needed for computing DFA. Box sizes are
    specified by the L argument of this function. By default, it is from 1/5 of
    signal length to one (x-5)-th of the signal length, where x is the nearest
    power of 2 from the length of the signal, i.e., 1/16, 1/32, 1/64, 1/128, ...

    In each box, a linear least square fitting is employed on data in the box.
    Denote the series on the fitted line as Yn. Its k-th element, yn(k),
    corresponds to y(k). For the fit in each box there is a residue, the sum of
    squares of the offsets between actual points and points on the fitted line.
    F(n) denotes the square root of the average total residue in all boxes when
    the box length is n, thus

        Total_Residue = \\sum_{k=1}^{N}{(y(k)-yn(k))^2}
        F(n) = \\sqrt(Total_Residue/N)

    The computation of F(n) is carried out for every box length n, and a least
    square fit between log(F(n)) and log(n) is performed. The slope of the
    fitting line is the DFA value, denoted as Alpha. For white noise, Alpha
    should be near 0.5. A higher level of signal complexity is related to a
    higher Alpha.

    Parameters
    ----------
    X:
        1-D Python list or numpy array
        a time series
    Ave:
        float, optional
        The average value of the time series
    L:
        1-D Python list of integers
        A list of box sizes, integers in ascending order

    Returns
    -------
    Alpha:
        float
        the result of DFA analysis, thus the slope of the fitting line of
        log(F(n)) vs. log(n)

    Raises
    ------
    ValueError
        if a requested box length is zero (time series too short for the
        chosen box sizes).

    Examples
    --------
    >>> import pyeeg
    >>> from numpy.random import randn
    >>> print pyeeg.dfa(randn(4096))
    0.490035110345

    Reference
    ---------
    Peng C-K, Havlin S, Stanley HE, Goldberger AL. Quantification of scaling
    exponents and crossover phenomena in nonstationary heartbeat time series.
    _Chaos_ 1995;5:82-87

    Notes
    -----
    This value depends on the box sizes very much. When the input is white
    noise, this value should be 0.5, but some choices of box sizes can lead to
    values lower or higher than 0.5, e.g. 0.38 or 0.58.
    You may generate a list of box sizes and pass in such a list as a parameter.
    """
    X = array(X, dtype=float)
    if Ave is None:
        Ave = mean(X)
    # Integrate the mean-centred signal: y(k) = sum_{i<=k} (x(i) - Ave).
    # BUGFIX: the previous code computed cumsum(X) and then subtracted Ave
    # once from every element, i.e. cumsum(X) - Ave instead of
    # cumsum(X - Ave) = cumsum(X) - k*Ave, which does not match the
    # definition above and makes white noise look like a random walk.
    Y = cumsum(X - Ave)
    if L is None:
        # Default box sizes: 1/16, 1/32, ... of the signal length.
        L = floor(len(X) * 1 / (2 ** array(range(4, int(log2(len(X))) - 4))))
    F = zeros(len(L))  # F(n) of different given box length n
    for i in range(0, len(L)):
        n = int(L[i])  # for each box length L[i]
        if n == 0:
            # Previously printed a message and called exit(); raising lets
            # callers handle the condition instead of killing the process.
            raise ValueError(
                "time series is too short while the box length is too big"
            )
        for j in range(0, len(X), n):  # for each box
            if j + n < len(X):
                c = range(j, j + n)
                c = vstack([c, ones(n)]).T  # coordinates of time in the box
                y = Y[j:j + n]  # the value of data in the box
                # rcond=-1 keeps the historical numpy.linalg.lstsq cut-off.
                F[i] += lstsq(c, y, rcond=-1)[1]  # add residue in this box
        # Average over samples covered by complete boxes only; // keeps the
        # original (Python 2) integer-division semantics under Python 3.
        F[i] /= ((len(X) // n) * n)
    F = sqrt(F)
    # Slope of log(F(n)) vs log(n) is the DFA exponent.
    Alpha = lstsq(vstack([log(L), ones(len(L))]).T, log(F), rcond=-1)[0][0]
    return Alpha
| |
from __future__ import absolute_import
import Cookie
import urllib
import urlparse
import time
import copy
from email.utils import parsedate_tz, formatdate, mktime_tz
import threading
from netlib import http, tcp, http_status
import netlib.utils
from netlib.odict import ODict, ODictCaseless
from .tcp import TCPHandler
from .primitives import KILL, ProtocolHandler, Flow, Error
from ..proxy.connection import ServerConnection
from .. import encoding, utils, controller, stateobject, proxy
HDR_FORM_URLENCODED = "application/x-www-form-urlencoded"
CONTENT_MISSING = 0
class KillSignal(Exception):
    """Raised when a handler/script replies with KILL for the current flow."""
    pass
def get_line(fp):
    """
    Read one line from fp, skipping a single leading blank line.

    A bare "\r\n" or "\n" may be left over from the previous message; in
    that case the following line is read instead. Raises
    tcp.NetLibDisconnect when the stream is exhausted.
    """
    candidate = fp.readline()
    if candidate in ("\r\n", "\n"):
        # Possible leftover from previous message - read the next line.
        candidate = fp.readline()
    if not candidate:
        raise tcp.NetLibDisconnect()
    return candidate
def send_connect_request(conn, host, port, update_state=True):
    """
    Issue an HTTP CONNECT for host:port over conn and read the reply.

    Raises proxy.ProxyError unless the upstream proxy answers with 200.
    When update_state is True, the CONNECT is recorded on conn.state so
    the tunnel can be re-established after a server reconnect.
    """
    connect_req = HTTPRequest(
        "authority",      # request form
        "CONNECT",
        None,             # scheme: not present in authority-form
        host,
        port,
        None,             # path: not present in authority-form
        (1, 1),           # HTTP/1.1
        ODictCaseless(),
        ""
    )
    conn.send(connect_req.assemble())
    reply = HTTPResponse.from_stream(conn.rfile, connect_req.method)
    if reply.code != 200:
        raise proxy.ProxyError(
            reply.code,
            "Cannot establish SSL " +
            "connection with upstream proxy: \r\n" +
            str(reply.assemble())
        )
    if update_state:
        conn.state.append(
            ("http", {"state": "connect", "host": host, "port": port})
        )
    return reply
class decoded(object):
    """
    A context manager that decodes a request or response body on entry
    and re-encodes it with the same encoding on exit.

    Only encodings known to the encoding module are handled; anything
    else leaves the message untouched.

    Example:

        with decoded(request):
            request.content = request.content.replace("foo", "bar")
    """
    def __init__(self, o):
        self.o = o
        declared = o.headers.get_first("content-encoding")
        # Remember the encoding only if we know how to round-trip it.
        self.ce = declared if declared in encoding.ENCODINGS else None

    def __enter__(self):
        if self.ce:
            self.o.decode()

    def __exit__(self, type, value, tb):
        if self.ce:
            self.o.encode(self.ce)
class HTTPMessage(stateobject.StateObject):
    """
    Base class for HTTPRequest and HTTPResponse

    Bundles what both message types share: HTTP version, headers, body
    content and transmission timestamps.
    """
    def __init__(self, httpversion, headers, content, timestamp_start=None,
                 timestamp_end=None):
        self.httpversion = httpversion
        self.headers = headers
        """@type: ODictCaseless"""
        self.content = content
        self.timestamp_start = timestamp_start
        self.timestamp_end = timestamp_end

    # Attribute name -> type map consumed by stateobject for serialization.
    _stateobject_attributes = dict(
        httpversion=tuple,
        headers=ODictCaseless,
        content=str,
        timestamp_start=float,
        timestamp_end=float
    )
    # Attributes omitted from the "short" state form (see get_state).
    _stateobject_long_attributes = {"content"}

    def get_state(self, short=False):
        """
        Return the message state as a dict. In short form, the (possibly
        large) body is left out and only its length is reported as
        "contentLength".
        """
        ret = super(HTTPMessage, self).get_state(short)
        if short:
            if self.content:
                ret["contentLength"] = len(self.content)
            else:
                ret["contentLength"] = 0
        return ret

    def get_decoded_content(self):
        """
        Returns the decoded content based on the current Content-Encoding
        header.
        Doesn't change the message itself or its headers.
        """
        ce = self.headers.get_first("content-encoding")
        if not self.content or ce not in encoding.ENCODINGS:
            return self.content
        return encoding.decode(ce, self.content)

    def decode(self):
        """
        Decodes content based on the current Content-Encoding header, then
        removes the header. If there is no Content-Encoding header, no
        action is taken.
        Returns True if decoding succeeded, False otherwise.
        """
        ce = self.headers.get_first("content-encoding")
        if not self.content or ce not in encoding.ENCODINGS:
            return False
        data = encoding.decode(ce, self.content)
        if data is None:
            # Body could not be decoded with the advertised encoding;
            # leave the message unchanged.
            return False
        self.content = data
        del self.headers["content-encoding"]
        return True

    def encode(self, e):
        """
        Encodes content with the encoding e, where e is "gzip", "deflate"
        or "identity".
        """
        # FIXME: Error if there's an existing encoding header?
        self.content = encoding.encode(e, self.content)
        self.headers["content-encoding"] = [e]

    def size(self, **kwargs):
        """
        Size in bytes of a fully rendered message, including headers and
        HTTP lead-in.
        """
        hl = len(self._assemble_head(**kwargs))
        if self.content:
            return hl + len(self.content)
        else:
            return hl

    def copy(self):
        # Shallow copy, but with an independent headers object so the
        # copy's headers can be edited without affecting the original.
        c = copy.copy(self)
        c.headers = self.headers.copy()
        return c

    def replace(self, pattern, repl, *args, **kwargs):
        """
        Replaces a regular expression pattern with repl in both the headers
        and the body of the message. Encoded content will be decoded
        before replacement, and re-encoded afterwards.
        Returns the number of replacements made.
        """
        with decoded(self):
            self.content, c = utils.safe_subn(
                pattern, repl, self.content, *args, **kwargs
            )
        c += self.headers.replace(pattern, repl, *args, **kwargs)
        return c

    # The four methods below are template methods implemented by
    # HTTPRequest and HTTPResponse.
    def _assemble_first_line(self):
        """
        Returns the assembled request/response line
        """
        raise NotImplementedError()  # pragma: nocover

    def _assemble_headers(self):
        """
        Returns the assembled headers
        """
        raise NotImplementedError()  # pragma: nocover

    def _assemble_head(self):
        """
        Returns the assembled request/response line plus headers
        """
        raise NotImplementedError()  # pragma: nocover

    def assemble(self):
        """
        Returns the assembled request/response
        """
        raise NotImplementedError()  # pragma: nocover
class HTTPRequest(HTTPMessage):
    """
    An HTTP request.

    Exposes the following attributes:

        method: HTTP method

        scheme: URL scheme (http/https)

        host: Target hostname of the request. This is not necessarily the
        direct upstream server (which could be another proxy), but it's always
        the target server we want to reach at the end. This attribute is either
        inferred from the request itself (absolute-form, authority-form) or
        from the connection metadata (e.g. the host in reverse proxy mode).

        port: Destination port

        path: Path portion of the URL (not present in authority-form)

        httpversion: HTTP version tuple, e.g. (1,1)

        headers: ODictCaseless object

        content: Content of the request, None, or CONTENT_MISSING if there
        is content associated, but not present. CONTENT_MISSING evaluates
        to False to make checking for the presence of content natural.

        form_in: The request form which mitmproxy has received. The following
        values are possible:
            - relative (GET /index.html, OPTIONS *) (covers origin form and
              asterisk form)
            - absolute (GET http://example.com:80/index.html)
            - authority-form (CONNECT example.com:443)
        Details: http://tools.ietf.org/html/draft-ietf-httpbis-p1-messaging-25#section-5.3

        form_out: The request form which mitmproxy will send out to the
        destination

        timestamp_start: Timestamp indicating when request transmission started

        timestamp_end: Timestamp indicating when request transmission ended
    """
    def __init__(
        self,
        form_in,
        method,
        scheme,
        host,
        port,
        path,
        httpversion,
        headers,
        content,
        timestamp_start=None,
        timestamp_end=None,
        form_out=None
    ):
        assert isinstance(headers, ODictCaseless) or not headers
        HTTPMessage.__init__(
            self,
            httpversion,
            headers,
            content,
            timestamp_start,
            timestamp_end
        )
        self.form_in = form_in
        self.method = method
        self.scheme = scheme
        self.host = host
        self.port = port
        self.path = path
        self.httpversion = httpversion
        # By default, the request is sent out in the same form it came in.
        self.form_out = form_out or form_in
        # Have this request's cookies been modified by sticky cookies or auth?
        self.stickycookie = False
        self.stickyauth = False
        # Is this request replayed?
        self.is_replay = False

    # Extend the serialized state with the request-specific attributes.
    _stateobject_attributes = HTTPMessage._stateobject_attributes.copy()
    _stateobject_attributes.update(
        form_in=str,
        method=str,
        scheme=str,
        host=str,
        port=int,
        path=str,
        form_out=str
    )

    @classmethod
    def from_state(cls, state):
        """Reconstruct a request from a previously serialized state dict."""
        f = cls(None, None, None, None, None, None, None, None, None, None, None)
        f.load_state(state)
        return f

    def __repr__(self):
        # [:-9] strips the trailing " HTTP/x.y" (9 chars) from the line.
        return "<HTTPRequest: {0}>".format(
            self._assemble_first_line(self.form_in)[:-9]
        )

    @classmethod
    def from_stream(cls, rfile, include_body=True, body_size_limit=None):
        """
        Parse an HTTP request from a file stream
        """
        httpversion, host, port, scheme, method, path, headers, content, timestamp_start, timestamp_end = (
            None, None, None, None, None, None, None, None, None, None)

        timestamp_start = utils.timestamp()
        if hasattr(rfile, "reset_timestamps"):
            rfile.reset_timestamps()

        request_line = get_line(rfile)

        if hasattr(rfile, "first_byte_timestamp"):
            # more accurate timestamp_start
            timestamp_start = rfile.first_byte_timestamp

        request_line_parts = http.parse_init(request_line)
        if not request_line_parts:
            raise http.HttpError(
                400,
                "Bad HTTP request line: %s" % repr(request_line)
            )
        method, path, httpversion = request_line_parts

        # Determine the request form from the shape of the request target.
        if path == '*' or path.startswith("/"):
            form_in = "relative"
            if not netlib.utils.isascii(path):
                raise http.HttpError(
                    400,
                    "Bad HTTP request line: %s" % repr(request_line)
                )
        elif method.upper() == 'CONNECT':
            form_in = "authority"
            r = http.parse_init_connect(request_line)
            if not r:
                raise http.HttpError(
                    400,
                    "Bad HTTP request line: %s" % repr(request_line)
                )
            host, port, _ = r
            path = None
        else:
            form_in = "absolute"
            r = http.parse_init_proxy(request_line)
            if not r:
                raise http.HttpError(
                    400,
                    "Bad HTTP request line: %s" % repr(request_line)
                )
            _, scheme, host, port, path, _ = r

        headers = http.read_headers(rfile)
        if headers is None:
            raise http.HttpError(400, "Invalid headers")

        if include_body:
            content = http.read_http_body(rfile, headers, body_size_limit,
                                          method, None, True)
            timestamp_end = utils.timestamp()

        return HTTPRequest(
            form_in,
            method,
            scheme,
            host,
            port,
            path,
            httpversion,
            headers,
            content,
            timestamp_start,
            timestamp_end
        )

    def _assemble_first_line(self, form=None):
        # Render the request line in the given (or configured output) form.
        form = form or self.form_out
        if form == "relative":
            request_line = '%s %s HTTP/%s.%s' % (
                self.method, self.path, self.httpversion[0], self.httpversion[1]
            )
        elif form == "authority":
            request_line = '%s %s:%s HTTP/%s.%s' % (
                self.method, self.host, self.port, self.httpversion[0],
                self.httpversion[1]
            )
        elif form == "absolute":
            request_line = '%s %s://%s:%s%s HTTP/%s.%s' % (
                self.method, self.scheme, self.host,
                self.port, self.path, self.httpversion[0],
                self.httpversion[1]
            )
        else:
            raise http.HttpError(400, "Invalid request form")
        return request_line

    # This list is adopted legacy code.
    # We probably don't need to strip off keep-alive.
    _headers_to_strip_off = ['Proxy-Connection',
                             'Keep-Alive',
                             'Connection',
                             'Transfer-Encoding',
                             'Upgrade']

    def _assemble_headers(self):
        # Work on a copy so the flow's stored headers stay untouched.
        headers = self.headers.copy()
        for k in self._headers_to_strip_off:
            del headers[k]
        if 'host' not in headers and self.scheme and self.host and self.port:
            headers["Host"] = [utils.hostport(self.scheme,
                                              self.host,
                                              self.port)]

        # If content is defined (i.e. not None or CONTENT_MISSING), we always add a content-length header.
        if self.content or self.content == "":
            headers["Content-Length"] = [str(len(self.content))]

        return str(headers)

    def _assemble_head(self, form=None):
        return "%s\r\n%s\r\n" % (
            self._assemble_first_line(form), self._assemble_headers()
        )

    def assemble(self, form=None):
        """
        Assembles the request for transmission to the server. We make some
        modifications to make sure interception works properly.

        Raises an Exception if the request cannot be assembled.
        """
        if self.content == CONTENT_MISSING:
            raise proxy.ProxyError(
                502,
                "Cannot assemble flow with CONTENT_MISSING"
            )
        head = self._assemble_head(form)
        if self.content:
            return head + self.content
        else:
            return head

    def __hash__(self):
        # Identity-based hash: two distinct request objects never compare
        # equal in hash-based containers, even with identical contents.
        return id(self)

    def anticache(self):
        """
        Modifies this request to remove headers that might produce a cached
        response. That is, we remove ETags and If-Modified-Since headers.
        """
        delheaders = [
            "if-modified-since",
            "if-none-match",
        ]
        for i in delheaders:
            del self.headers[i]

    def anticomp(self):
        """
        Modifies this request to remove headers that will compress the
        resource's data.
        """
        self.headers["accept-encoding"] = ["identity"]

    def constrain_encoding(self):
        """
        Limits the permissible Accept-Encoding values, based on what we can
        decode appropriately.
        """
        if self.headers["accept-encoding"]:
            self.headers["accept-encoding"] = [', '.join(
                e for e in encoding.ENCODINGS if e in self.headers["accept-encoding"][0]
            )]

    def update_host_header(self):
        """
        Update the host header to reflect the current target.
        """
        self.headers["Host"] = [self.host]

    def get_form_urlencoded(self):
        """
        Retrieves the URL-encoded form data, returning an ODict object.
        Returns an empty ODict if there is no data or the content-type
        indicates non-form data.
        """
        if self.content and self.headers.in_any("content-type", HDR_FORM_URLENCODED, True):
            return ODict(utils.urldecode(self.content))
        return ODict([])

    def set_form_urlencoded(self, odict):
        """
        Sets the body to the URL-encoded form data, and adds the
        appropriate content-type header. Note that this will destroy the
        existing body if there is one.
        """
        # FIXME: If there's an existing content-type header indicating a
        # url-encoded form, leave it alone.
        self.headers["Content-Type"] = [HDR_FORM_URLENCODED]
        self.content = utils.urlencode(odict.lst)

    def get_path_components(self):
        """
        Returns the path components of the URL as a list of strings.
        Components are unquoted.
        """
        _, _, path, _, _, _ = urlparse.urlparse(self.url)
        return [urllib.unquote(i) for i in path.split("/") if i]

    def set_path_components(self, lst):
        """
        Takes a list of strings, and sets the path component of the URL.
        Components are quoted.
        """
        lst = [urllib.quote(i, safe="") for i in lst]
        path = "/" + "/".join(lst)
        scheme, netloc, _, params, query, fragment = urlparse.urlparse(self.url)
        self.url = urlparse.urlunparse(
            [scheme, netloc, path, params, query, fragment]
        )

    def get_query(self):
        """
        Gets the request query string. Returns an ODict object.
        """
        _, _, _, _, query, _ = urlparse.urlparse(self.url)
        if query:
            return ODict(utils.urldecode(query))
        return ODict([])

    def set_query(self, odict):
        """
        Takes an ODict object, and sets the request query string.
        """
        scheme, netloc, path, params, _, fragment = urlparse.urlparse(self.url)
        query = utils.urlencode(odict.lst)
        self.url = urlparse.urlunparse(
            [scheme, netloc, path, params, query, fragment]
        )

    def pretty_host(self, hostheader):
        """
        Heuristic to get the host of the request.

        Note that pretty_host() does not always return the TCP destination
        of the request, e.g. if an upstream proxy is in place

        If hostheader is set to True, the Host: header will be used as
        additional (and preferred) data source. This is handy in transparent
        mode, where only the ip of the destination is known, but not the
        resolved name. This is disabled by default, as an attacker may spoof
        the host header to confuse an analyst.
        """
        host = None
        if hostheader:
            host = self.headers.get_first("host")
        if not host:
            host = self.host
        # IDNA-encode so non-ASCII hostnames render consistently.
        host = host.encode("idna")
        return host

    def pretty_url(self, hostheader):
        # Human-readable URL based on pretty_host (see its caveats).
        if self.form_out == "authority":  # upstream proxy mode
            return "%s:%s" % (self.pretty_host(hostheader), self.port)
        return utils.unparse_url(self.scheme,
                                 self.pretty_host(hostheader),
                                 self.port,
                                 self.path).encode('ascii')

    @property
    def url(self):
        """
        Returns a URL string, constructed from the Request's URL components.
        """
        return utils.unparse_url(
            self.scheme,
            self.host,
            self.port,
            self.path
        ).encode('ascii')

    @url.setter
    def url(self, url):
        """
        Parses a URL specification, and updates the Request's information
        accordingly.

        Raises ValueError if the URL is invalid.
        """
        parts = http.parse_url(url)
        if not parts:
            raise ValueError("Invalid URL: %s" % url)
        self.scheme, self.host, self.port, self.path = parts

    def get_cookies(self):
        # Parse the Cookie header(s) into a {name: (value, {})} dict; the
        # attribute dict is always empty for request cookies.
        cookie_headers = self.headers.get("cookie")
        if not cookie_headers:
            return None
        cookies = []
        for header in cookie_headers:
            pairs = [pair.partition("=") for pair in header.split(';')]
            cookies.extend((pair[0], (pair[2], {})) for pair in pairs)
        return dict(cookies)

    def replace(self, pattern, repl, *args, **kwargs):
        """
        Replaces a regular expression pattern with repl in the headers, the
        request path and the body of the request. Encoded content will be
        decoded before replacement, and re-encoded afterwards.

        Returns the number of replacements made.
        """
        c = HTTPMessage.replace(self, pattern, repl, *args, **kwargs)
        self.path, pc = utils.safe_subn(
            pattern, repl, self.path, *args, **kwargs
        )
        c += pc
        return c
class HTTPResponse(HTTPMessage):
    """
    An HTTP response.

    Exposes the following attributes:

        httpversion: HTTP version tuple, e.g. (1,1)

        code: HTTP response code

        msg: HTTP response message

        headers: ODict object

        content: Content of the response, None, or CONTENT_MISSING if there
        is content associated, but not present. CONTENT_MISSING evaluates
        to False to make checking for the presence of content natural.

        timestamp_start: Timestamp indicating when request transmission started

        timestamp_end: Timestamp indicating when request transmission ended
    """
    def __init__(self, httpversion, code, msg, headers, content, timestamp_start=None,
                 timestamp_end=None):
        assert isinstance(headers, ODictCaseless) or headers is None
        HTTPMessage.__init__(
            self,
            httpversion,
            headers,
            content,
            timestamp_start,
            timestamp_end
        )
        self.code = code
        self.msg = msg
        # Is this request replayed?
        self.is_replay = False
        # When True, the body is streamed to the client rather than buffered
        # (content is then CONTENT_MISSING); set e.g. by inline scripts.
        self.stream = False

    # Extend the serialized state with the response-specific attributes.
    _stateobject_attributes = HTTPMessage._stateobject_attributes.copy()
    _stateobject_attributes.update(
        code=int,
        msg=str
    )

    # BUGFIX: a previous revision defined
    #     @classmethod
    #     def get_decoded_content(self):
    #         return self.get_decoded_content
    # which shadowed the working HTTPMessage.get_decoded_content with a
    # classmethod returning the method object itself. The broken override
    # has been removed so the inherited implementation is used.

    @classmethod
    def from_state(cls, state):
        """Reconstruct a response from a previously serialized state dict."""
        f = cls(None, None, None, None, None)
        f.load_state(state)
        return f

    def __repr__(self):
        size = utils.pretty_size(len(self.content)) if self.content else "content missing"
        return "<HTTPResponse: {code} {msg} ({contenttype}, {size})>".format(
            code=self.code,
            msg=self.msg,
            contenttype=self.headers.get_first(
                "content-type", "unknown content type"
            ),
            size=size
        )

    @classmethod
    def from_stream(cls, rfile, request_method, include_body=True, body_size_limit=None):
        """
        Parse an HTTP response from a file stream
        """
        timestamp_start = utils.timestamp()

        if hasattr(rfile, "reset_timestamps"):
            rfile.reset_timestamps()

        httpversion, code, msg, headers, content = http.read_response(
            rfile,
            request_method,
            body_size_limit,
            include_body=include_body)

        if hasattr(rfile, "first_byte_timestamp"):
            # more accurate timestamp_start
            timestamp_start = rfile.first_byte_timestamp

        if include_body:
            timestamp_end = utils.timestamp()
        else:
            # Body not read yet; the caller fills in timestamp_end later.
            timestamp_end = None

        return HTTPResponse(
            httpversion,
            code,
            msg,
            headers,
            content,
            timestamp_start,
            timestamp_end
        )

    def _assemble_first_line(self):
        return 'HTTP/%s.%s %s %s' % \
            (self.httpversion[0], self.httpversion[1], self.code, self.msg)

    # Headers that must not be forwarded to the client verbatim.
    _headers_to_strip_off = ['Proxy-Connection',
                             'Alternate-Protocol',
                             'Alt-Svc']

    def _assemble_headers(self, preserve_transfer_encoding=False):
        # Work on a copy so the flow's stored headers stay untouched.
        headers = self.headers.copy()
        for k in self._headers_to_strip_off:
            del headers[k]
        if not preserve_transfer_encoding:
            del headers['Transfer-Encoding']

        # If content is defined (i.e. not None or CONTENT_MISSING), we always add a content-length header.
        if self.content or self.content == "":
            headers["Content-Length"] = [str(len(self.content))]

        return str(headers)

    def _assemble_head(self, preserve_transfer_encoding=False):
        return '%s\r\n%s\r\n' % (
            self._assemble_first_line(),
            self._assemble_headers(
                preserve_transfer_encoding=preserve_transfer_encoding
            )
        )

    def assemble(self):
        """
        Assembles the response for transmission to the client. We make some
        modifications to make sure interception works properly.

        Raises an Exception if the response cannot be assembled.
        """
        if self.content == CONTENT_MISSING:
            raise proxy.ProxyError(
                502,
                "Cannot assemble flow with CONTENT_MISSING"
            )
        head = self._assemble_head()
        if self.content:
            return head + self.content
        else:
            return head

    def _refresh_cookie(self, c, delta):
        """
        Takes a cookie string c and a time delta in seconds, and returns
        a refreshed cookie string.
        """
        c = Cookie.SimpleCookie(str(c))
        for i in c.values():
            if "expires" in i:
                d = parsedate_tz(i["expires"])
                if d:
                    d = mktime_tz(d) + delta
                    i["expires"] = formatdate(d)
                else:
                    # This can happen when the expires tag is invalid.
                    # reddit.com sends a an expires tag like this: "Thu, 31 Dec
                    # 2037 23:59:59 GMT", which is valid RFC 1123, but not
                    # strictly correct according to the cookie spec. Browsers
                    # appear to parse this tolerantly - maybe we should too.
                    # For now, we just ignore this.
                    del i["expires"]
        return c.output(header="").strip()

    def refresh(self, now=None):
        """
        This fairly complex and heuristic function refreshes a server
        response for replay.

            - It adjusts date, expires and last-modified headers.
            - It adjusts cookie expiration.
        """
        if not now:
            now = time.time()
        delta = now - self.timestamp_start
        refresh_headers = [
            "date",
            "expires",
            "last-modified",
        ]
        for i in refresh_headers:
            if i in self.headers:
                d = parsedate_tz(self.headers[i][0])
                if d:
                    new = mktime_tz(d) + delta
                    self.headers[i] = [formatdate(new)]
        c = []
        for i in self.headers["set-cookie"]:
            c.append(self._refresh_cookie(i, delta))
        if c:
            self.headers["set-cookie"] = c

    def get_cookies(self):
        # Parse Set-Cookie header(s) into {name: (value, attributes)}.
        cookie_headers = self.headers.get("set-cookie")
        if not cookie_headers:
            return None
        cookies = []
        for header in cookie_headers:
            pairs = [pair.partition("=") for pair in header.split(';')]
            cookie_name = pairs[0][0]  # the key of the first key/value pairs
            cookie_value = pairs[0][2]  # the value of the first key/value pairs
            cookie_parameters = {
                key.strip().lower(): value.strip() for key, sep, value in pairs[1:]
            }
            cookies.append((cookie_name, (cookie_value, cookie_parameters)))
        return dict(cookies)
class HTTPFlow(Flow):
    """
    A HTTPFlow is a collection of objects representing a single HTTP
    transaction. The main attributes are:

        request: HTTPRequest object
        response: HTTPResponse object
        error: Error object
        server_conn: ServerConnection object
        client_conn: ClientConnection object

    Note that it's possible for a Flow to have both a response and an error
    object. This might happen, for instance, when a response was received
    from the server, but there was an error sending it back to the client.

    The following additional attributes are exposed:

        intercepting: Is this flow currently being intercepted?
        live: Does this flow have a live client connection?
    """
    def __init__(self, client_conn, server_conn, live=None):
        super(HTTPFlow, self).__init__("http", client_conn, server_conn, live)
        self.request = None
        """@type: HTTPRequest"""
        self.response = None
        """@type: HTTPResponse"""
        # FIXME: Should that rather be an attribute of Flow?
        self.intercepting = False

    # Extend Flow's serialized state with request/response objects.
    _stateobject_attributes = Flow._stateobject_attributes.copy()
    _stateobject_attributes.update(
        request=HTTPRequest,
        response=HTTPResponse
    )

    @classmethod
    def from_state(cls, state):
        """Reconstruct a flow from a previously serialized state dict."""
        f = cls(None, None)
        f.load_state(state)
        return f

    def __repr__(self):
        s = "<HTTPFlow"
        for a in ("request", "response", "error", "client_conn", "server_conn"):
            if getattr(self, a, False):
                s += "\r\n %s = {flow.%s}" % (a, a)
        s += ">"
        return s.format(flow=self)

    def copy(self):
        # Give the copy its own request/response objects so edits on the
        # duplicate don't leak into this flow.
        f = super(HTTPFlow, self).copy()
        if self.request:
            f.request = self.request.copy()
        if self.response:
            f.response = self.response.copy()
        return f

    def match(self, f):
        """
        Match this flow against a compiled filter expression. Returns True
        if matched, False if not.

        If f is a string, it will be compiled as a filter expression. If
        the expression is invalid, ValueError is raised.
        """
        if isinstance(f, basestring):
            from .. import filt
            f = filt.parse(f)
            if not f:
                raise ValueError("Invalid filter expression.")
        if f:
            return f(self)
        return True

    def kill(self, master):
        """
        Kill this request.
        """
        self.error = Error("Connection killed")
        self.intercepting = False
        # Answer the waiting handler with KILL, then replace reply so any
        # later replies become no-ops.
        self.reply(KILL)
        self.reply = controller.DummyReply()
        master.handle_error(self)

    def intercept(self):
        """
        Intercept this Flow. Processing will stop until accept_intercept is
        called.
        """
        self.intercepting = True

    def accept_intercept(self):
        """
        Continue with the flow - called after an intercept().
        """
        self.intercepting = False
        self.reply()

    def replace(self, pattern, repl, *args, **kwargs):
        """
        Replaces a regular expression pattern with repl in both request and
        response of the flow. Encoded content will be decoded before
        replacement, and re-encoded afterwards.

        Returns the number of replacements made.
        """
        c = self.request.replace(pattern, repl, *args, **kwargs)
        if self.response:
            c += self.response.replace(pattern, repl, *args, **kwargs)
        return c
class HttpAuthenticationError(Exception):
    """
    Raised when the client must (re-)authenticate against the proxy.

    Carries the authentication challenge headers and the HTTP 407 status
    code used to answer the client.
    """
    def __init__(self, auth_headers=None):
        super(HttpAuthenticationError, self).__init__(
            "Proxy Authentication Required"
        )
        self.code = 407
        self.headers = auth_headers

    def __repr__(self):
        return "Proxy Authentication Required"
class HTTPHandler(ProtocolHandler):
"""
HTTPHandler implements mitmproxys understanding of the HTTP protocol.
"""
def __init__(self, c):
    """
    c: the connection/handler context; its config.mode determines which
    request forms are expected on the way in and out.
    """
    super(HTTPHandler, self).__init__(c)
    # Expected request forms (relative/absolute/authority) depend on the
    # proxy mode (regular, transparent, upstream, reverse, ...).
    self.expected_form_in = c.config.mode.http_form_in
    self.expected_form_out = c.config.mode.http_form_out
    self.skip_authentication = False
def handle_messages(self):
    # Process flows on this client connection until handle_flow() signals
    # (by returning False) that the connection should be torn down.
    while self.handle_flow():
        pass
def get_response_from_server(self, flow):
    """
    Send flow.request upstream and populate flow.response.

    Retries once on a dropped server connection. The response headers are
    read first; the body is then either streamed (flow.response.stream set
    by the "responseheaders" hook) or read in full.
    """
    self.c.establish_server_connection()
    request_raw = flow.request.assemble()

    for attempt in (0, 1):
        try:
            self.c.server_conn.send(request_raw)
            # Only get the headers at first...
            flow.response = HTTPResponse.from_stream(
                self.c.server_conn.rfile, flow.request.method,
                body_size_limit=self.c.config.body_size_limit,
                include_body=False
            )
            break
        except (tcp.NetLibDisconnect, http.HttpErrorConnClosed), v:
            self.c.log(
                "error in server communication: %s" % repr(v),
                level="debug"
            )
            if attempt == 0:
                # In any case, we try to reconnect at least once. This is
                # necessary because it might be possible that we already
                # initiated an upstream connection after clientconnect that
                # has already been expired, e.g consider the following event
                # log:
                # > clientconnect (transparent mode destination known)
                # > serverconnect
                # > read n% of large request
                # > server detects timeout, disconnects
                # > read (100-n)% of large request
                # > send large request upstream
                self.c.server_reconnect()
            else:
                raise

    # call the appropriate script hook - this is an opportunity for an
    # inline script to set flow.stream = True
    flow = self.c.channel.ask("responseheaders", flow)
    if flow is None or flow == KILL:
        raise KillSignal()
    else:
        # now get the rest of the request body, if body still needs to be
        # read but not streaming this response
        if flow.response.stream:
            flow.response.content = CONTENT_MISSING
        else:
            flow.response.content = http.read_http_body(
                self.c.server_conn.rfile, flow.response.headers,
                self.c.config.body_size_limit,
                flow.request.method, flow.response.code, False
            )
    flow.response.timestamp_end = utils.timestamp()
def handle_flow(self):
    """
    Read, process and answer a single request on the client connection.

    Returns True if the connection should be kept open for the next flow,
    False if it should be closed.
    """
    flow = HTTPFlow(self.c.client_conn, self.c.server_conn, self.live)
    try:
        try:
            req = HTTPRequest.from_stream(
                self.c.client_conn.rfile,
                body_size_limit=self.c.config.body_size_limit
            )
        except tcp.NetLibDisconnect:
            # don't throw an error for disconnects that happen
            # before/between requests.
            return False
        self.c.log(
            "request",
            "debug",
            [req._assemble_first_line(req.form_in)]
        )
        ret = self.process_request(flow, req)
        if ret is not None:
            return ret

        # Be careful NOT to assign the request to the flow before
        # process_request completes. This is because the call can raise an
        # exception. If the request object is already attached, this results
        # in an Error object that has an attached request that has not been
        # sent through to the Master.
        flow.request = req
        request_reply = self.c.channel.ask("request", flow)
        if request_reply is None or request_reply == KILL:
            raise KillSignal()

        self.process_server_address(flow)  # The inline script may have changed request.host

        if isinstance(request_reply, HTTPResponse):
            # The script supplied the response itself - skip the server.
            flow.response = request_reply
        else:
            self.get_response_from_server(flow)

        # no further manipulation of self.c.server_conn beyond this point
        # we can safely set it as the final attribute value here.
        flow.server_conn = self.c.server_conn

        self.c.log("response", "debug", [flow.response._assemble_first_line()])
        response_reply = self.c.channel.ask("response", flow)
        if response_reply is None or response_reply == KILL:
            raise KillSignal()

        self.send_response_to_client(flow)

        if self.check_close_connection(flow):
            return False

        # We sent a CONNECT request to an upstream proxy.
        if flow.request.form_in == "authority" and flow.response.code == 200:
            # TODO: Possibly add headers (memory consumption/usefulness
            # tradeoff) Make sure to add state info before the actual
            # processing of the CONNECT request happens. During an SSL
            # upgrade, we may receive an SNI indication from the client,
            # which resets the upstream connection. If this is the case, we
            # must already re-issue the CONNECT request at this point.
            self.c.server_conn.state.append(
                (
                    "http", {
                        "state": "connect",
                        "host": flow.request.host,
                        "port": flow.request.port
                    }
                )
            )
            if not self.process_connect_request((flow.request.host, flow.request.port)):
                return False

        # If the user has changed the target server on this connection,
        # restore the original target server
        flow.live.restore_server()

        return True  # Next flow please.
    except (
        HttpAuthenticationError,
        http.HttpError,
        proxy.ProxyError,
        tcp.NetLibError,
    ), e:
        self.handle_error(e, flow)
    except KillSignal:
        self.c.log("Connection killed", "info")
    finally:
        flow.live = None  # Connection is not live anymore.
    return False
def handle_server_reconnect(self, state):
if state["state"] == "connect":
send_connect_request(
self.c.server_conn,
state["host"],
state["port"],
update_state=False
)
else: # pragma: nocover
raise RuntimeError("Unknown State: %s" % state["state"])
def handle_error(self, error, flow=None):
message = repr(error)
message_debug = None
if isinstance(error, tcp.NetLibDisconnect):
message = None
message_debug = "TCP connection closed unexpectedly."
elif "tlsv1 alert unknown ca" in message:
message = "TLSv1 Alert Unknown CA: The client does not trust the proxy's certificate."
elif "handshake error" in message:
message_debug = message
message = "SSL handshake error: The client may not trust the proxy's certificate."
if message:
self.c.log(message, level="info")
if message_debug:
self.c.log(message_debug, level="debug")
if flow:
# TODO: no flows without request or with both request and response
# at the moment.
if flow.request and not flow.response:
flow.error = Error(message or message_debug)
self.c.channel.ask("error", flow)
try:
code = getattr(error, "code", 502)
headers = getattr(error, "headers", None)
html_message = message or ""
if message_debug:
html_message += "<pre>%s</pre>" % message_debug
self.send_error(code, html_message, headers)
except:
pass
def send_error(self, code, message, headers):
response = http_status.RESPONSES.get(code, "Unknown")
html_content = """
<html>
<head>
<title>%d %s</title>
</head>
<body>%s</body>
</html>
""" % (code, response, message)
self.c.client_conn.wfile.write("HTTP/1.1 %s %s\r\n" % (code, response))
self.c.client_conn.wfile.write(
"Server: %s\r\n" % self.c.config.server_version
)
self.c.client_conn.wfile.write("Content-type: text/html\r\n")
self.c.client_conn.wfile.write(
"Content-Length: %d\r\n" % len(html_content)
)
if headers:
for key, value in headers.items():
self.c.client_conn.wfile.write("%s: %s\r\n" % (key, value))
self.c.client_conn.wfile.write("Connection: close\r\n")
self.c.client_conn.wfile.write("\r\n")
self.c.client_conn.wfile.write(html_content)
self.c.client_conn.wfile.flush()
    def process_request(self, flow, request):
        """
        @returns:
            True, if the request should not be sent upstream
            False, if the connection should be aborted
            None, if the request should be sent upstream
            (a status code != None should be returned directly by handle_flow)
        """
        if not self.skip_authentication:
            self.authenticate(request)
        # Determine .scheme, .host and .port attributes
        # For absolute-form requests, they are directly given in the request.
        # For authority-form requests, we only need to determine the request scheme.
        # For relative-form requests, we need to determine host and port as well.
        if not request.scheme:
            # Infer the scheme from whether the server side already speaks TLS.
            request.scheme = "https" if flow.server_conn and flow.server_conn.ssl_established else "http"
        if not request.host:
            # Host/Port Complication: In upstream mode, use the server we CONNECTed to,
            # not the upstream proxy.
            if flow.server_conn:
                for s in flow.server_conn.state:
                    if s[0] == "http" and s[1]["state"] == "connect":
                        request.host, request.port = s[1]["host"], s[1]["port"]
            # Fall back to the address of the live server connection.
            if not request.host and flow.server_conn:
                request.host, request.port = flow.server_conn.address.host, flow.server_conn.address.port
        # Now we can process the request.
        if request.form_in == "authority":
            # authority-form == a CONNECT request.
            if self.c.client_conn.ssl_established:
                raise http.HttpError(
                    400,
                    "Must not CONNECT on already encrypted connection"
                )
            if self.c.config.mode == "regular":
                # Regular mode: we terminate the CONNECT ourselves and tunnel.
                self.c.set_server_address((request.host, request.port))
                # Update server_conn attribute on the flow
                flow.server_conn = self.c.server_conn
                self.c.establish_server_connection()
                self.c.client_conn.send(
                    'HTTP/1.1 200 Connection established\r\n' +
                    'Content-Length: 0\r\n' +
                    ('Proxy-agent: %s\r\n' % self.c.config.server_version) +
                    '\r\n'
                )
                return self.process_connect_request(self.c.server_conn.address)
            elif self.c.config.mode == "upstream":
                # Upstream mode: forward the CONNECT to the next proxy.
                return None
            else:
                # CONNECT should never occur if we don't expect absolute-form
                # requests
                pass
        elif request.form_in == self.expected_form_in:
            # Normalize the outgoing form (relative vs absolute) per mode.
            request.form_out = self.expected_form_out
            if request.form_in == "absolute":
                if request.scheme != "http":
                    raise http.HttpError(
                        400,
                        "Invalid request scheme: %s" % request.scheme
                    )
                if self.c.config.mode == "regular":
                    # Update info so that an inline script sees the correct
                    # value at flow.server_conn
                    self.c.set_server_address((request.host, request.port))
                    flow.server_conn = self.c.server_conn
            return None
        # Anything else is a protocol violation for this proxy mode.
        raise http.HttpError(
            400, "Invalid HTTP request form (expected: %s, got: %s)" % (
                self.expected_form_in, request.form_in
            )
        )
    def process_server_address(self, flow):
        # Depending on the proxy mode, server handling is entirely different
        # We provide a mostly unified API to the user, which needs to be
        # unfiddled here
        # ( See also: https://github.com/mitmproxy/mitmproxy/issues/337 )
        address = netlib.tcp.Address((flow.request.host, flow.request.port))
        ssl = (flow.request.scheme == "https")
        if self.c.config.mode == "upstream":
            # The connection to the upstream proxy may have a state we may need
            # to take into account.
            # Find the last host we CONNECTed to (if any) on this connection.
            connected_to = None
            for s in flow.server_conn.state:
                if s[0] == "http" and s[1]["state"] == "connect":
                    connected_to = tcp.Address((s[1]["host"], s[1]["port"]))
            # We need to reconnect if the current flow either requires a
            # (possibly impossible) change to the connection state, e.g. the
            # host has changed but we already CONNECTed somewhere else.
            needs_server_change = (
                ssl != self.c.server_conn.ssl_established
                or
                # HTTP proxying is "stateless", CONNECT isn't.
                (connected_to and address != connected_to)
            )
            if needs_server_change:
                # force create new connection to the proxy server to reset state
                self.live.change_server(self.c.server_conn.address, force=True)
                if ssl:
                    # Tunnel through the upstream proxy, then upgrade to TLS.
                    send_connect_request(
                        self.c.server_conn,
                        address.host,
                        address.port
                    )
                    self.c.establish_ssl(server=True)
        else:
            # If we're not in upstream mode, we just want to update the host and
            # possibly establish TLS. This is a no op if the addresses match.
            self.live.change_server(address, ssl=ssl)
        # Whatever happened above, expose the final server connection on the flow.
        flow.server_conn = self.c.server_conn
    def send_response_to_client(self, flow):
        """Deliver flow.response to the client, either fully buffered or
        streamed chunk-by-chunk from the server connection."""
        if not flow.response.stream:
            # no streaming:
            # we already received the full response from the server and can send
            # it to the client straight away.
            self.c.client_conn.send(flow.response.assemble())
        else:
            # streaming:
            # First send the headers and then transfer the response
            # incrementally:
            h = flow.response._assemble_head(preserve_transfer_encoding=True)
            self.c.client_conn.send(h)
            # Read the body from the server in 4096-byte chunks and relay each
            # part to the client as it arrives.
            for chunk in http.read_http_body_chunked(self.c.server_conn.rfile,
                flow.response.headers,
                self.c.config.body_size_limit, flow.request.method,
                flow.response.code, False, 4096):
                for part in chunk:
                    self.c.client_conn.wfile.write(part)
                self.c.client_conn.wfile.flush()
            # Mark completion only after the last chunk is flushed.
            flow.response.timestamp_end = utils.timestamp()
def check_close_connection(self, flow):
"""
Checks if the connection should be closed depending on the HTTP
semantics. Returns True, if so.
"""
close_connection = (
http.connection_close(flow.request.httpversion, flow.request.headers) or
http.connection_close(flow.response.httpversion, flow.response.headers) or
http.expected_http_body_size(flow.response.headers, False, flow.request.method,
flow.response.code) == -1)
if close_connection:
if flow.request.form_in == "authority" and flow.response.code == 200:
# Workaround for https://github.com/mitmproxy/mitmproxy/issues/313:
# Some proxies (e.g. Charles) send a CONNECT response with HTTP/1.0 and no Content-Length header
pass
else:
return True
return False
    def process_connect_request(self, address):
        """
        Process a CONNECT request.
        Returns True if the CONNECT request has been processed successfully.
        Returns False, if the connection should be closed immediately.
        """
        address = tcp.Address.wrap(address)
        if self.c.config.check_ignore(address):
            # Ignored host: pass traffic through untouched as raw TCP.
            self.c.log("Ignore host: %s:%s" % address(), "info")
            TCPHandler(self.c, log=False).handle_messages()
            return False
        else:
            # After a CONNECT, subsequent requests on this connection are
            # tunnelled and therefore relative-form, with auth already done.
            self.expected_form_in = "relative"
            self.expected_form_out = "relative"
            self.skip_authentication = True
            # In practice, nobody issues a CONNECT request to send unencrypted HTTP requests afterwards.
            # If we don't delegate to TCP mode, we should always negotiate a SSL connection.
            #
            # FIXME:
            # Turns out the previous statement isn't entirely true. Chrome on Windows CONNECTs to :80
            # if an explicit proxy is configured and a websocket connection should be established.
            # We don't support websocket at the moment, so it fails anyway, but we should come up with
            # a better solution to this if we start to support WebSockets.
            should_establish_ssl = (
                address.port in self.c.config.ssl_ports
                or
                not self.c.config.check_tcp(address)
            )
            if should_establish_ssl:
                # Upgrade both sides (client and server) to TLS.
                self.c.log("Received CONNECT request to SSL port. Upgrading to SSL...", "debug")
                self.c.establish_ssl(server=True, client=True)
                self.c.log("Upgrade to SSL completed.", "debug")
            if self.c.config.check_tcp(address):
                # Hosts in TCP mode get generic (possibly TLS-wrapped) passthrough.
                self.c.log("Generic TCP mode for host: %s:%s" % address(), "info")
                TCPHandler(self.c).handle_messages()
                return False
            return True
def authenticate(self, request):
if self.c.config.authenticator:
if self.c.config.authenticator.authenticate(request.headers):
self.c.config.authenticator.clean(request.headers)
else:
raise HttpAuthenticationError(
self.c.config.authenticator.auth_challenge_headers())
return request.headers
class RequestReplayThread(threading.Thread):
    """Replay a single captured request on a background thread.

    Connects directly to the target server (or via the upstream proxy in
    upstream mode), sends the flow's request, and feeds the resulting
    response back through the master's "request"/"response"/"error" channel
    hooks, mirroring the live-proxy flow lifecycle.
    """
    name = "RequestReplayThread"
    def __init__(self, config, flow, masterq, should_exit):
        self.config, self.flow, self.channel = config, flow, controller.Channel(masterq, should_exit)
        threading.Thread.__init__(self)
    def run(self):
        r = self.flow.request
        # The request form is mutated below depending on mode; restore it in
        # the finally clause so the flow object is left unchanged.
        form_out_backup = r.form_out
        try:
            self.flow.response = None
            # Give scripts/handlers a chance to modify or answer the request.
            request_reply = self.channel.ask("request", self.flow)
            if request_reply is None or request_reply == KILL:
                raise KillSignal()
            elif isinstance(request_reply, HTTPResponse):
                # A handler supplied the response directly; skip the network.
                self.flow.response = request_reply
            else:
                # In all modes, we directly connect to the server displayed
                if self.config.mode == "upstream":
                    # NOTE(review): get_upstream_server is called on
                    # self.config.mode, which is compared to the string
                    # "upstream" above - presumably mode is a mode object
                    # with string semantics; verify against config code.
                    server_address = self.config.mode.get_upstream_server(self.flow.client_conn)[2:]
                    server = ServerConnection(server_address)
                    server.connect()
                    if r.scheme == "https":
                        # Tunnel a CONNECT through the upstream proxy first.
                        send_connect_request(server, r.host, r.port)
                        server.establish_ssl(self.config.clientcerts, sni=self.flow.server_conn.sni)
                        r.form_out = "relative"
                    else:
                        r.form_out = "absolute"
                else:
                    server_address = (r.host, r.port)
                    server = ServerConnection(server_address)
                    server.connect()
                    if r.scheme == "https":
                        server.establish_ssl(self.config.clientcerts, sni=self.flow.server_conn.sni)
                    r.form_out = "relative"
                server.send(r.assemble())
                self.flow.server_conn = server
                self.flow.response = HTTPResponse.from_stream(server.rfile, r.method,
                    body_size_limit=self.config.body_size_limit)
            response_reply = self.channel.ask("response", self.flow)
            if response_reply is None or response_reply == KILL:
                raise KillSignal()
        except (proxy.ProxyError, http.HttpError, tcp.NetLibError) as v:
            self.flow.error = Error(repr(v))
            self.channel.ask("error", self.flow)
        except KillSignal:
            self.channel.tell("log", proxy.Log("Connection killed", "info"))
        finally:
            r.form_out = form_out_backup
| |
# An Python interface to the Scintilla control.
#
# Exposes Python classes that allow you to use Scintilla as
# a "standard" MFC edit control (eg, control.GetTextLength(), control.GetSel()
# plus many Scintilla specific features (eg control.SCIAddStyledText())
from pywin.mfc import window
from pywin import default_scintilla_encoding
import win32con
import win32ui
import win32api
import array
import struct
import string
import os
import scintillacon
# Load Scintilla.dll to get access to the control.
# We expect to find this in the same directory as win32ui.pyd
# Try, in order: a debug build next to win32ui (debug Pythonwin only),
# a release build next to win32ui, then the normal Windows DLL search path.
dllid = None
if win32ui.debug: # If running _d version of Pythonwin...
    try:
        dllid = win32api.LoadLibrary(os.path.join(os.path.split(win32ui.__file__)[0], "Scintilla_d.DLL"))
    except win32api.error: # Not there - we dont _need_ a debug ver, so ignore this error.
        pass
if dllid is None:
    try:
        dllid = win32api.LoadLibrary(os.path.join(os.path.split(win32ui.__file__)[0], "Scintilla.DLL"))
    except win32api.error:
        pass
if dllid is None:
    # Still not there - lets see if Windows can find it by searching?
    dllid = win32api.LoadLibrary("Scintilla.DLL")
# null_byte is str in py2k, bytes on py3k
null_byte = "\0".encode('ascii')
## These are from Richedit.h - need to add to win32con or commctrl
# Scintilla emulates these RichEdit messages, so we can drive it with them.
EM_GETTEXTRANGE = 1099
EM_EXLINEFROMCHAR = 1078
EM_FINDTEXTEX = 1103
EM_GETSELTEXT = 1086
EM_EXSETSEL = win32con.WM_USER + 55
class ScintillaNotification:
    """A simple record type: every keyword argument becomes an attribute.

    Used by SCIUnpackNotifyMessage to expose the fields of a Scintilla
    SCNotification structure by name.
    """
    def __init__(self, **args):
        for name, value in args.items():
            setattr(self, name, value)
class ScintillaControlInterface:
    """Pythonic wrappers for the raw Scintilla (SCI_*) window messages.

    Designed as a mix-in: the concrete class must provide SendMessage and
    SendScintilla (see CScintillaEdit below), which forward a message id
    plus wParam/lParam to the underlying control.
    """
    def SCIUnpackNotifyMessage(self, msg):
        # Unpack a Scintilla SCNotification structure into a
        # ScintillaNotification object with named attributes.
        format = "iiiiPiiiPPiiii"
        bytes = win32ui.GetBytes( msg, struct.calcsize(format) )
        position, ch, modifiers, modificationType, text_ptr, \
            length, linesAdded, msg, wParam, lParam, line, \
            foldLevelNow, foldLevelPrev, margin \
            = struct.unpack(format, bytes)
        return ScintillaNotification(position=position,ch=ch,
            modifiers=modifiers, modificationType=modificationType,
            text_ptr = text_ptr, length=length, linesAdded=linesAdded,
            msg = msg, wParam = wParam, lParam = lParam,
            line = line, foldLevelNow = foldLevelNow, foldLevelPrev = foldLevelPrev,
            margin = margin)
    def SCIAddText(self, text):
        self.SendMessage(scintillacon.SCI_ADDTEXT, text.encode(default_scintilla_encoding))
    def SCIAddStyledText(self, text, style = None):
        # If style is None, text is assumed to be a "native" Scintilla buffer.
        # If style is specified, text is a normal string, and the style is
        # assumed to apply to the entire string.
        if style is not None:
            # Interleave each character with its style byte, as the
            # SCI_ADDSTYLEDTEXT message expects.
            text = list(map(lambda char, style=style: char+chr(style), text))
            text = ''.join(text)
        self.SendMessage(scintillacon.SCI_ADDSTYLEDTEXT, text.encode(default_scintilla_encoding))
    def SCIInsertText(self, text, pos=-1):
        # SCIInsertText allows unicode or bytes - but if they are bytes,
        # the caller must ensure it is encoded correctly.
        if isinstance(text, unicode):
            text = text.encode(default_scintilla_encoding)
        self.SendScintilla(scintillacon.SCI_INSERTTEXT, pos, text + null_byte)
    def SCISetSavePoint(self):
        self.SendScintilla(scintillacon.SCI_SETSAVEPOINT)
    def SCISetUndoCollection(self, collectFlag):
        self.SendScintilla(scintillacon.SCI_SETUNDOCOLLECTION, collectFlag)
    def SCIBeginUndoAction(self):
        self.SendScintilla(scintillacon.SCI_BEGINUNDOACTION)
    def SCIEndUndoAction(self):
        self.SendScintilla(scintillacon.SCI_ENDUNDOACTION)
    def SCIGetCurrentPos(self):
        return self.SendScintilla(scintillacon.SCI_GETCURRENTPOS)
    def SCIGetCharAt(self, pos):
        # Must ensure char is unsigned!
        return chr(self.SendScintilla(scintillacon.SCI_GETCHARAT, pos) & 0xFF)
    def SCIGotoLine(self, line):
        self.SendScintilla(scintillacon.SCI_GOTOLINE, line)
    def SCIBraceMatch(self, pos, maxReStyle):
        return self.SendScintilla(scintillacon.SCI_BRACEMATCH, pos, maxReStyle)
    def SCIBraceHighlight(self, pos, posOpposite):
        return self.SendScintilla(scintillacon.SCI_BRACEHIGHLIGHT, pos, posOpposite)
    def SCIBraceBadHighlight(self, pos):
        return self.SendScintilla(scintillacon.SCI_BRACEBADLIGHT, pos)
    ####################################
    # Styling
    # def SCIColourise(self, start=0, end=-1):
    #   NOTE - dependent on of we use builtin lexer, so handled below.
    def SCIGetEndStyled(self):
        return self.SendScintilla(scintillacon.SCI_GETENDSTYLED)
    def SCIStyleSetFore(self, num, v):
        return self.SendScintilla(scintillacon.SCI_STYLESETFORE, num, v)
    def SCIStyleSetBack(self, num, v):
        return self.SendScintilla(scintillacon.SCI_STYLESETBACK, num, v)
    def SCIStyleSetEOLFilled(self, num, v):
        return self.SendScintilla(scintillacon.SCI_STYLESETEOLFILLED, num, v)
    def SCIStyleSetFont(self, num, name, characterset=0):
        # Font name must be passed as a NUL-terminated encoded buffer.
        buff = (name + "\0").encode(default_scintilla_encoding)
        self.SendScintilla(scintillacon.SCI_STYLESETFONT, num, buff)
        self.SendScintilla(scintillacon.SCI_STYLESETCHARACTERSET, num, characterset)
    def SCIStyleSetBold(self, num, bBold):
        self.SendScintilla(scintillacon.SCI_STYLESETBOLD, num, bBold)
    def SCIStyleSetItalic(self, num, bItalic):
        self.SendScintilla(scintillacon.SCI_STYLESETITALIC, num, bItalic)
    def SCIStyleSetSize(self, num, size):
        self.SendScintilla(scintillacon.SCI_STYLESETSIZE, num, size)
    def SCIGetViewWS(self):
        return self.SendScintilla(scintillacon.SCI_GETVIEWWS)
    def SCISetViewWS(self, val):
        # Normalize to a boolean, then repaint so the change is visible.
        self.SendScintilla(scintillacon.SCI_SETVIEWWS, not (val==0))
        self.InvalidateRect()
    def SCISetIndentationGuides(self, val):
        self.SendScintilla(scintillacon.SCI_SETINDENTATIONGUIDES, val)
    def SCIGetIndentationGuides(self):
        return self.SendScintilla(scintillacon.SCI_GETINDENTATIONGUIDES)
    def SCISetIndent(self, val):
        self.SendScintilla(scintillacon.SCI_SETINDENT, val)
    def SCIGetIndent(self, val):
        # NOTE(review): the 'val' parameter is unused; kept for
        # backward-compatibility with existing callers.
        return self.SendScintilla(scintillacon.SCI_GETINDENT)
    def SCIGetViewEOL(self):
        return self.SendScintilla(scintillacon.SCI_GETVIEWEOL)
    def SCISetViewEOL(self, val):
        self.SendScintilla(scintillacon.SCI_SETVIEWEOL, not(val==0))
        self.InvalidateRect()
    def SCISetTabWidth(self, width):
        self.SendScintilla(scintillacon.SCI_SETTABWIDTH, width, 0)
    def SCIStartStyling(self, pos, mask):
        self.SendScintilla(scintillacon.SCI_STARTSTYLING, pos, mask)
    def SCISetStyling(self, pos, attr):
        self.SendScintilla(scintillacon.SCI_SETSTYLING, pos, attr)
    def SCISetStylingEx(self, ray): # ray is an array.
        # Pass the raw buffer address/length of the array to the control.
        address, length = ray.buffer_info()
        self.SendScintilla(scintillacon.SCI_SETSTYLINGEX, length, address)
    def SCIGetStyleAt(self, pos):
        return self.SendScintilla(scintillacon.SCI_GETSTYLEAT, pos)
    def SCISetMarginWidth(self, width):
        # Convenience form: always targets margin 1.
        self.SendScintilla(scintillacon.SCI_SETMARGINWIDTHN, 1, width)
    def SCISetMarginWidthN(self, n, width):
        self.SendScintilla(scintillacon.SCI_SETMARGINWIDTHN, n, width)
    def SCISetFoldFlags(self, flags):
        self.SendScintilla(scintillacon.SCI_SETFOLDFLAGS, flags)
    # Markers
    def SCIMarkerDefineAll(self, markerNum, markerType, fore, back):
        # Define a marker and set both its colours in one call.
        self.SCIMarkerDefine(markerNum, markerType)
        self.SCIMarkerSetFore(markerNum, fore)
        self.SCIMarkerSetBack(markerNum, back)
    def SCIMarkerDefine(self, markerNum, markerType):
        self.SendScintilla(scintillacon.SCI_MARKERDEFINE, markerNum, markerType)
    def SCIMarkerSetFore(self, markerNum, fore):
        self.SendScintilla(scintillacon.SCI_MARKERSETFORE, markerNum, fore)
    def SCIMarkerSetBack(self, markerNum, back):
        self.SendScintilla(scintillacon.SCI_MARKERSETBACK, markerNum, back)
    def SCIMarkerAdd(self, lineNo, markerNum):
        self.SendScintilla(scintillacon.SCI_MARKERADD, lineNo, markerNum)
    def SCIMarkerDelete(self, lineNo, markerNum):
        self.SendScintilla(scintillacon.SCI_MARKERDELETE, lineNo, markerNum)
    def SCIMarkerDeleteAll(self, markerNum=-1):
        self.SendScintilla(scintillacon.SCI_MARKERDELETEALL, markerNum)
    def SCIMarkerGet(self, lineNo):
        return self.SendScintilla(scintillacon.SCI_MARKERGET, lineNo)
    def SCIMarkerNext(self, lineNo, markerNum):
        return self.SendScintilla(scintillacon.SCI_MARKERNEXT, lineNo, markerNum)
    def SCICancel(self):
        self.SendScintilla(scintillacon.SCI_CANCEL)
    # AutoComplete
    def SCIAutoCShow(self, text):
        # Accepts either a pre-joined string or a list/tuple of completions.
        if type(text) in [type([]), type(())]:
            text = ' '.join(text)
        buff = (text + "\0").encode(default_scintilla_encoding)
        return self.SendScintilla(scintillacon.SCI_AUTOCSHOW, 0, buff)
    def SCIAutoCCancel(self):
        self.SendScintilla(scintillacon.SCI_AUTOCCANCEL)
    def SCIAutoCActive(self):
        return self.SendScintilla(scintillacon.SCI_AUTOCACTIVE)
    def SCIAutoCComplete(self):
        return self.SendScintilla(scintillacon.SCI_AUTOCCOMPLETE)
    def SCIAutoCStops(self, stops):
        buff = (stops + "\0").encode(default_scintilla_encoding)
        self.SendScintilla(scintillacon.SCI_AUTOCSTOPS, 0, buff)
    def SCIAutoCSetAutoHide(self, hide):
        self.SendScintilla(scintillacon.SCI_AUTOCSETAUTOHIDE, hide)
    def SCIAutoCSetFillups(self, fillups):
        self.SendScintilla(scintillacon.SCI_AUTOCSETFILLUPS, fillups)
    # Call tips
    def SCICallTipShow(self, text, pos=-1):
        # Default the tip position to the start of the current selection.
        if pos==-1: pos = self.GetSel()[0]
        buff = (text + "\0").encode(default_scintilla_encoding)
        self.SendScintilla(scintillacon.SCI_CALLTIPSHOW, pos, buff)
    def SCICallTipCancel(self):
        self.SendScintilla(scintillacon.SCI_CALLTIPCANCEL)
    def SCICallTipActive(self):
        return self.SendScintilla(scintillacon.SCI_CALLTIPACTIVE)
    def SCICallTipPosStart(self):
        return self.SendScintilla(scintillacon.SCI_CALLTIPPOSSTART)
    def SCINewline(self):
        self.SendScintilla(scintillacon.SCI_NEWLINE)
    # Lexer etc
    def SCISetKeywords(self, keywords, kw_list_no = 0):
        buff = (keywords+"\0").encode(default_scintilla_encoding)
        self.SendScintilla(scintillacon.SCI_SETKEYWORDS, kw_list_no, buff)
    def SCISetProperty(self, name, value):
        # Both name and value are passed by raw buffer address; the arrays
        # must stay alive until SendScintilla returns.
        name_buff = array.array('b', (name + '\0').encode(default_scintilla_encoding))
        val_buff = array.array("b", (str(value)+'\0').encode(default_scintilla_encoding))
        address_name_buffer = name_buff.buffer_info()[0]
        address_val_buffer = val_buff.buffer_info()[0]
        self.SendScintilla(scintillacon.SCI_SETPROPERTY, address_name_buffer, address_val_buffer)
    def SCISetStyleBits(self, nbits):
        self.SendScintilla(scintillacon.SCI_SETSTYLEBITS, nbits)
    # Folding
    def SCIGetFoldLevel(self, lineno):
        return self.SendScintilla(scintillacon.SCI_GETFOLDLEVEL, lineno)
    def SCIToggleFold(self, lineno):
        return self.SendScintilla(scintillacon.SCI_TOGGLEFOLD, lineno)
    def SCIEnsureVisible(self, lineno):
        self.SendScintilla(scintillacon.SCI_ENSUREVISIBLE, lineno)
    def SCIGetFoldExpanded(self, lineno):
        return self.SendScintilla(scintillacon.SCI_GETFOLDEXPANDED, lineno)
    # right edge
    def SCISetEdgeColumn(self, edge):
        self.SendScintilla(scintillacon.SCI_SETEDGECOLUMN, edge)
    def SCIGetEdgeColumn(self):
        return self.SendScintilla(scintillacon.SCI_GETEDGECOLUMN)
    def SCISetEdgeMode(self, mode):
        self.SendScintilla(scintillacon.SCI_SETEDGEMODE, mode)
    def SCIGetEdgeMode(self):
        return self.SendScintilla(scintillacon.SCI_GETEDGEMODE)
    def SCISetEdgeColor(self, color):
        self.SendScintilla(scintillacon.SCI_SETEDGECOLOUR, color)
    def SCIGetEdgeColor(self):
        return self.SendScintilla(scintillacon.SCI_GETEDGECOLOR)
    # Multi-doc
    def SCIGetDocPointer(self):
        return self.SendScintilla(scintillacon.SCI_GETDOCPOINTER)
    def SCISetDocPointer(self, p):
        return self.SendScintilla(scintillacon.SCI_SETDOCPOINTER, 0, p)
    def SCISetWrapMode(self, mode):
        return self.SendScintilla(scintillacon.SCI_SETWRAPMODE, mode)
    def SCIGetWrapMode(self):
        return self.SendScintilla(scintillacon.SCI_GETWRAPMODE)
class CScintillaEditInterface(ScintillaControlInterface):
    """Standard MFC edit-control style methods implemented over Scintilla.

    Many of these use the RichEdit EM_* messages that Scintilla also
    implements (EM_FINDTEXTEX, EM_GETSELTEXT, EM_EXSETSEL, ...).
    """
    def close(self):
        # Break the reference to any attached colorizer so teardown is clean.
        self.colorizer = None
    def Clear(self):
        # (The original file defined Clear twice, identically; the duplicate
        # definition has been removed - behaviour is unchanged.)
        self.SendScintilla(win32con.WM_CLEAR)
    def FindText(self, flags, range, findText):
        """ LPARAM for EM_FINDTEXTEX:
        typedef struct _findtextex {
        CHARRANGE chrg;
        LPCTSTR lpstrText;
        CHARRANGE chrgText;} FINDTEXTEX;
        typedef struct _charrange {
        LONG cpMin;
        LONG cpMax;} CHARRANGE;
        """
        findtextex_fmt='llPll'
        ## Scintilla does not handle unicode in EM_FINDTEXT msg (FINDTEXTEX struct)
        txt_buff = (findText+'\0').encode(default_scintilla_encoding)
        txt_array = array.array('b', txt_buff)
        ft_buff = struct.pack(findtextex_fmt, range[0], range[1], txt_array.buffer_info()[0], 0, 0)
        ft_array = array.array('b', ft_buff)
        rc = self.SendScintilla(EM_FINDTEXTEX, flags, ft_array.buffer_info()[0])
        # The control writes the found range back into the struct we passed.
        ftUnpacked = struct.unpack(findtextex_fmt, ft_array)
        return rc, (ftUnpacked[3], ftUnpacked[4])
    def GetSel(self):
        """Return the selection as an ordered (start, end) tuple."""
        currentPos = self.SendScintilla(scintillacon.SCI_GETCURRENTPOS)
        anchorPos = self.SendScintilla(scintillacon.SCI_GETANCHOR)
        # The anchor may be before or after the caret; normalize the order.
        # (An unreachable trailing 'return currentPos;' has been removed.)
        if currentPos < anchorPos:
            return (currentPos, anchorPos)
        return (anchorPos, currentPos)
    def GetSelText(self):
        """Return the currently selected text, decoded to unicode."""
        start, end = self.GetSel()
        txtBuf = array.array('b', null_byte * (end-start+1))
        addressTxtBuf = txtBuf.buffer_info()[0]
        # EM_GETSELTEXT is documented as returning the number of chars
        # not including the NULL, but scintilla includes the NULL. A
        # quick glance at the scintilla impl doesn't make this
        # obvious - the NULL is included in the 'selection' object
        # and reflected in the length of that 'selection' object.
        # I expect that is a bug in scintilla and may be fixed by now,
        # but we just blindly assume that the last char is \0 and
        # strip it.
        self.SendScintilla(EM_GETSELTEXT, 0, addressTxtBuf)
        return txtBuf.tostring()[:-1].decode(default_scintilla_encoding)
    def SetSel(self, start=0, end=None):
        """Set the selection.

        Accepts either (start, end), a single position, or a (start, end)
        tuple as the first argument. Negative positions mean end-of-buffer.
        """
        if isinstance(start, tuple):
            assert end is None, "If you pass a point in the first param, the second must be None"
            start, end = start
        elif end is None:
            end = start
        if start < 0: start = self.GetTextLength()
        if end < 0: end = self.GetTextLength()
        assert start <= self.GetTextLength(), "The start postion is invalid (%d/%d)" % (start, self.GetTextLength())
        assert end <= self.GetTextLength(), "The end postion is invalid (%d/%d)" % (end, self.GetTextLength())
        # Pass a CHARRANGE struct by address via EM_EXSETSEL.
        cr = struct.pack('ll', start, end)
        crBuff = array.array('b', cr)
        addressCrBuff = crBuff.buffer_info()[0]
        self.SendScintilla(EM_EXSETSEL, 0, addressCrBuff)
    def GetLineCount(self):
        return self.SendScintilla(win32con.EM_GETLINECOUNT)
    def LineFromChar(self, charPos=-1):
        """Return the line number containing charPos (default: selection start)."""
        if charPos==-1: charPos = self.GetSel()[0]
        assert charPos >= 0 and charPos <= self.GetTextLength(), "The charPos postion (%s) is invalid (max=%s)" % (charPos, self.GetTextLength())
        #return self.SendScintilla(EM_EXLINEFROMCHAR, charPos)
        # EM_EXLINEFROMCHAR puts charPos in lParam, not wParam
        return self.SendScintilla(EM_EXLINEFROMCHAR, 0, charPos)
    def LineIndex(self, line):
        return self.SendScintilla(win32con.EM_LINEINDEX, line)
    def ScrollCaret(self):
        return self.SendScintilla(win32con.EM_SCROLLCARET)
    def GetCurLineNumber(self):
        return self.LineFromChar(self.SCIGetCurrentPos())
    def GetTextLength(self):
        return self.SendScintilla(scintillacon.SCI_GETTEXTLENGTH)
    def GetTextRange(self, start = 0, end = -1, decode = True):
        """Return the text in [start, end); end=-1 means end of buffer."""
        if end == -1: end = self.SendScintilla(scintillacon.SCI_GETTEXTLENGTH)
        assert end>=start, "Negative index requested (%d/%d)" % (start, end)
        assert start >= 0 and start <= self.GetTextLength(), "The start postion is invalid"
        assert end >= 0 and end <= self.GetTextLength(), "The end postion is invalid"
        initer = null_byte * (end - start + 1)
        buff = array.array('b', initer)
        addressBuffer = buff.buffer_info()[0]
        # Pass a TEXTRANGE struct (cpMin, cpMax, lpstrText) by address.
        tr = struct.pack('llP', start, end, addressBuffer)
        trBuff = array.array('b', tr)
        addressTrBuff = trBuff.buffer_info()[0]
        num_bytes = self.SendScintilla(EM_GETTEXTRANGE, 0, addressTrBuff)
        ret = buff.tostring()[:num_bytes]
        if decode:
            ret = ret.decode(default_scintilla_encoding)
        return ret
    def ReplaceSel(self, str):
        buff = (str + "\0").encode(default_scintilla_encoding)
        self.SendScintilla(scintillacon.SCI_REPLACESEL, 0, buff)
    def GetLine(self, line=-1):
        """Return the text of the given line (default: the current line)."""
        if line == -1: line = self.GetCurLineNumber()
        start = self.LineIndex(line)
        end = self.LineIndex(line+1)
        return self.GetTextRange(start, end)
    def SetReadOnly(self, flag = 1):
        return self.SendScintilla(win32con.EM_SETREADONLY, flag)
    def LineScroll(self, lines, cols=0):
        return self.SendScintilla(win32con.EM_LINESCROLL, cols, lines)
    def GetFirstVisibleLine(self):
        return self.SendScintilla(win32con.EM_GETFIRSTVISIBLELINE)
    def SetWordWrap(self, mode):
        if mode != win32ui.CRichEditView_WrapNone:
            raise ValueError("We dont support word-wrap (I dont think :-)")
class CScintillaColorEditInterface(CScintillaEditInterface):
    """Adds plug-in syntax-colorizer support on top of the edit interface."""
    def _GetColorizer(self):
        # Lazily create and cache the colorizer on first access.
        try:
            return self.colorizer
        except AttributeError:
            self.colorizer = self._MakeColorizer()
            return self.colorizer
    def _MakeColorizer(self):
        # Give parent a chance to hook colorizer creation.
        hook = getattr(self.GetParentFrame(), "_MakeColorizer", None)
        if hook is not None:
            return hook()
        import formatter
        ## return formatter.PythonSourceFormatter(self)
        return formatter.BuiltinPythonSourceFormatter(self)
    def Colorize(self, start=0, end=-1):
        colorizer = self._GetColorizer()
        if colorizer is not None:
            colorizer.Colorize(start, end)
    def ApplyFormattingStyles(self, bReload=1):
        colorizer = self._GetColorizer()
        if colorizer is not None:
            colorizer.ApplyFormattingStyles(bReload)
    def HookFormatter(self, parent=None):
        # The Parent window will normally hook.
        colorizer = self._GetColorizer()
        if colorizer is not None:  # No need if we have no color!
            colorizer.HookFormatter(parent)
class CScintillaEdit(window.Wnd, CScintillaColorEditInterface):
    """A stand-alone Scintilla control wrapped as an MFC window object."""
    def __init__(self, wnd=None):
        # Create a fresh MFC window wrapper unless one was supplied.
        if wnd is None:
            wnd = win32ui.CreateWnd()
        window.Wnd.__init__(self, wnd)
    def SendScintilla(self, msg, w=0, l=0):
        # For a windowed control, Scintilla messages are plain window messages.
        return self.SendMessage(msg, w, l)
    def CreateWindow(self, style, rect, parent, id):
        # "Scintilla" is both the registered window class and the caption.
        self._obj_.CreateWindow(
            "Scintilla", "Scintilla", style, rect, parent, id, None)
| |
'''
Copyright 2017 Dell Inc. or its subsidiaries. All Rights Reserved.
This script tests node delayed rediscovery.
This test takes 10 minutes to run and runs against a single node, random or
specified via nodeid. Delayed rediscovery will start and script will send command to reboot the
node. The test will check to see the catalogs get refreshed after the rediscovery.
example:
python run_tests.py -stack 4 -test tests/compute/test_api20_node_delayed_rediscovery.py [-nodeid <nodeid>]
'''
import fit_path # NOQA: unused import
import fit_common
import flogging
import random
import string
import test_api_utils
import time
import unittest
from json import loads, dumps
from nosedep import depends
from nose.plugins.attrib import attr
log = flogging.get_loggers()
def random_user_generator(pre_user):
    # The test will inject a random user name into the nodes BMC data.
    # This value is used to check that the catalog data gets updated
    # after rediscovery.
    """Return a random 10-character lowercase username that differs from
    pre_user, or None if 10 consecutive attempts collide (vanishingly
    unlikely; guards against a pathological RNG).

    Uses string.ascii_lowercase, which exists on both Python 2 and 3
    (string.lowercase is Python-2-only).
    """
    user = ''.join(random.choice(string.ascii_lowercase) for i in range(10))
    count = 0
    while user == pre_user:
        user = ''.join(random.choice(string.ascii_lowercase) for i in range(10))
        count += 1
        if count == 10:
            return None
    return user
def wait_for_workflow_complete(instanceid, start_time, wait_time=2700, cycle=30):
    """Poll a workflow instance until it finishes or the deadline passes.

    :param instanceid: workflow instance id to poll.
    :param start_time: epoch seconds the workflow started (deadline anchor).
    :param wait_time: max seconds to wait before declaring a timeout.
    :param cycle: seconds to sleep between polls.
    :returns: True if the workflow succeeded, False on failure/HTTP error/timeout.
    """
    log.info(" Workflow started at time: %s", str(time.asctime()))
    # Guard: if start_time is already past the deadline, the loop body never
    # runs and 'result' would otherwise be unbound at the timeout log below.
    result = None
    while time.time() - start_time < wait_time:  # limit test to wait_time seconds
        result = fit_common.rackhdapi("/api/2.0/workflows/" + instanceid)
        status = result['json']['status']
        injectableName = result['json']['injectableName']
        if result['status'] != 200:
            log.error("HTTP error: %s", result['text'])
            return False
        if status in ['running', 'pending']:
            log.info(" %s workflow status: %s", injectableName, status)
            fit_common.time.sleep(cycle)
        elif status == 'succeeded':
            log.info(" %s workflow status: %s", injectableName, status)
            log.info(" Workflow completed at time: %s", str(time.asctime()))
            return True
        else:
            # Any other terminal status (failed, cancelled, ...) is a failure.
            error = result['text']
            log.error(" Workflow failed: status: %s text: %s", status, error)
            return False
    log.error("Workflow Timeout: %s", result['text'] if result else "no response received")
    return False
def get_ipmi_user(self, nodeid):
    """Return the IPMI user at slot 6 from the node's ipmi-user-list catalog,
    or None if neither known key layout is present."""
    result = fit_common.rackhdapi('/api/2.0/nodes/' + nodeid + '/catalogs/ipmi-user-list-1', action='get')
    self.assertEqual(result['status'], 200, msg="IPMI user list catalog could not be retrieved.")
    self.assertGreater(len(result['json']), 0, msg=("Node %s IPMI user catalog has 0 length" % nodeid))
    # The catalog key for slot 6 varies by platform: try '' first, then 'admin'.
    ipmi_user = None
    for slot_key in ('', 'admin'):
        try:
            ipmi_user = result['json']['data']['6'][slot_key]
            break
        except KeyError:
            continue
    return ipmi_user
@attr(all=False, regression=False, smoke=False, refresh_group=True)
class api20_node_rediscovery(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # class method run once per script
        # default base payload for Rediscovery Graph; every "NODEID"
        # placeholder string is replaced with the real node id before the
        # workflow is posted (see test03).
        cls.__payload = {
            "name": "Graph.Refresh.Delayed.Discovery",
            "options": {
                "discovery-refresh-graph": {
                    "graphOptions": {
                        "target": "NODEID"
                    },
                    "nodeId": "NODEID"
                },
                "generate-sku": {
                    "nodeId": "NODEID"
                },
                "generate-enclosure": {
                    "nodeId": "NODEID"
                },
                "create-default-pollers": {
                    "nodeId": "NODEID"
                },
                "run-sku-graph": {
                    "nodeId": "NODEID"
                },
                "nodeId": "NODEID"
            }
        }
        # Get the list of nodes
        nodelist = fit_common.node_select(no_unknown_nodes=True)
        assert (len(nodelist) != 0), "No valid nodes discovered"
        # Select one node at random
        cls.__nodeid = nodelist[random.randint(0, len(nodelist) - 1)]
        # Delete active workflows for specified node
        fit_common.cancel_active_workflows(cls.__nodeid)
        # Filled in by test02 before the rediscovery is triggered.
        cls.__previous_ipmi_user = None
    def setUp(self):
        # test method runs at the start of each test
        # Copy the class-level state onto the instance. The double-underscore
        # names are mangled to this class, so these resolve to the attributes
        # assigned in setUpClass.
        self.__nodeid = self.__class__.__nodeid
        self.__payload = self.__class__.__payload
        self.__previous_ipmi_user = self.__class__.__previous_ipmi_user
def test01_node_check(self):
# Get node data
node = fit_common.rackhdapi('/api/2.0/nodes/' + self.__nodeid)['json']
nodesku = fit_common.rackhdapi(node.get('sku'))['json']['name']
log.info(" Node ID: %s", self.__nodeid)
log.info(" Node SKU: %s ", nodesku)
log.info(" Graph Name: %s", self.__payload['name'])
# Ensure the compute node is powered on and reachable
result = fit_common.rackhdapi('/api/2.0/nodes/' + self.__nodeid + '/workflows', action='post',
payload={"name": "Graph.PowerOn.Node"})
self.assertEqual(result['status'], 201, msg="Node Power on workflow API failed, see logs.")
instanceId = result['json']['instanceId']
self.assertTrue(wait_for_workflow_complete(instanceId, time.time(), 50, 5),
msg="Node Power on workflow failed, see logs.")
@depends(after=test01_node_check)
def test02_create_node_user(self):
# Get previous IPMI user from RackHD node catalog
self.__previous_ipmi_user = get_ipmi_user(self, self.__nodeid)
# Set new IPMI user on node
user = random_user_generator(self.__previous_ipmi_user)
self.assertNotEqual(user, None, msg="Error generating IPMI username")
command = "user set name 6 " + user
result = test_api_utils.run_ipmi_command_to_node(self.__nodeid, command)
self.assertEqual(result['exitcode'], 0, msg="Error setting node username")
@depends(after=test02_create_node_user)
def test03_refresh_delayed(self):
# Execute delayed rediscovery which refreshes the node catalog
temp_payload = dumps(self.__payload)
workflow_payload = loads(temp_payload.replace("NODEID", self.__nodeid))
log.debug(" Payload: %s", workflow_payload)
result = fit_common.rackhdapi('/api/2.0/workflows', action='post', payload=workflow_payload)
self.assertEqual(result['status'], 201,
msg='Was expecting code 201. Got ' + str(result['status']))
graphId = result['json']['context']['graphId']
# Send command to reboot node
command = "chassis power reset"
result = test_api_utils.run_ipmi_command_to_node(self.__nodeid, command)
self.assertEqual(result['exitcode'], 0, msg="Error rebooting node")
self.assertTrue(wait_for_workflow_complete(graphId, time.time()), "Delayed rediscovery workflow failed")
@depends(after=test03_refresh_delayed)
def test04_verify_rediscovery(self):
# get the Ipmi user catalog for node
new_ipmi_user = get_ipmi_user(self, self.__nodeid)
self.assertNotEqual(new_ipmi_user, None, msg="IPMI user didn't get created or cataloged correctly")
self.assertNotEqual(new_ipmi_user, self.__previous_ipmi_user, msg="IPMI user didn't change after rediscovery")
# Allow running this test module directly as a script.
if __name__ == '__main__':
    fit_common.unittest.main()
| |
import numpy
import unittest
import theano
from theano.tensor import Tensor, TensorType
from theano.compile.sharedvalue import shared
from theano.compile.sharedvalue import SharedVariable
from theano.compile.sharedvalue import generic
class Test_SharedVariable(unittest.TestCase):
    """Unit tests for ``theano.compile.sharedvalue.shared``/``SharedVariable``.

    Covers constructor type inference, strict vs. non-strict value
    assignment, and ``allow_downcast`` behaviour for scalars and tensors.
    """
    def test_ctors(self):
        # Python ints map to iscalar or lscalar depending on platform width.
        if theano.configdefaults.python_int_bitwidth() == 32:
            assert shared(7).type == theano.tensor.iscalar, shared(7).type
        else:
            assert shared(7).type == theano.tensor.lscalar, shared(7).type
        assert shared(7.0).type == theano.tensor.dscalar
        assert shared(numpy.float32(7)).type == theano.tensor.fscalar
        # test tensor constructor: broadcastable is all-False regardless of
        # any dimension happening to be 1.
        b = shared(numpy.zeros((5, 5), dtype='int32'))
        assert b.type == TensorType('int32', broadcastable=[False, False])
        b = shared(numpy.random.rand(4, 5))
        assert b.type == TensorType('float64', broadcastable=[False, False])
        b = shared(numpy.random.rand(5, 1, 2))
        assert b.type == TensorType('float64', broadcastable=[False, False, False])
        assert shared([]).type == generic

        def badfunc():
            shared(7, bad_kw=False)
        self.assertRaises(TypeError, badfunc)

    def test_strict_generic(self):
        # this should work, because
        # generic can hold anything even when strict=True
        u = shared('asdf', strict=False)
        v = shared('asdf', strict=True)
        u.set_value(88)
        v.set_value(88)

    def test_create_numpy_strict_false(self):
        # here the value is perfect, and we're not strict about it,
        # so creation should work
        SharedVariable(
            name='u',
            type=Tensor(broadcastable=[False], dtype='float64'),
            value=numpy.asarray([1., 2.]),
            strict=False)
        # here the value is castable, and we're not strict about it,
        # so creation should work
        SharedVariable(
            name='u',
            type=Tensor(broadcastable=[False], dtype='float64'),
            value=[1., 2.],
            strict=False)
        # here the value is castable, and we're not strict about it,
        # so creation should work
        SharedVariable(
            name='u',
            type=Tensor(broadcastable=[False], dtype='float64'),
            value=[1, 2],  # different dtype and not a numpy array
            strict=False)
        # here the value is not castable, and we're not strict about it,
        # this is beyond strictness, it must fail
        try:
            SharedVariable(
                name='u',
                type=Tensor(broadcastable=[False], dtype='float64'),
                value=dict(),  # not an array by any stretch
                strict=False)
            assert 0
        except TypeError:
            pass

    def test_use_numpy_strict_false(self):
        # here the value is perfect, and we're not strict about it,
        # so creation should work
        u = SharedVariable(
            name='u',
            type=Tensor(broadcastable=[False], dtype='float64'),
            value=numpy.asarray([1., 2.]),
            strict=False)
        # check that assignments to value are cast properly
        u.set_value([3, 4])
        assert type(u.get_value()) is numpy.ndarray
        assert str(u.get_value(borrow=True).dtype) == 'float64'
        assert numpy.all(u.get_value() == [3, 4])
        # check that assignments of nonsense fail
        try:
            u.set_value('adsf')
            assert 0
        except ValueError:
            pass
        # check that an assignment of a perfect value results in no copying
        uval = theano._asarray([5, 6, 7, 8], dtype='float64')
        u.set_value(uval, borrow=True)
        assert u.get_value(borrow=True) is uval

    def test_scalar_strict(self):
        # With strict=True, set_value must reject values of the wrong dtype.
        def f(var, val):
            var.set_value(val)
        b = shared(numpy.int64(7), strict=True)
        assert b.type == theano.tensor.lscalar
        self.assertRaises(TypeError, f, b, 8.23)
        b = shared(numpy.int32(7), strict=True)
        assert b.type == theano.tensor.iscalar
        self.assertRaises(TypeError, f, b, 8.23)
        b = shared(numpy.int16(7), strict=True)
        assert b.type == theano.tensor.wscalar
        self.assertRaises(TypeError, f, b, 8.23)
        b = shared(numpy.int8(7), strict=True)
        assert b.type == theano.tensor.bscalar
        self.assertRaises(TypeError, f, b, 8.23)
        b = shared(numpy.float64(7.234), strict=True)
        assert b.type == theano.tensor.dscalar
        self.assertRaises(TypeError, f, b, 8)
        b = shared(numpy.float32(7.234), strict=True)
        assert b.type == theano.tensor.fscalar
        self.assertRaises(TypeError, f, b, 8)
        # FIX: use the builtin float; ``numpy.float`` was only ever an alias
        # for it and the alias was removed in NumPy 1.20.
        b = shared(float(7.234), strict=True)
        assert b.type == theano.tensor.dscalar
        self.assertRaises(TypeError, f, b, 8)
        b = shared(7.234, strict=True)
        assert b.type == theano.tensor.dscalar
        self.assertRaises(TypeError, f, b, 8)
        b = shared(numpy.zeros((5, 5), dtype='float32'))
        self.assertRaises(TypeError, f, b, numpy.random.rand(5, 5))

    def test_tensor_strict(self):
        def f(var, val):
            var.set_value(val)
        b = shared(numpy.int64([7]), strict=True)
        assert b.type == theano.tensor.lvector
        self.assertRaises(TypeError, f, b, 8.23)
        b = shared(numpy.int32([7]), strict=True)
        assert b.type == theano.tensor.ivector
        self.assertRaises(TypeError, f, b, 8.23)
        b = shared(numpy.int16([7]), strict=True)
        assert b.type == theano.tensor.wvector
        self.assertRaises(TypeError, f, b, 8.23)
        b = shared(numpy.int8([7]), strict=True)
        assert b.type == theano.tensor.bvector
        self.assertRaises(TypeError, f, b, 8.23)
        b = shared(numpy.float64([7.234]), strict=True)
        assert b.type == theano.tensor.dvector
        self.assertRaises(TypeError, f, b, 8)
        b = shared(numpy.float32([7.234]), strict=True)
        assert b.type == theano.tensor.fvector
        self.assertRaises(TypeError, f, b, 8)
        # numpy.float([7.234]) don't work
        # b = shared(numpy.float([7.234]), strict=True)
        # assert b.type == theano.tensor.dvector
        # self.assertRaises(TypeError, f, b, 8)
        # This generate a generic type. Should we cast? I don't think.
        # b = shared([7.234], strict=True)
        # assert b.type == theano.tensor.dvector
        # self.assertRaises(TypeError, f, b, 8)
        b = shared(numpy.zeros((5, 5), dtype='float32'))
        self.assertRaises(TypeError, f, b, numpy.random.rand(5, 5))

    def test_scalar_floatX(self):
        # the test should assure that floatX is not used in the shared
        # constructor for scalars Shared values can change, and since we don't
        # know the range they might take, we should keep the same
        # bit width / precision as the original value used to create the
        # shared variable.
        # Since downcasting of a value now raises an Exception,
        def f(var, val):
            var.set_value(val)
        b = shared(numpy.int64(7), allow_downcast=True)
        assert b.type == theano.tensor.lscalar
        f(b, 8.23)
        assert b.get_value() == 8
        b = shared(numpy.int32(7), allow_downcast=True)
        assert b.type == theano.tensor.iscalar
        f(b, 8.23)
        assert b.get_value() == 8
        b = shared(numpy.int16(7), allow_downcast=True)
        assert b.type == theano.tensor.wscalar
        f(b, 8.23)
        assert b.get_value() == 8
        b = shared(numpy.int8(7), allow_downcast=True)
        assert b.type == theano.tensor.bscalar
        f(b, 8.23)
        assert b.get_value() == 8
        b = shared(numpy.float64(7.234), allow_downcast=True)
        assert b.type == theano.tensor.dscalar
        f(b, 8)
        assert b.get_value() == 8
        b = shared(numpy.float32(7.234), allow_downcast=True)
        assert b.type == theano.tensor.fscalar
        f(b, 8)
        assert b.get_value() == 8
        # FIX: builtin float instead of the removed ``numpy.float`` alias.
        b = shared(float(7.234), allow_downcast=True)
        assert b.type == theano.tensor.dscalar
        f(b, 8)
        assert b.get_value() == 8
        b = shared(7.234, allow_downcast=True)
        assert b.type == theano.tensor.dscalar
        f(b, 8)
        assert b.get_value() == 8
        b = shared(numpy.zeros((5, 5), dtype='float32'))
        self.assertRaises(TypeError, f, b, numpy.random.rand(5, 5))

    def test_tensor_floatX(self):
        def f(var, val):
            var.set_value(val)
        b = shared(numpy.int64([7]), allow_downcast=True)
        assert b.type == theano.tensor.lvector
        f(b, [8.23])
        assert b.get_value() == 8
        b = shared(numpy.int32([7]), allow_downcast=True)
        assert b.type == theano.tensor.ivector
        f(b, [8.23])
        assert b.get_value() == 8
        b = shared(numpy.int16([7]), allow_downcast=True)
        assert b.type == theano.tensor.wvector
        f(b, [8.23])
        assert b.get_value() == 8
        b = shared(numpy.int8([7]), allow_downcast=True)
        assert b.type == theano.tensor.bvector
        f(b, [8.23])
        assert b.get_value() == 8
        b = shared(numpy.float64([7.234]), allow_downcast=True)
        assert b.type == theano.tensor.dvector
        f(b, [8])
        assert b.get_value() == 8
        b = shared(numpy.float32([7.234]), allow_downcast=True)
        assert b.type == theano.tensor.fvector
        f(b, [8])
        assert b.get_value() == 8
        # numpy.float([7.234]) don't work
        # b = shared(numpy.float([7.234]))
        # assert b.type == theano.tensor.dvector
        # f(b,[8])
        # This generate a generic type. Should we cast? I don't think.
        # b = shared([7.234])
        # assert b.type == theano.tensor.dvector
        # f(b,[8])
        b = shared(numpy.asarray([7.234], dtype=theano.config.floatX),
                   allow_downcast=True)
        assert b.dtype == theano.config.floatX
        f(b, [8])
        assert b.get_value() == 8
        b = shared(numpy.zeros((5, 5), dtype='float32'))
        self.assertRaises(TypeError, f, b, numpy.random.rand(5, 5))

    def test_err_symbolic_variable(self):
        # A symbolic variable is not a valid value for shared().
        self.assertRaises(TypeError, shared, theano.tensor.ones((2, 3)))
        shared(numpy.ones((2, 4)))
| |
import unittest
from random import randint
import urwid as uw
from urwid import ExitMainLoop
from findtui.model import FindModel
from findtui.options import MENUS, OPTIONS
from findtui.view import (FindView, exit_on_keys, CLR_RADIO_CHOOSE,
JUMP_TO_MENUS, JUMP_TO_COMMAND, JUMP_TO_OPTIONS,
TRIGGER_COMPLETITION)
# Class of the exception urwid raises to leave its main loop; bound once so
# the tests can use it with assertRaises.
ExitMainLoopException = type(ExitMainLoop())
class ViewTest(unittest.TestCase):
    """UI-level tests for FindView, driven through urwid widget internals.

    NOTE(review): the completion tests below assume the working directory
    contains entries matching '.g' (.git, .gitignore) — confirm when
    running in a different checkout layout.
    """
    # helpers
    def assert_notice_board_have_items(self, n):
        # The notice board is a decorated ListBox; count its body entries.
        self.assertEqual(len(self.view.notice_board.original_widget.body), n)
    def get_option(self, n):
        """get the tool component of option n"""
        opts = self.view.options_panel.original_widget.contents()
        return opts[n][0].original_widget.contents[1][0]
    def choose_menu(self, n):
        """choose the Nth menu, start from zero"""
        self.view.menu_chosen(n, uw.Button(MENUS[n]))
    def press(self, key):
        """imitate a key is pressed"""
        self.view.filter_short_keys([key], [])
        return self
    def cmd(self):
        """return the text in command_input"""
        return self.view.command_input.edit_text
    def assert_options_is_from_menu(self, n):
        """assert the contents of options_panel comes from MENUS[n]"""
        options = self.view.options_panel.original_widget.body
        self.assertEqual(len(options), len(OPTIONS[MENUS[n]]),
                         "options' contents is incorrect")
    def setUp(self):
        # Fresh model/view pair for every test.
        self.model = FindModel()
        self.view = FindView(self.model)
    def test_exit_on_keys(self):
        # 'q' exits without marking success.
        with self.assertRaises(ExitMainLoopException):
            exit_on_keys('q')
        from findtui.view import EXIT_WITH_SUCCESS
        self.assertFalse(EXIT_WITH_SUCCESS)
        with self.assertRaises(ExitMainLoopException):
            exit_on_keys('ctrl d')
    def test_run_on_keys(self):
        # 'ctrl r' exits AND marks success (run the command).
        with self.assertRaises(ExitMainLoopException):
            exit_on_keys('ctrl r')
        from findtui.view import EXIT_WITH_SUCCESS
        self.assertTrue(EXIT_WITH_SUCCESS)
    # Unable to test specified EXIT_KEY and RUN_KEY.
    # They have been initialized once view.py is imported
    # Fake user input
    def test_click_ok_button(self):
        with self.assertRaises(ExitMainLoopException):
            self.view.ok_button.keypress((1,1), 'enter')
        from findtui.view import EXIT_WITH_SUCCESS
        self.assertTrue(EXIT_WITH_SUCCESS)
    def test_click_reset_button(self):
        # Reset rebuilds the command line from the model's current state.
        self.model.path = "path"
        self.model.exec_cmd = "-exec du -h {} ;"
        self.view.reset_button.keypress((1,1), 'enter')
        self.assertEqual("find path -exec du -h {} ;",
                         self.cmd())
    def test_change_path_input(self):
        self.view.path_input.set_edit_text("path")
        cmd = "find path"
        self.assertEqual(cmd, self.cmd())
    def test_change_actions_input(self):
        self.view.actions_input.set_edit_text("du -h")
        cmd = "find -exec du -h {} ;"
        self.assertEqual(cmd, self.cmd())
    def test_change_command_input(self):
        # Editing the raw command line is parsed back into the model.
        self.view.command_input.set_edit_text("find this")
        self.assertEqual(self.model.cmd.path, "this")
    def test_jump_to_menus(self):
        self.press(JUMP_TO_MENUS)
        # .contents => ((widget, options), ...)
        self.assertEqual(self.view.frame.body.focus.contents[0][0],
                         self.view.menus)
        self.assertEqual(self.view.menus.focus_position,
                         self.view.current_selected_menu_idx)
    def test_jump_to_options_panel(self):
        self.press(JUMP_TO_OPTIONS)
        self.assertEqual(self.view.frame.body.focus,
                         self.view.options_panel)
    def test_jump_to_command_input(self):
        self.press(JUMP_TO_COMMAND)
        self.assertEqual(self.view.frame.body.focus.contents[0][0],
                         self.view.command_input)
    def test_focus_middle_menu_at_first(self):
        # On startup the menu list is focused near its middle entry.
        middle = len(MENUS) // 2 - 1
        self.assertEqual(self.view.menus.focus_position, middle)
        self.assertEqual(self.view.current_selected_menu_idx, middle)
    def test_press_up_on_first_option_jump_to_current_menu(self):
        # First, we need to focus on the first option of options_panel
        self.view.frame.body.focus_position = self.view.focus_order('options_panel')
        self.view.options_panel.original_widget.focus_position = 0
        self.press('up')
        self.assertEqual(self.view.frame.body.focus, self.view.menus_area)
        self.assertEqual(self.view.menus.focus_position,
                         self.view.current_selected_menu_idx)
    def assert_board_not_changed(self, old_board):
        # Helper: despite its name, it asserts the board object WAS replaced
        # (each focus change creates a new board) and returns the new one.
        new_board = self.view.notice_board.original_widget
        self.assertNotEqual(new_board, old_board)
        return new_board
    def test_focus_on_options_change_notice(self):
        self.choose_menu(0)
        # First, we need to focus on the options_panel
        # and make sure first option not focused
        self.view.frame.body.focus_position = self.view.focus_order('options_panel')
        self.view.options_panel.original_widget.focus_position = 1
        old_board = self.view.notice_board.original_widget
        self.view.options_panel.original_widget.focus_position = 0
        # Notice that each call of create_xxx_board will create a different board
        old_board = self.assert_board_not_changed(old_board)
        self.view.options_panel.original_widget.focus_position = 1
        old_board = self.assert_board_not_changed(old_board)
        size = len(OPTIONS[MENUS[0]])
        self.view.options_panel.original_widget.focus_position = size - 1
        self.assertNotEqual(self.view.notice_board.original_widget, old_board)
    def assert_example_correct(self, example):
        # Dig the example text out of the notice board's first widget.
        self.assertEqual(example,
                         self.view.notice_board.original_widget.contents()[0][0].contents[0][0].text)
    def test_create_correct_example(self):
        self.choose_menu(0)
        self.view.frame.body.focus_position = self.view.focus_order('options_panel')
        self.view.options_panel.original_widget.focus_position = 1
        example = OPTIONS[MENUS[0]][1].example
        self.assert_example_correct(example)
        self.view.options_panel.original_widget.focus_position = 0
        self.assert_example_correct(OPTIONS[MENUS[0]][0].example)
        self.view.options_panel.original_widget.focus_position = 1
        self.assert_example_correct(example)
    def test_focus_on_menu_change_options(self):
        the_last = len(MENUS) - 1
        self.view.menus.focus_position = the_last
        self.assertEqual(self.view.current_selected_menu_idx, the_last)
        self.assert_options_is_from_menu(the_last)
    def test_press_completion_trigger_on_path_input(self):
        self.view.frame.body.focus_position = self.view.focus_order('path_input')
        self.view.path_input.set_edit_text('.g')
        self.press(TRIGGER_COMPLETITION)
        # .git, .gitignore
        self.assert_notice_board_have_items(2)
    def test_press_completion_trigger_on_command_input(self):
        self.view.frame.body.focus_position = self.view.focus_order('command_input')
        self.view.command_input.set_edit_text('find fa .g')
        self.press(TRIGGER_COMPLETITION)
        # .git, .gitignore
        self.assert_notice_board_have_items(2)
        self.view.command_input.set_edit_text('find afas -a')
        self.press(TRIGGER_COMPLETITION)
        # 'amin', 'anewer', 'atime'
        self.assert_notice_board_have_items(3)
    def test_press_completion_trigger_on_invalid_place(self):
        self.view.frame.body.focus_position = self.view.focus_order('options_panel')
        # Now the focus is on 'false'
        self.press(TRIGGER_COMPLETITION)
        # don't trigger completion on NON-PATH_INPUT_OPTION
        self.assert_notice_board_have_items(0)
    def test_press_completion_trigger_on_path_input_option(self):
        self.choose_menu(1)  # Name
        self.view.frame.body.focus_position = self.view.focus_order('options_panel')
        # Now the focus is on 'ilname'
        focused = self.get_option(0)
        focused.set_edit_text('.g')
        self.press(TRIGGER_COMPLETITION)
        # .git, .gitignore
        self.assert_notice_board_have_items(2)
        cwc = self.view.component_waited_completed
        # change on path_input
        self.assertEqual(cwc, focused)
        self.assertEqual(cwc.edit_text, '.git')
        self.assertEqual(cwc.edit_pos, 4)
    # ACTIONS
    def test_complete_btn_clicked(self):
        # Completion replaces the last word of the waited component's text.
        ed = uw.Edit('')
        ed.set_edit_text('a b')
        ed2 = uw.Edit('')
        ed2.set_edit_text('b')
        btn = uw.Button('')
        self.view.component_waited_completed = ed
        self.view.complete_btn_clicked(btn, 'blind')
        self.assertEqual(ed.edit_text, 'a blind ')
        self.view.component_waited_completed = ed2
        self.view.complete_btn_clicked(btn, 'blind')
        self.assertEqual(ed2.edit_text, 'blind ')
    def test_menu_chosen(self):
        self.choose_menu(1)
        self.assertEqual(self.view.current_selected_menu_idx, 1)
        self.assert_options_is_from_menu(1)
    def test_opt_radio_button_changed(self):
        bgroup = []
        rb = uw.RadioButton(bgroup, 'some', 'first True')
        self.view.opt_radio_button_changed(rb, False, {'option_name': 'opt'})
        self.assertEqual(self.model.options_str, "")
        self.view.opt_radio_button_changed(rb, True, {'option_name': 'opt'})
        self.assertEqual(self.model.options_str, "-opt some")
        self.assertEqual(self.cmd(), "find -opt some")
    def test_opt_radio_button_changed_clear(self):
        # Selecting the special CLR_RADIO_CHOOSE entry clears the option.
        bgroup = []
        rb = uw.RadioButton(bgroup, 'some', 'first True')
        self.view.opt_radio_button_changed(rb, True, {'option_name': 'opt'})
        clear = uw.RadioButton(bgroup, CLR_RADIO_CHOOSE, 'first True')
        self.view.opt_radio_button_changed(clear, True, {'option_name': 'opt'})
        self.assertEqual(self.model.options_str, "")
        self.assertEqual(self.cmd().rstrip(), "find")
        self.view.opt_radio_button_changed(rb, True, {'option_name': 'opt'})
        self.assertEqual(self.model.options_str, "-opt some")
    def test_opt_radio_button_changed_clear2(self):
        # Clearing one option group must not clear options from other groups.
        bgroup1 = []
        bgroup2 = []
        rb1 = uw.RadioButton(bgroup1, 'some', 'first True')
        self.view.opt_radio_button_changed(rb1, True, {'option_name': 'opt'})
        rb2 = uw.RadioButton(bgroup2, 'some', 'first True')
        self.view.opt_radio_button_changed(rb2, True, {'option_name': 'name'})
        clear = uw.RadioButton(bgroup1, CLR_RADIO_CHOOSE, 'first True')
        self.view.opt_radio_button_changed(clear, True, {'option_name': 'opt'})
        self.assertEqual(self.model.options_str, "-name some")
    def test_opt_checkbox_changed(self):
        cb = uw.CheckBox('')
        self.view.opt_checkbox_changed(cb, True, {'option_name': 'opt'})
        self.assertEqual(self.model.options_str, "-opt ")
        self.assertEqual(self.cmd(), "find -opt")
        self.view.opt_checkbox_changed(cb, False, {'option_name': 'opt'})
        self.assertEqual(self.model.options_str, "")
        self.assertEqual(self.cmd().rstrip(), "find")
    def test_opt_path_input_changed(self):
        pi = uw.Edit()
        self.view.opt_path_input_changed('opt', pi, "fi")
        self.assertEqual(self.model.options_str, "-opt fi")
        self.assertEqual(self.cmd(), "find -opt fi")
        self.view.opt_path_input_changed('opt', pi, "Re")
        self.assertEqual(self.model.options_str, "-opt Re")
        self.view.opt_path_input_changed('opt', pi, "")
        self.assertEqual(self.model.options_str, "")
    def test_opt_text_input_changed(self):
        ti = uw.Edit()
        self.view.opt_path_input_changed('opt', ti, "some")
        self.assertEqual(self.model.options_str, "-opt some")
        self.assertEqual(self.cmd(), "find -opt some")
        self.view.opt_path_input_changed('opt', ti, "else")
        self.assertEqual(self.model.options_str, "-opt else")
        self.view.opt_path_input_changed('opt', ti, "")
        self.assertEqual(self.model.options_str, "")
    def test_opt_int_input_changed(self):
        pi = uw.Edit()
        self.view.opt_path_input_changed('opt', pi, 3)
        self.assertEqual(self.model.options_str, "-opt 3")
        self.assertEqual(self.cmd(), "find -opt 3")
        self.view.opt_path_input_changed('opt', pi, 4)
        self.assertEqual(self.model.options_str, "-opt 4")
        self.view.opt_path_input_changed('opt', pi, "")
        self.assertEqual(self.model.options_str, "")
    # create GUI
    def test_create_notice_board(self):
        listBox = uw.ListBox
        example = """It is an example"""
        self.assertIsInstance(self.view.create_example_board(example),
                              listBox)
        self.assertIsInstance(self.view.create_completion_board([]), listBox)
        data = [('text', 'data'), ('text', 'data')]
        # path or other
        self.assertIsInstance(self.view.create_completion_board(data), listBox)
        data = [('text', 'text'), ('text', 'text')]
        # options
        self.assertIsInstance(self.view.create_completion_board(data), listBox)
    def test_create_options(self):
        choice = MENUS[randint(0, len(MENUS)-1)]
        self.assertIsInstance(self.view.create_options(choice), uw.ListBox)
| |
from __future__ import absolute_import, unicode_literals
import sys
import pytest
from case import Mock, skip
from celery.five import PY3, long_t, python_2_unicode_compatible, string
from celery.local import PromiseProxy, Proxy, maybe_evaluate, try_import
class test_try_import:
    """Tests for :func:`celery.local.try_import`."""

    def test_imports(self):
        # Importing a module that exists returns the module object itself.
        module = try_import(__name__)
        assert module

    def test_when_default(self):
        # A module that cannot be imported yields the supplied default.
        fallback = object()
        assert try_import('foobar.awqewqe.asdwqewq', fallback) is fallback
class test_Proxy:
    """Tests for :class:`celery.local.Proxy`.

    Each test builds a Proxy around a factory callable and checks that
    attribute access, operators and protocols are forwarded to the object
    the factory returns.  The exact expression forms used (``del(x.a)``,
    augmented assignment, slicing) are themselves the behaviour under test.
    """
    def test_std_class_attributes(self):
        assert Proxy.__name__ == 'Proxy'
        assert Proxy.__module__ == 'celery.local'
        assert isinstance(Proxy.__doc__, str)
    def test_doc(self):
        # An explicit __doc__ override beats the target's docstring.
        def real():
            pass
        x = Proxy(real, __doc__='foo')
        assert x.__doc__ == 'foo'
    def test_name(self):
        # name= overrides __name__; other metadata still comes from the target.
        def real():
            """real function"""
            return 'REAL'
        x = Proxy(lambda: real, name='xyz')
        assert x.__name__ == 'xyz'
        y = Proxy(lambda: real)
        assert y.__name__ == 'real'
        assert x.__doc__ == 'real function'
        assert x.__class__ == type(real)
        assert x.__dict__ == real.__dict__
        assert repr(x) == repr(real)
        assert x.__module__
    def test_get_current_local(self):
        # When a _Proxy_local is set, _get_current_object resolves through it.
        x = Proxy(lambda: 10)
        object.__setattr__(x, '_Proxy_local', Mock())
        assert x._get_current_object()
    def test_bool(self):
        class X(object):
            def __bool__(self):
                return False
            __nonzero__ = __bool__
        x = Proxy(lambda: X())
        assert not x
    def test_slots(self):
        # A slotted target has no __dict__, and the proxy must not fake one.
        class X(object):
            __slots__ = ()
        x = Proxy(X)
        with pytest.raises(AttributeError):
            x.__dict__
    @skip.if_python3()
    def test_unicode(self):
        # Python 2 only: string() uses __unicode__, falling back to __repr__.
        @python_2_unicode_compatible
        class X(object):
            def __unicode__(self):
                return 'UNICODE'
            __str__ = __unicode__
            def __repr__(self):
                return 'REPR'
        x = Proxy(lambda: X())
        assert string(x) == 'UNICODE'
        del(X.__unicode__)
        del(X.__str__)
        assert string(x) == 'REPR'
    def test_dir(self):
        # dir() forwards to the target; errors degrade to an empty list.
        class X(object):
            def __dir__(self):
                return ['a', 'b', 'c']
        x = Proxy(lambda: X())
        assert dir(x) == ['a', 'b', 'c']
        class Y(object):
            def __dir__(self):
                raise RuntimeError()
        y = Proxy(lambda: Y())
        assert dir(y) == []
    def test_getsetdel_attr(self):
        # set/delattr act on the instance; delete re-exposes the class attr.
        class X(object):
            a = 1
            b = 2
            c = 3
            def __dir__(self):
                return ['a', 'b', 'c']
        v = X()
        x = Proxy(lambda: v)
        assert x.__members__ == ['a', 'b', 'c']
        assert x.a == 1
        assert x.b == 2
        assert x.c == 3
        setattr(x, 'a', 10)
        assert x.a == 10
        del(x.a)
        assert x.a == 1
    def test_dictproxy(self):
        # Mapping protocol is forwarded to the proxied dict.
        v = {}
        x = Proxy(lambda: v)
        x['foo'] = 42
        assert x['foo'] == 42
        assert len(x) == 1
        assert 'foo' in x
        del(x['foo'])
        with pytest.raises(KeyError):
            x['foo']
        assert iter(x)
    def test_listproxy(self):
        # Sequence protocol (index, slice, del, contains) is forwarded.
        v = []
        x = Proxy(lambda: v)
        x.append(1)
        x.extend([2, 3, 4])
        assert x[0] == 1
        assert x[:-1] == [1, 2, 3]
        del(x[-1])
        assert x[:-1] == [1, 2]
        x[0] = 10
        assert x[0] == 10
        assert 10 in x
        assert len(x) == 3
        assert iter(x)
        x[0:2] = [1, 2]
        del(x[0:2])
        assert str(x)
        if sys.version_info[0] < 3:
            assert x.__cmp__(object()) == -1
    def test_complex_cast(self):
        class O(object):
            def __complex__(self):
                return complex(10.333)
        o = Proxy(O)
        assert o.__complex__() == complex(10.333)
    def test_index(self):
        class O(object):
            def __index__(self):
                return 1
        o = Proxy(O)
        assert o.__index__() == 1
    def test_coerce(self):
        # Python 2 numeric coercion protocol.
        class O(object):
            def __coerce__(self, other):
                return self, other
        o = Proxy(O)
        assert o.__coerce__(3)
    def test_int(self):
        # Arithmetic, comparison and in-place operators all forward.
        # Note: augmented assignment rebinds the name to a plain value.
        assert Proxy(lambda: 10) + 1 == Proxy(lambda: 11)
        assert Proxy(lambda: 10) - 1 == Proxy(lambda: 9)
        assert Proxy(lambda: 10) * 2 == Proxy(lambda: 20)
        assert Proxy(lambda: 10) ** 2 == Proxy(lambda: 100)
        assert Proxy(lambda: 20) / 2 == Proxy(lambda: 10)
        assert Proxy(lambda: 20) // 2 == Proxy(lambda: 10)
        assert Proxy(lambda: 11) % 2 == Proxy(lambda: 1)
        assert Proxy(lambda: 10) << 2 == Proxy(lambda: 40)
        assert Proxy(lambda: 10) >> 2 == Proxy(lambda: 2)
        assert Proxy(lambda: 10) ^ 7 == Proxy(lambda: 13)
        assert Proxy(lambda: 10) | 40 == Proxy(lambda: 42)
        assert Proxy(lambda: 10) != Proxy(lambda: -11)
        assert Proxy(lambda: 10) != Proxy(lambda: -10)
        assert Proxy(lambda: -10) == Proxy(lambda: -10)
        assert Proxy(lambda: 10) < Proxy(lambda: 20)
        assert Proxy(lambda: 20) > Proxy(lambda: 10)
        assert Proxy(lambda: 10) >= Proxy(lambda: 10)
        assert Proxy(lambda: 10) <= Proxy(lambda: 10)
        assert Proxy(lambda: 10) == Proxy(lambda: 10)
        assert Proxy(lambda: 20) != Proxy(lambda: 10)
        assert Proxy(lambda: 100).__divmod__(30)
        assert Proxy(lambda: 100).__truediv__(30)
        assert abs(Proxy(lambda: -100))
        x = Proxy(lambda: 10)
        x -= 1
        assert x == 9
        x = Proxy(lambda: 9)
        x += 1
        assert x == 10
        x = Proxy(lambda: 10)
        x *= 2
        assert x == 20
        x = Proxy(lambda: 20)
        x /= 2
        assert x == 10
        x = Proxy(lambda: 10)
        x %= 2
        assert x == 0
        x = Proxy(lambda: 10)
        x <<= 3
        assert x == 80
        x = Proxy(lambda: 80)
        x >>= 4
        assert x == 5
        x = Proxy(lambda: 5)
        x ^= 1
        assert x == 4
        x = Proxy(lambda: 4)
        x **= 4
        assert x == 256
        x = Proxy(lambda: 256)
        x //= 2
        assert x == 128
        x = Proxy(lambda: 128)
        x |= 2
        assert x == 130
        x = Proxy(lambda: 130)
        x &= 10
        assert x == 2
        x = Proxy(lambda: 10)
        assert type(x.__float__()) == float
        assert type(x.__int__()) == int
        if not PY3:
            assert type(x.__long__()) == long_t
        assert hex(x)
        assert oct(x)
    def test_hash(self):
        class X(object):
            def __hash__(self):
                return 1234
        assert hash(Proxy(lambda: X())) == 1234
    def test_call(self):
        class X(object):
            def __call__(self):
                return 1234
        assert Proxy(lambda: X())() == 1234
    def test_context(self):
        # Context-manager protocol forwards; flags are set on the target.
        class X(object):
            entered = exited = False
            def __enter__(self):
                self.entered = True
                return 1234
            def __exit__(self, *exc_info):
                self.exited = True
        v = X()
        x = Proxy(lambda: v)
        with x as val:
            assert val == 1234
        assert x.entered
        assert x.exited
    def test_reduce(self):
        class X(object):
            def __reduce__(self):
                return 123
        x = Proxy(lambda: X())
        assert x.__reduce__() == 123
class test_PromiseProxy:
    """Tests for :class:`celery.local.PromiseProxy`.

    Statement order matters throughout: attribute access and ``repr()``
    are precisely the operations that force evaluation of the promise.
    """
    def test_only_evaluated_once(self):
        class X(object):
            attr = 123
            evals = 0
            def __init__(self):
                self.__class__.evals += 1
        p = PromiseProxy(X)
        # Two accesses, but the factory must only have run once.
        assert p.attr == 123
        assert p.attr == 123
        assert X.evals == 1
    def test_callbacks(self):
        source = Mock(name='source')
        p = PromiseProxy(source)
        cbA = Mock(name='cbA')
        cbB = Mock(name='cbB')
        cbC = Mock(name='cbC')
        # Callbacks registered before evaluation are deferred...
        p.__then__(cbA, p)
        p.__then__(cbB, p)
        assert not p.__evaluated__()
        assert object.__getattribute__(p, '__pending__')
        # repr() forces evaluation, which fires the pending callbacks
        # and removes the __pending__ attribute.
        assert repr(p)
        assert p.__evaluated__()
        with pytest.raises(AttributeError):
            object.__getattribute__(p, '__pending__')
        cbA.assert_called_with(p)
        cbB.assert_called_with(p)
        assert p.__evaluated__()
        # ...while callbacks registered after evaluation fire immediately.
        p.__then__(cbC, p)
        cbC.assert_called_with(p)
        with pytest.raises(AttributeError):
            object.__getattribute__(p, '__pending__')
    def test_maybe_evaluate(self):
        x = PromiseProxy(lambda: 30)
        assert not x.__evaluated__()
        # maybe_evaluate forces a promise; non-promises pass through as-is.
        assert maybe_evaluate(x) == 30
        assert maybe_evaluate(x) == 30
        assert maybe_evaluate(30) == 30
        assert x.__evaluated__()
| |
import pytest
import unittest
import wtforms
import minform
class TestBinaryItem(unittest.TestCase):
    """Behavioural tests for minform.BinaryItem pack/unpack helpers."""

    class MyItem(minform.BinaryItem):
        # Fixed 4-byte item used as the concrete fixture for all tests.
        size = 4

        def pack(self, data, order=None):
            return b'\x01\x02\x03\x04'

        def unpack(self, buf, order=None):
            return 0x01020304

    def test_binary_item_cannot_be_subclassed_without_pack(self):
        # A subclass that omits pack() must not be instantiable.
        class MyItem(minform.BinaryItem):
            def unpack(self, buf, order=None):
                pass
        with pytest.raises(TypeError):
            MyItem()

    def test_binary_item_cannot_be_subclassed_without_unpack(self):
        # Likewise for a subclass that omits unpack().
        class MyItem(minform.BinaryItem):
            def pack(self, data, order=None):
                pass
        with pytest.raises(TypeError):
            MyItem()

    def test_pack_into_works_for_same_size_buffer(self):
        item = self.MyItem()
        target = bytearray(4)
        item.pack_into(target, 0, None)
        assert target == b'\x01\x02\x03\x04'

    def test_pack_into_fails_for_small_buffer(self):
        item = self.MyItem()
        with pytest.raises(ValueError):
            item.pack_into(bytearray(3), 0, None)

    def test_pack_into_fails_for_small_remaining_buffer(self):
        # Buffer is long enough overall, but not past the given offset.
        item = self.MyItem()
        with pytest.raises(ValueError):
            item.pack_into(bytearray(5), 2, None)

    def test_pack_into_fails_with_small_buffer_from_end(self):
        # Negative offsets count from the end; -3 leaves only 3 bytes.
        item = self.MyItem()
        with pytest.raises(ValueError):
            item.pack_into(bytearray(5), -3, None)

    def test_pack_into_succeeds_with_sufficient_buffer_from_end(self):
        item = self.MyItem()
        item.pack_into(bytearray(5), -4, None)

    def test_pack_into_succeeds_with_large_buffer_from_end(self):
        item = self.MyItem()
        item.pack_into(bytearray(5), -5, None)

    def test_unpack_from_works_for_same_size_buffer(self):
        item = self.MyItem()
        result = item.unpack_from(b'\x01\x02\x03\x04', 0, None)
        assert result == 0x01020304

    def test_unpack_from_fails_for_small_buffer(self):
        item = self.MyItem()
        with pytest.raises(ValueError):
            item.unpack_from(bytearray(3), 0, None)

    def test_unpack_from_fails_for_small_remaining_buffer(self):
        item = self.MyItem()
        with pytest.raises(ValueError):
            item.unpack_from(bytearray(5), 2, None)

    def test_unpack_from_fails_with_small_buffer_from_end(self):
        item = self.MyItem()
        with pytest.raises(ValueError):
            item.unpack_from(bytearray(5), -3, None)

    def test_unpack_from_succeeds_with_sufficient_buffer_from_end(self):
        item = self.MyItem()
        result = item.unpack_from(bytearray(5), -4, None)
        assert result == 0x01020304
class TestBinaryForm(unittest.TestCase):
    """Round-trip tests for a big-endian minform.BinaryForm.

    The field declaration order in ``Form`` defines the wire layout, and
    ``data``/``buf`` below are the matching decoded/encoded fixtures —
    they must stay byte-for-byte in sync with the declarations.
    """
    class Form(minform.BinaryForm):
        order = minform.BIG_ENDIAN
        char = minform.CharField()
        int32 = minform.Int32Field()
        bytes = minform.BytesField(max_length=6, length=minform.EXPLICIT)
        _ = minform.BlankBytes(3)  # padding only; stripped from the namespace
        lst = minform.BinaryFieldList(minform.UInt8Field(), max_entries=3,
                                      length=minform.FIXED)
        end = minform.BytesField(max_length=2, length=minform.AUTOMATIC)
        lst2 = minform.BinaryFieldList(minform.UInt16Field(), max_entries=1,
                                       length=minform.EXPLICIT)
    # Decoded form data corresponding to ``buf`` below.
    data = {
        'char': b'\x10',
        'int32': 0x12345678,
        'bytes': b'foo',
        'lst': [
            0x01,
            0x02,
            0x03,
        ],
        'end': b'ab',
        'lst2': [0x1234]
    }
    # Encoded layout: char+int32 / length-prefixed bytes + padding /
    # 3 blank bytes + fixed list / automatic bytes + explicit list.
    buf = (b'\x10\x12\x34\x56\x78' +
           b'\x03foo\0\0\0' +
           b'\0\0\0\x01\x02\x03' +
           b'ab\x01\x12\x34')
    size = len(buf)
    def test_non_field_binary_items_are_removed_from_namespace(self):
        assert not hasattr(self.Form, '_')
    def test_binary_items_are_replace_with_fields(self):
        for name in 'char int32 bytes lst end lst2'.split():
            assert isinstance(getattr(self.Form, name),
                              wtforms.core.UnboundField)
    def test_binary_items_are_cached_in_list(self):
        # 6 fields + the BlankBytes padding item = 7 binary items.
        assert len(self.Form._binary_items) == 7
        assert all(isinstance(f, minform.BinaryItem)
                   for f in self.Form._binary_items)
    def test_pack_into_works_for_same_size_buffer(self):
        form = self.Form(data=self.data)
        buf = bytearray(self.size)
        form.pack_into(buf, 0)
        assert buf == self.buf
    def test_pack_into_fails_for_small_buffer(self):
        m = self.Form(data=self.data)
        buf = bytearray(self.size - 1)
        with pytest.raises(ValueError):
            m.pack_into(buf, 0)
    def test_pack_into_fails_for_small_remaining_buffer(self):
        m = self.Form(data=self.data)
        buf = bytearray(self.size + 1)
        with pytest.raises(ValueError):
            m.pack_into(buf, 2)
    def test_pack_into_fails_with_small_buffer_from_end(self):
        m = self.Form(data=self.data)
        buf = bytearray(self.size + 1)
        with pytest.raises(ValueError):
            m.pack_into(buf, -(self.size - 1))
    def test_pack_into_succeeds_with_sufficient_buffer_from_end(self):
        m = self.Form(data=self.data)
        buf = bytearray(self.size + 1)
        m.pack_into(buf, -self.size)
    def test_unpack_from_succeeds_for_same_size_buffer(self):
        form = self.Form.unpack_from(self.buf, 0)
        assert form.data == self.data
    def test_unpack_from_fails_for_small_buffer(self):
        with pytest.raises(ValueError):
            self.Form.unpack_from(self.buf[:-1], 0)
    def test_unpack_from_fails_for_small_remaining_buffer(self):
        buf = bytearray(self.size + 1)
        with pytest.raises(ValueError):
            self.Form.unpack_from(buf, 2)
    def test_unpack_from_fails_with_small_buffer_from_end(self):
        buf = bytearray(self.size + 1)
        with pytest.raises(ValueError):
            self.Form.unpack_from(buf, -(self.size - 1))
    def test_unpack_from_succeeds_with_sufficient_buffer_from_end(self):
        buf = b'\x00\x00' + self.buf
        form = self.Form.unpack_from(buf, -self.size)
        assert form.data == self.data
| |
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import electrum
from electrum.bitcoin import TYPE_ADDRESS
from electrum import WalletStorage, Wallet
from electrum_gui.kivy.i18n import _
from electrum.paymentrequest import InvoiceStore
from electrum.util import profiler, InvalidPassword
from electrum.plugins import run_hook
from electrum.util import format_satoshis, format_satoshis_plain
from electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum_gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum_gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum_gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum_gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble
from .uix.dialogs import OutputList, OutputItem
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False

# register widget cache for keeping memory down timeout to forever to cache
# the data
Cache.register('electrum_widgets', timeout=0)

from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard

Factory.register('TabbedCarousel', module='electrum_gui.kivy.uix.screens')

# Register fonts without this you won't be able to use bold/italic...
# inside markup.
# NOTE(review): this import of kivy.core.text.Label shadows the
# kivy.uix.label.Label imported just above — confirm that is intended.
from kivy.core.text import Label
Label.register('Roboto',
               'gui/kivy/data/fonts/Roboto.ttf',
               'gui/kivy/data/fonts/Roboto.ttf',
               'gui/kivy/data/fonts/Roboto-Bold.ttf',
               'gui/kivy/data/fonts/Roboto-Bold.ttf')

from electrum.util import base_units
class ElectrumWindow(App):
    """Kivy App object for the Electrum GUI."""

    # injected via kwargs at construction time (see __init__)
    electrum_config = ObjectProperty(None)
    language = StringProperty('en')

    # properties might be updated by the network
    num_blocks = NumericProperty(0)
    num_nodes = NumericProperty(0)
    server_host = StringProperty('')
    server_port = StringProperty('')
    num_chains = NumericProperty(0)
    blockchain_name = StringProperty('')
    blockchain_checkpoint = NumericProperty(0)

    auto_connect = BooleanProperty(False)

    def on_auto_connect(self, instance, x):
        # kivy property observer: re-apply network parameters with the
        # new auto_connect flag, leaving everything else unchanged
        host, port, protocol, proxy, auto_connect = self.network.get_parameters()
        self.network.set_parameters(host, port, protocol, proxy, self.auto_connect)

    def toggle_auto_connect(self, x):
        self.auto_connect = not self.auto_connect
def choose_server_dialog(self, popup):
    """Open a ChoiceDialog of known servers; the selection fills the
    host/port fields of *popup*.

    :param popup: network-settings popup whose ids.host / ids.port
        text fields are updated with the chosen server.
    """
    # FIX: was `from uix.dialogs.choice_dialog import ...`, an implicit
    # relative import (Python 2 only) inconsistent with the explicit
    # relative imports used at the top of this module.
    from .uix.dialogs.choice_dialog import ChoiceDialog
    protocol = 's'  # SSL entry in the DEFAULT_PORTS mapping

    def cb2(host):
        # look up the default port for the chosen host and push both
        # values back into the popup's text fields
        from electrum.network import DEFAULT_PORTS
        pp = servers.get(host, DEFAULT_PORTS)
        port = pp.get(protocol, '')
        popup.ids.host.text = host
        popup.ids.port.text = port

    servers = self.network.get_servers()
    ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def choose_blockchain_dialog(self, dt):
    """Let the user pick which chain to follow when the network sees
    more than one blockchain fork; no dialog is shown for a single
    chain.
    """
    # FIX: made the implicit relative import explicit, matching the
    # module's other `.uix.dialogs` imports.
    from .uix.dialogs.choice_dialog import ChoiceDialog
    chains = self.network.get_blockchains()

    def cb(name):
        # map the displayed name back to a blockchain index and follow it
        for index, b in self.network.blockchains.items():
            if name == self.network.get_blockchain_name(b):
                self.network.follow_chain(index)

    names = [self.network.blockchains[b].get_name() for b in chains]
    if len(names) > 1:
        ChoiceDialog(_('Choose your chain'), names, '', cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
    # persist the replace-by-fee preference
    self.electrum_config.set_key('use_rbf', self.use_rbf, True)

use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
    # persist whether change outputs should be used
    self.electrum_config.set_key('use_change', self.use_change, True)

use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
    # stored inverted: the config key is 'confirmed_only'
    self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)

def set_URI(self, uri):
    # show the send tab pre-filled from a bitcoin: URI
    self.switch_to('send')
    self.send_screen.set_URI(uri)

def on_new_intent(self, intent):
    # Android intent handler; only bitcoin: URIs are handled
    if intent.getScheme() != 'bitcoin':
        return
    uri = intent.getDataString()
    self.set_URI(uri)

def on_language(self, instance, language):
    # switch the i18n language when the kivy property changes
    Logger.info('language: {}'.format(language))
    _.switch_lang(language)
def update_history(self, *dt):
    """Refresh the history tab, if it has been created yet."""
    screen = self.history_screen
    if not screen:
        return
    screen.update()
def on_quotes(self, d):
    # fiat quotes changed -> redraw history (fiat column)
    Logger.info("on_quotes")
    self._trigger_update_history()

def on_history(self, d):
    # fiat history rates changed -> redraw history
    Logger.info("on_history")
    self._trigger_update_history()

def _get_bu(self):
    # current base unit, defaulting to mBTC
    return self.electrum_config.get('base_unit', 'mBTC')

def _set_bu(self, value):
    # persist the base unit and refresh everything showing amounts
    assert value in base_units.keys()
    self.electrum_config.set_key('base_unit', value, True)
    self._trigger_update_status()
    self._trigger_update_history()

# read/write alias backed by the config file
base_unit = AliasProperty(_get_bu, _set_bu)
status = StringProperty('')
fiat_unit = StringProperty('')

def on_fiat_unit(self, a, b):
    self._trigger_update_history()

def decimal_point(self):
    # number of decimal places implied by the current base unit
    return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
    """Convert an amount string (in the current base unit) to a fiat
    string; returns '' when there is no input or no exchange rate."""
    if amount_str:
        exchange_rate = self.fx.exchange_rate()
        if exchange_rate:
            satoshis = self.get_amount(amount_str + ' ' + self.base_unit)
            fiat_amount = satoshis * exchange_rate / pow(10, 8)
            # trim trailing zeros and any dangling decimal point
            return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
    return ''
def fiat_to_btc(self, fiat_amount):
    """Convert a fiat amount string to an amount string in the current
    base unit; returns '' when there is no input or no exchange rate."""
    rate = self.fx.exchange_rate() if fiat_amount else None
    if not rate:
        return ''
    satoshi_count = int(pow(10, 8) * Decimal(fiat_amount) / Decimal(rate))
    return format_satoshis_plain(satoshi_count, self.decimal_point())
def get_amount(self, amount_str):
    """Parse a string like "1.23 mBTC" into an integer satoshi amount.

    The unit suffix must equal the current base unit. Returns None if
    the numeric part is not a valid Decimal.
    """
    a, u = amount_str.split()
    assert u == self.base_unit
    try:
        x = Decimal(a)
    except Exception:
        # FIX: was a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit; Exception still covers
        # decimal.InvalidOperation raised for malformed input.
        return None
    p = pow(10, self.decimal_point())
    return int(p * x)
# backing store; updated from on_size() based on window dimensions
_orientation = OptionProperty('landscape',
                              options=('landscape', 'portrait'))

def _get_orientation(self):
    return self._orientation

# read-only public view of _orientation
orientation = AliasProperty(_get_orientation,
                            None,
                            bind=('_orientation',))
'''Tries to ascertain the kind of device the app is running on.
Cane be one of `tablet` or `phone`.
:data:`orientation` is a read only `AliasProperty` Defaults to 'landscape'
'''

# backing store; updated from on_size() based on the shorter dimension
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))

def _get_ui_mode(self):
    return self._ui_mode

ui_mode = AliasProperty(_get_ui_mode,
                        None,
                        bind=('_ui_mode',))
'''Defines tries to ascertain the kind of device the app is running on.
Cane be one of `tablet` or `phone`.
:data:`ui_mode` is a read only `AliasProperty` Defaults to 'phone'
'''
def __init__(self, **kwargs):
    """Create the app.

    Expected kwargs: ``config`` (electrum config), ``network``,
    ``plugins`` and ``gui_object`` (whose daemon provides fx rates).
    """
    # initialize variables
    self._clipboard = Clipboard
    self.info_bubble = None
    self.nfcscanner = None
    self.tabs = None
    self.is_exit = False
    self.wallet = None
    App.__init__(self)#, **kwargs)
    # FIX: was `title = _('Electrum App')`, a dead local assignment;
    # the intent is clearly to set the App window title.
    self.title = _('Electrum App')
    self.electrum_config = config = kwargs.get('config', None)
    self.language = config.get('language', 'en')
    self.network = network = kwargs.get('network', None)
    if self.network:
        # seed the network-backed kivy properties from current state
        self.num_blocks = self.network.get_local_height()
        self.num_nodes = len(self.network.get_interfaces())
        host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
        self.server_host = host
        self.server_port = port
        self.auto_connect = auto_connect
        self.proxy_config = proxy_config if proxy_config else {}
    self.plugins = kwargs.get('plugins', [])
    self.gui_object = kwargs.get('gui_object', None)
    self.daemon = self.gui_object.daemon
    self.fx = self.daemon.fx
    # user preferences (persisted in the config)
    self.use_rbf = config.get('use_rbf', False)
    self.use_change = config.get('use_change', True)
    self.use_unconfirmed = not config.get('confirmed_only', False)
    # create triggers so as to minimize updating a max of 2 times a sec
    self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
    self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
    self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
    self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
    # cached dialogs
    self._settings_dialog = None
    self._password_dialog = None
def wallet_name(self):
    # basename of the wallet file, or a single space when no wallet
    return os.path.basename(self.wallet.storage.path) if self.wallet else ' '

def on_pr(self, pr):
    """Handle an incoming BIP70 payment request object."""
    if pr.verify(self.wallet.contacts):
        key = self.wallet.invoices.add(pr)
        if self.invoices_screen:
            self.invoices_screen.update()
        status = self.wallet.invoices.get_status(key)
        if status == PR_PAID:
            self.show_error("invoice already paid")
            self.send_screen.do_clear()
        else:
            if pr.has_expired():
                self.show_error(_('Payment request has expired'))
            else:
                # valid and unpaid: pre-fill the send tab
                self.switch_to('send')
                self.send_screen.set_request(pr)
    else:
        self.show_error("invoice error:" + pr.error)
        self.send_screen.do_clear()

def on_qr(self, data):
    """Dispatch scanned QR content: address, bitcoin: URI or raw tx."""
    from electrum.bitcoin import base_decode, is_address
    data = data.strip()
    if is_address(data):
        self.set_URI(data)
        return
    if data.startswith('bitcoin:'):
        self.set_URI(data)
        return
    # try to decode transaction
    from electrum.transaction import Transaction
    try:
        # Python 2 idiom (str.encode('hex')); tx QR codes use base43
        text = base_decode(data, None, base=43).encode('hex')
        tx = Transaction(text)
        tx.deserialize()
    except:
        tx = None
    if tx:
        self.tx_dialog(tx)
        return
    # show error
    self.show_error("Unable to decode QR data")
def update_tab(self, name):
    """Refresh the screen called ``<name>_screen``, if it exists."""
    screen = getattr(self, '%s_screen' % name, None)
    if not screen:
        return
    screen.update()
@profiler
def update_tabs(self):
    # refresh every main tab that can display wallet data
    for tab in ['invoices', 'send', 'history', 'receive', 'requests']:
        self.update_tab(tab)

def switch_to(self, name):
    """Make the named tab current, lazily loading its screen."""
    s = getattr(self, name + '_screen', None)
    if s is None:
        # screen not built yet: fetch the placeholder and load it
        s = self.tabs.ids[name + '_screen']
        s.load_screen()
    panel = self.tabs.ids.panel
    tab = self.tabs.ids[name + '_tab']
    panel.switch_to(tab)

def show_request(self, addr):
    # open the receive tab pre-filled with the given address
    self.switch_to('receive')
    self.receive_screen.screen.address = addr

def show_pr_details(self, req, status, is_invoice):
    """Populate and open the invoice/request details popup.

    :param req: dict describing the request (requestor, exp, memo, ...)
    :param status: one of the PR_* status constants
    :param is_invoice: True for outgoing invoices, False for requests
    """
    from electrum.util import format_time
    requestor = req.get('requestor')
    exp = req.get('exp')
    memo = req.get('memo')
    amount = req.get('amount')
    popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
    popup.is_invoice = is_invoice
    popup.amount = amount
    popup.requestor = requestor if is_invoice else req.get('address')
    popup.exp = format_time(exp) if exp else ''
    popup.description = memo if memo else ''
    popup.signature = req.get('signature', '')
    popup.status = status
    txid = req.get('txid')
    popup.tx_hash = txid or ''
    # fill the outputs list only once the popup is actually displayed
    popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
    popup.open()

def qr_dialog(self, title, data, show_text=False):
    # show arbitrary data as a QR code (optionally with the text below)
    from .uix.dialogs.qr_dialog import QRDialog
    popup = QRDialog(title, data, show_text)
    popup.open()
def scan_qr(self, on_complete):
    """Scan a QR code via the external ZXing Barcode Scanner app
    (Android only); *on_complete* receives the decoded string."""
    if platform != 'android':
        return
    from jnius import autoclass
    from android import activity
    PythonActivity = autoclass('org.kivy.android.PythonActivity')
    Intent = autoclass('android.content.Intent')
    intent = Intent("com.google.zxing.client.android.SCAN")
    intent.putExtra("SCAN_MODE", "QR_CODE_MODE")
    def on_qr_result(requestCode, resultCode, intent):
        if requestCode == 0:
            if resultCode == -1:  # RESULT_OK:
                contents = intent.getStringExtra("SCAN_RESULT")
                if intent.getStringExtra("SCAN_RESULT_FORMAT") == 'QR_CODE':
                    on_complete(contents)
                else:
                    self.show_error("wrong format " + intent.getStringExtra("SCAN_RESULT_FORMAT"))
    activity.bind(on_activity_result=on_qr_result)
    try:
        PythonActivity.mActivity.startActivityForResult(intent, 0)
    except:
        # the external scanner app is not installed
        self.show_error(_('Could not start Barcode Scanner.') + ' ' + _('Please install the Barcode Scanner app from ZXing'))

def scan_qr_zxing(self, on_complete):
    # uses zxing embedded lib
    # NOTE(review): the result callback duplicates the one in scan_qr;
    # consider extracting a shared helper.
    if platform != 'android':
        return
    from jnius import autoclass
    from android import activity
    PythonActivity = autoclass('org.kivy.android.PythonActivity')
    IntentIntegrator = autoclass('com.google.zxing.integration.android.IntentIntegrator')
    integrator = IntentIntegrator(PythonActivity.mActivity)
    def on_qr_result(requestCode, resultCode, intent):
        if requestCode == 0:
            if resultCode == -1:  # RESULT_OK:
                contents = intent.getStringExtra("SCAN_RESULT")
                if intent.getStringExtra("SCAN_RESULT_FORMAT") == 'QR_CODE':
                    on_complete(contents)
                else:
                    self.show_error("wrong format " + intent.getStringExtra("SCAN_RESULT_FORMAT"))
    activity.bind(on_activity_result=on_qr_result)
    integrator.initiateScan()

def do_share(self, data, title):
    """Open the Android share chooser with *data* as plain text."""
    if platform != 'android':
        return
    from jnius import autoclass, cast
    JS = autoclass('java.lang.String')
    Intent = autoclass('android.content.Intent')
    sendIntent = Intent()
    sendIntent.setAction(Intent.ACTION_SEND)
    sendIntent.setType("text/plain")
    sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
    PythonActivity = autoclass('org.kivy.android.PythonActivity')
    currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
    it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
    currentActivity.startActivity(it)

def build(self):
    # kivy entry point: the root widget comes from main.kv
    return Builder.load_file('gui/kivy/main.kv')

def _pause(self):
    if platform == 'android':
        # move activity to back
        from jnius import autoclass
        python_act = autoclass('org.kivy.android.PythonActivity')
        mActivity = python_act.mActivity
        mActivity.moveTaskToBack(True)

def on_start(self):
    ''' This is the start point of the kivy ui
    '''
    import time
    # NOTE(review): time.clock() is deprecated/removed in Python 3.8+
    Logger.info('Time to on_start: {} <<<<<<<<'.format(time.clock()))
    win = Window
    win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
    win.bind(on_key_down=self.on_key_down)
    #win.softinput_mode = 'below_target'
    self.on_size(win, win.size)
    self.init_ui()
    self.load_wallet_by_name(self.electrum_config.get_wallet_path())
    # init plugins
    run_hook('init_kivy', self)
    # fiat currency
    self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
    # default tab
    self.switch_to('history')
    # bind intent for bitcoin: URI scheme
    if platform == 'android':
        from android import activity
        from jnius import autoclass
        PythonActivity = autoclass('org.kivy.android.PythonActivity')
        mactivity = PythonActivity.mActivity
        self.on_new_intent(mactivity.getIntent())
        activity.bind(on_new_intent=self.on_new_intent)
    # connect callbacks
    if self.network:
        interests = ['updated', 'status', 'new_transaction', 'verified', 'interfaces']
        self.network.register_callback(self.on_network_event, interests)
        self.network.register_callback(self.on_quotes, ['on_quotes'])
        self.network.register_callback(self.on_history, ['on_history'])
    # URI passed in config
    uri = self.electrum_config.get('url')
    if uri:
        self.set_URI(uri)
def get_wallet_path(self):
    """Return the storage path of the open wallet, or '' if none."""
    return self.wallet.storage.path if self.wallet else ''
def on_wizard_complete(self, instance, wallet):
    # called when the install wizard finishes; wallet is None on cancel
    if wallet:
        wallet.start_threads(self.daemon.network)
        self.daemon.add_wallet(wallet)
        self.load_wallet(wallet)
        self.on_resume()

def load_wallet_by_name(self, path):
    """Open the wallet at *path*, or run the install wizard if the
    file does not exist yet."""
    if not path:
        return
    wallet = self.daemon.load_wallet(path, None)
    if wallet:
        if wallet != self.wallet:
            # a different wallet is open: close it first
            self.stop_wallet()
            self.load_wallet(wallet)
            self.on_resume()
    else:
        Logger.debug('Electrum: Wallet not found. Launching install wizard')
        storage = WalletStorage(path)
        wizard = Factory.InstallWizard(self.electrum_config, storage)
        wizard.bind(on_wizard_complete=self.on_wizard_complete)
        action = wizard.storage.get_action()
        wizard.run(action)

def on_stop(self):
    # kivy lifecycle: release the wallet on app shutdown
    self.stop_wallet()
def stop_wallet(self):
    """Ask the daemon to stop the current wallet and forget it."""
    wallet = self.wallet
    if not wallet:
        return
    self.daemon.stop_wallet(wallet.storage.path)
    self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
    """Handle ctrl+key desktop shortcuts."""
    if 'ctrl' in modifiers:
        # q=24 w=25
        if keycode in (24, 25):
            self.stop()
        elif keycode == 27:
            # r=27
            # force update wallet
            self.update_wallet()
        elif keycode == 112:
            # pageup
            #TODO move to next tab
            pass
        elif keycode == 117:
            # pagedown
            #TODO move to prev tab
            pass
    #TODO: alt+tab_number to activate the particular tab

def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
    # back button (27): require a second press to actually exit
    if key == 27 and self.is_exit is False:
        self.is_exit = True
        self.show_info(_('Press again to exit'))
        return True
    # override settings button
    if key in (319, 282):  #f1/settings button on android
        #self.gui.main_gui.toggle_settings(self)
        return True

def settings_dialog(self):
    # the dialog instance is cached and refreshed on each open
    from .uix.dialogs.settings import SettingsDialog
    if self._settings_dialog is None:
        self._settings_dialog = SettingsDialog(self)
    self._settings_dialog.update()
    self._settings_dialog.open()

def popup_dialog(self, name):
    """Open a named popup; unknown names are loaded from a matching
    kv file under gui/kivy/uix/ui_screens/."""
    if name == 'settings':
        self.settings_dialog()
    elif name == 'wallets':
        from .uix.dialogs.wallets import WalletDialog
        d = WalletDialog()
        d.open()
    else:
        popup = Builder.load_file('gui/kivy/uix/ui_screens/'+name+'.kv')
        popup.open()

@profiler
def init_ui(self):
    ''' Initialize The Ux part of electrum. This function performs the basic
    tasks of setting up the ui.
    '''
    #from weakref import ref
    self.funds_error = False
    # setup UX
    self.screens = {}
    #setup lazy imports for mainscreen
    Factory.register('AnimatedPopup',
                     module='electrum_gui.kivy.uix.dialogs')
    Factory.register('QRCodeWidget',
                     module='electrum_gui.kivy.uix.qrcodewidget')
    # preload widgets. Remove this if you want to load the widgets on demand
    #Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
    #Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
    # load and focus the ui
    self.root.manager = self.root.ids['manager']
    # tab screens are created lazily by switch_to()
    self.history_screen = None
    self.contacts_screen = None
    self.send_screen = None
    self.invoices_screen = None
    self.receive_screen = None
    self.requests_screen = None
    self.icon = "icons/electrum.png"
    self.tabs = self.root.ids['tabs']

def update_interfaces(self, dt):
    # refresh network-related kivy properties after an 'interfaces' event
    self.num_nodes = len(self.network.get_interfaces())
    self.num_chains = len(self.network.get_blockchains())
    chain = self.network.blockchain()
    self.blockchain_checkpoint = chain.get_checkpoint()
    self.blockchain_name = chain.get_name()
    if self.network.interface:
        self.server_host = self.network.interface.host
def on_network_event(self, event, *args):
    """Route network callbacks to the matching UI update triggers."""
    Logger.info('network event: '+ event)
    dispatch = {
        'interfaces': (self._trigger_update_interfaces,),
        'updated': (self._trigger_update_wallet, self._trigger_update_status),
        'status': (self._trigger_update_status,),
        'new_transaction': (self._trigger_update_wallet,),
        'verified': (self._trigger_update_wallet,),
    }
    for trigger in dispatch.get(event, ()):
        trigger()
@profiler
def load_wallet(self, wallet):
    """Attach *wallet* as the active wallet and refresh the UI."""
    self.wallet = wallet
    self.update_wallet()
    # Once GUI has been initialized check if we want to announce something
    # since the callback has been called before the GUI was initialized
    if self.receive_screen:
        self.receive_screen.clear()
        self.update_tabs()
    run_hook('load_wallet', wallet, self)

def update_status(self, *dt):
    """Recompute the status line (balance / sync state)."""
    self.num_blocks = self.network.get_local_height()
    if not self.wallet:
        self.status = _("No Wallet")
        return
    if self.network is None or not self.network.is_running():
        status = _("Offline")
    elif self.network.is_connected():
        server_height = self.network.get_server_height()
        server_lag = self.network.get_local_height() - server_height
        if not self.wallet.up_to_date or server_height == 0:
            status = _("Synchronizing...")
        elif server_lag > 1:
            status = _("Server lagging (%d blocks)"%server_lag)
        else:
            # confirmed + unconfirmed + unmatured balance
            c, u, x = self.wallet.get_balance()
            text = self.format_amount(c+x+u)
            status = str(text.strip() + ' ' + self.base_unit)
    else:
        status = _("Disconnected")
    n = self.wallet.basename()
    self.status = '[size=15dp]%s[/size]\n%s' %(n, status)
    #fiat_balance = self.fx.format_amount_and_units(c+u+x) or ''

def get_max_amount(self):
    """Amount string for spending all available coins to the current
    send address (or a dummy address when none is entered)."""
    inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
    # NOTE(review): str(address) is falsy only for '' — if address can
    # be None this yields the literal 'None'; confirm address is a
    # StringProperty defaulting to ''.
    addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
    outputs = [(TYPE_ADDRESS, addr, '!')]
    tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
    amount = tx.output_value()
    return format_satoshis_plain(amount, self.decimal_point())

def format_amount(self, x, is_diff=False, whitespaces=False):
    # satoshis -> string in the current base unit
    return format_satoshis(x, is_diff, 0, self.decimal_point(), whitespaces)

def format_amount_and_units(self, x):
    return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit

#@profiler
def update_wallet(self, *dt):
    # refresh tabs only when the wallet is in a stable state
    self._trigger_update_status()
    if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
        self.update_tabs()
def notify(self, message):
    """Show a desktop/mobile notification via plyer, if installed.

    A missing plyer package is logged and otherwise ignored.
    """
    try:
        global notification, os
        if not notification:
            # lazy import: plyer is optional
            from plyer import notification
        icon = (os.path.dirname(os.path.realpath(__file__))
                + '/../../' + self.icon)
        notification.notify('Electrum', message,
                            app_icon=icon, app_name='Electrum')
    except ImportError:
        # FIX: was Logger.Error — the kivy Logger is a standard
        # `logging` logger, whose method is lowercase `error`; the
        # misspelling raised AttributeError from inside this handler.
        Logger.error('Notification: needs plyer; `sudo pip install plyer`')
def on_pause(self):
    # pause nfc
    if self.nfcscanner:
        self.nfcscanner.nfc_disable()
    # returning True tells kivy the pause is accepted
    return True

def on_resume(self):
    if self.nfcscanner:
        self.nfcscanner.nfc_enable()
    # workaround p4a bug:
    # show an empty info bubble, to refresh the display
    self.show_info_bubble('', duration=0.1, pos=(0,0), width=1, arrow_pos=None)

def on_size(self, instance, value):
    # keep orientation / ui_mode in sync with the window geometry
    width, height = value
    self._orientation = 'landscape' if width > height else 'portrait'
    self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
    """First tap copies the label's data; second tap shows it as QR."""
    if label.touched:
        label.touched = False
        self.qr_dialog(label.name, label.data, True)
    else:
        label.touched = True
        self._clipboard.copy(label.data)
        Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))

def set_send(self, address, amount, label, message):
    self.send_payment(address, amount=amount, label=label, message=message)

def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
               exit=False, icon='atlas://gui/kivy/theming/light/error', duration=0,
               modal=False):
    ''' Show a error Message Bubble.
    '''
    self.show_info_bubble( text=error, icon=icon, width=width,
        pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
        duration=duration, modal=modal)

def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
              exit=False, duration=0, modal=False):
    ''' Show a Info Message Bubble.
    '''
    # same bubble as show_error, but with the "important" icon
    self.show_error(error, icon='atlas://gui/kivy/theming/light/important',
        duration=duration, modal=modal, exit=exit, pos=pos,
        arrow_pos=arrow_pos)

def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
                     arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
    '''Method to show a Information Bubble

    .. parameters::
        text: Message to be displayed
        pos: position for the bubble
        duration: duration the bubble remains on screen. 0 = click to hide
        width: width of the Bubble
        arrow_pos: arrow position for the bubble
    '''
    # the bubble widget is created once and reused
    info_bubble = self.info_bubble
    if not info_bubble:
        info_bubble = self.info_bubble = Factory.InfoBubble()
    win = Window
    if info_bubble.parent:
        # detach from its previous parent (or its modal wrapper)
        win.remove_widget(info_bubble
                          if not info_bubble.modal else
                          info_bubble._modal_view)
    if not arrow_pos:
        info_bubble.show_arrow = False
    else:
        info_bubble.show_arrow = True
        info_bubble.arrow_pos = arrow_pos
    img = info_bubble.ids.img
    if text == 'texture':
        # icon holds a texture not a source image
        # display the texture in full screen
        text = ''
        img.texture = icon
        info_bubble.fs = True
        info_bubble.show_arrow = False
        img.allow_stretch = True
        info_bubble.dim_background = True
        info_bubble.background_image = 'atlas://gui/kivy/theming/light/card'
    else:
        info_bubble.fs = False
        info_bubble.icon = icon
        #if img.texture and img._coreimage:
        #    img.reload()
        img.allow_stretch = False
        info_bubble.dim_background = False
        info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
    info_bubble.message = text
    if not pos:
        pos = (win.center[0], win.center[1] - (info_bubble.height/2))
    info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
    # show details for a single transaction
    from .uix.dialogs.tx_dialog import TxDialog
    d = TxDialog(self, tx)
    d.open()

def sign_tx(self, *args):
    # signing may be slow: run it off the UI thread
    threading.Thread(target=self._sign_tx, args=args).start()

def _sign_tx(self, tx, password, on_success, on_failure):
    """Worker: sign *tx*; callbacks are marshalled back to the UI
    thread via Clock."""
    try:
        self.wallet.sign_transaction(tx, password)
    except InvalidPassword:
        Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
        return
    Clock.schedule_once(lambda dt: on_success(tx))

def _broadcast_thread(self, tx, on_complete):
    # network call off the UI thread; result delivered via Clock
    ok, txid = self.network.broadcast(tx)
    Clock.schedule_once(lambda dt: on_complete(ok, txid))

def broadcast(self, tx, pr=None):
    """Broadcast *tx*; if *pr* is a payment request, mark it paid on
    success."""
    def on_complete(ok, msg):
        if ok:
            self.show_info(_('Payment sent.'))
            if self.send_screen:
                self.send_screen.do_clear()
            if pr:
                self.wallet.invoices.set_paid(pr, tx.txid())
                self.wallet.invoices.save()
                self.update_tab('invoices')
        else:
            self.show_error(msg)

    if self.network and self.network.is_connected():
        self.show_info(_('Sending'))
        threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
    else:
        self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))

def description_dialog(self, screen):
    # edit the free-text description of the current payment
    from .uix.dialogs.label_dialog import LabelDialog
    text = screen.message
    def callback(text):
        screen.message = text
    d = LabelDialog(_('Enter description'), text, callback)
    d.open()
@profiler
def amount_dialog(self, screen, show_max):
    """Open the numeric amount entry dialog for *screen*."""
    from .uix.dialogs.amount_dialog import AmountDialog
    amount = screen.amount
    if amount:
        # screen.amount is "<number> <unit>"; keep just the number
        amount, u = str(amount).split()
        assert u == self.base_unit
    def cb(amount):
        screen.amount = amount
    popup = AmountDialog(show_max, amount, cb)
    popup.open()

def protected(self, msg, f, args):
    """Run f(*args, password) — prompting for the PIN only when the
    wallet has one, otherwise passing None."""
    if self.wallet.has_password():
        self.password_dialog(msg, f, args)
    else:
        f(*(args + (None,)))

def delete_wallet(self):
    # confirmation dialog, then PIN check, then actual deletion
    from .uix.dialogs.question import Question
    basename = os.path.basename(self.wallet.storage.path)
    d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
    d.open()

def _delete_wallet(self, b):
    # b is the Question dialog result
    if b:
        basename = os.path.basename(self.wallet.storage.path)
        self.protected(_("Enter your PIN code to confirm deletion of %s") % basename, self.__delete_wallet, ())

def __delete_wallet(self, pw):
    """Verify the PIN, unlink the wallet file and fall back to
    'default_wallet' in the same directory."""
    wallet_path = self.get_wallet_path()
    dirname = os.path.dirname(wallet_path)
    basename = os.path.basename(wallet_path)
    if self.wallet.has_password():
        try:
            self.wallet.check_password(pw)
        except:
            self.show_error("Invalid PIN")
            return
    self.stop_wallet()
    os.unlink(wallet_path)
    self.show_error("Wallet removed:" + basename)
    d = os.listdir(dirname)
    name = 'default_wallet'
    new_path = os.path.join(dirname, name)
    self.load_wallet_by_name(new_path)

def show_seed(self, label):
    self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))

def _show_seed(self, label, password):
    # password is None when the user cancelled the PIN dialog
    if self.wallet.has_password() and password is None:
        return
    keystore = self.wallet.keystore
    try:
        seed = keystore.get_seed(password)
        passphrase = keystore.get_passphrase(password)
    except:
        self.show_error("Invalid PIN")
        return
    label.text = _('Seed') + ':\n' + seed
    if passphrase:
        label.text += '\n\n' + _('Passphrase') + ': ' + passphrase

def change_password(self, cb):
    # three-step flow: verify old PIN, enter new PIN, confirm new PIN
    if self.wallet.has_password():
        self.protected(_("Changing PIN code.") + '\n' + _("Enter your current PIN:"), self._change_password, (cb,))
    else:
        self._change_password(cb, None)

def _change_password(self, cb, old_password):
    if self.wallet.has_password():
        if old_password is None:
            return
        try:
            self.wallet.check_password(old_password)
        except InvalidPassword:
            self.show_error("Invalid PIN")
            return
    self.password_dialog(_('Enter new PIN'), self._change_password2, (cb, old_password,))

def _change_password2(self, cb, old_password, new_password):
    self.password_dialog(_('Confirm new PIN'), self._change_password3, (cb, old_password, new_password))

def _change_password3(self, cb, old_password, new_password, confirmed_password):
    if new_password == confirmed_password:
        self.wallet.update_password(old_password, new_password)
        cb()
    else:
        self.show_error("PIN numbers do not match")

def password_dialog(self, msg, f, args):
    """Prompt for the PIN and then call f(*args, pin) on the UI clock."""
    from .uix.dialogs.password_dialog import PasswordDialog
    def callback(pw):
        Clock.schedule_once(lambda x: f(*(args + (pw,))), 0.1)
    # the dialog widget is cached and re-initialized on each use
    if self._password_dialog is None:
        self._password_dialog = PasswordDialog()
    self._password_dialog.init(msg, callback)
    self._password_dialog.open()
| |
from django.db import connection
class DataRelatedToMe(object):
def __init__(self, username):
    """Collect dataverse/dataset/datafile ids related to *username*.

    Loading happens eagerly: role names first, then the dvobject
    info pipeline (steps 1-4).
    """
    self.username = username
    self.role_lookup = {}  # { role_id : name }
    self.direct_role_assignments = []
    self.direct_dvobject_assignments = []
    # NOTE(review): self.dv_object_ids is read by
    # step2_load_direct_dv_objects but never initialized here —
    # presumably set by step1_load_direct_assignments; confirm.
    # -----------------------
    # dataverses
    # -----------------------
    self.dataverse_info = []
    self.all_dataverse_ids = []
    # -----------------------
    # datasets
    # -----------------------
    self.dataset_info = []
    self.initial_dataset_ids = []      # from direct assignments
    self.secondary_dataset_ids = []    # found under assigned dataverses
    # -----------------------
    # datafiles
    # -----------------------
    self.file_info = []
    self.initial_file_ids = []         # from direct assignments
    self.secondary_file_ids = []       # found under collected datasets
    # -----------------------
    self.err_msg = None
    self.err_found = False
    self.load_roles()
    self.load_dvobject_info()
def get_total_object_count(self):
    """Total number of distinct dataverses, datasets and files."""
    id_sets = (self.get_dataverse_ids(),
               self.get_dataset_ids(),
               self.get_file_ids())
    return sum(len(ids) for ids in id_sets)

def get_dataverse_ids(self):
    """Get unique dataverse ids"""
    return set(self.all_dataverse_ids)

def get_dataset_ids(self):
    """Get unique Dataset ids"""
    return set(self.initial_dataset_ids) | set(self.secondary_dataset_ids)

def get_file_ids(self):
    """Get unique DataFile ids"""
    return set(self.initial_file_ids) | set(self.secondary_file_ids)
def load_roles(self):
    """Populate self.role_lookup ({role_id: name}) from dataverserole."""
    q = """SELECT name, id, description FROM dataverserole ORDER by id;"""
    self.role_query = q
    # get_query_results is defined further down in this class
    role_query_results = self.get_query_results(q)
    for qr in role_query_results:
        self.role_lookup[qr['id']] = qr['name']
    #d.update(dict(role_query=role_query, role_query_results=role_query_results))
def load_dvobject_info(self):
    """Run the four loading steps; stop early if either of the first
    two fails (they return falsy on failure)."""
    if not self.step1_load_direct_assignments():
        return
    if not self.step2_load_direct_dv_objects():
        return
    self.step3_load_indirect_dataset_info()
    # NOTE(review): method name has the '4' transposed
    # ("step_load4_..."); kept as-is since it matches the definition.
    self.step_load4_indirect_file_info()
def step_load4_indirect_file_info(self):
    """Find DataFiles owned by any dataset collected in steps 2-3.

    Fills self.secondary_file_ids and extends self.file_info; the ids
    may overlap with the directly assigned files from step 2.
    """
    if len(self.initial_dataset_ids) == 0 and len(self.secondary_dataset_ids) == 0:
        return
    # ids come from the database as integers, so inlining str() values
    # into the IN (...) clause is safe here
    dataset_ids_as_str = [str(x) for x in (self.initial_dataset_ids + self.secondary_dataset_ids)]
    q = """SELECT dv.id, dv.dtype, dv.modificationtime, dv.owner_id
        FROM dvobject dv
        WHERE dv.owner_id IN (%s)
        AND dv.dtype IN ('DataFile');
        """ % (','.join(dataset_ids_as_str),
               )
    self.secondary_file_query = q
    qresults = self.get_query_results(q)
    if qresults is None or len(qresults) == 0:
        return
    # May overlap with initial datafile_ids and info
    self.secondary_file_ids = [x['id'] for x in qresults]
    # FIX: was .append(qresults), which nested the whole result list as
    # a single element; step 2 fills file_info with individual row
    # dicts, so extend keeps the list shape consistent.
    self.file_info.extend(qresults)
def step3_load_indirect_dataset_info(self):
"""If the user has Dataverse assignments, look for underlying Datasets"""
if self.all_dataverse_ids is None or len(self.all_dataverse_ids) == 0:
return
dataverse_ids_as_str = [ str(x) for x in self.all_dataverse_ids]
q = """SELECT dv.id, dv.dtype, dv.modificationtime, dv.owner_id
FROM dvobject dv
WHERE dv.owner_id IN (%s)
AND dv.dtype IN ('Dataset');
""" % (','.join(dataverse_ids_as_str ),
)
self.secondary_dataset_query = q
qresults = self.get_query_results(q)
if qresults is None or len(qresults)==0:
return
# May overlap with initial dataset_ids and info
self.secondary_dataset_ids = [ x['id'] for x in qresults]
self.dataset_info.append(qresults)
def step2_load_direct_dv_objects(self):
assert self.dv_object_ids is not None and len(self.dv_object_ids) > 0, 'You must have dv_object_ids'
dv_ids_as_strings = [ str(x) for x in self.dv_object_ids]
q = """SELECT dv.id, dv.dtype, dv.modificationtime, dv.owner_id
FROM dvobject dv
WHERE dv.id IN (%s)
ORDER BY dv.dtype;""" % ','.join(dv_ids_as_strings)
self.dvobject_query = q
qresults = self.get_query_results(q)
if qresults is None or len(qresults)==0:
self.add_err_msg('No direct dv objects found.')
return False
self.direct_dvobject_assignments = qresults
# Parse out Dataverse information (complete)
#
self.dataverse_info = [ x for x in qresults if x['dtype'] == 'Dataverse']
self.all_dataverse_ids = [ x['id'] for x in self.dataverse_info]
# Parse out Dataset information (incomplete)
#
self.dataset_info = [ x for x in qresults if x['dtype'] == 'Dataset']
self.initial_dataset_ids = [ x['id'] for x in self.dataset_info]
# Parse out File information (incomplete)
#
self.file_info = [ x for x in qresults if x['dtype'] == 'DataFile']
self.initial_file_ids = [ x['id'] for x in self.file_info]
print 'initial_file_ids', len(self.initial_file_ids)
return True
    def step1_load_direct_assignments(self):
        """
        Pull Info for Directly Assigned Objects

        Loads role assignments whose assignee matches self.username and
        records the assigned object ids in self.dv_object_ids.
        Returns True on success, False when no assignments were found.
        """
        # SECURITY NOTE(review): self.username is interpolated straight into
        # the SQL string; if it can ever contain attacker-controlled text this
        # is an injection hole -- switch to cursor.execute() parameters.
        # substr(..., 2) strips the leading '@' from the assignee identifier.
        q = """SELECT r.id, r.assigneeidentifier, r.definitionpoint_id, r.role_id
            FROM roleassignment r
            WHERE substr(r.assigneeidentifier, 2)= '%s';""" % (self.username,)
        self.assign_query = q
        qresults = self.get_query_results(q)
        if qresults is None or len(qresults)==0:
            self.add_err_msg('No direct role assignments found.')
            return False
        self.direct_role_assignments = qresults
        # definitionpoint_id is the dvobject the role is assigned on
        self.dv_object_ids = [ x['definitionpoint_id'] for x in qresults]
        return True
def add_err_msg(self, m):
self.err_found = True
self.err_msg = m
def get_query_results(self, query_str):
cursor = connection.cursor()
cursor.execute(query_str)
return self.dictfetchall(cursor)
def dictfetchall(self, cursor):
"Returns all rows from a cursor as a dict"
desc = cursor.description
return [
dict(zip([col[0] for col in desc], row))
for row in cursor.fetchall()
]
| |
"""resonance finder
"""
import argparse
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import chirp, spectrogram
# from scipy.signal import butter, lfilter_zi, lfilter
from scipy import signal
def main_single(args):
    """Run a single resonance-finding experiment (Python 2 script).

    Drives a toy delay-line resonance model with a sinusoid whose phase
    increment is randomly perturbed every 100 steps, then nudges the
    increment toward settings that increase the (low-pass filtered) output
    variance, i.e. toward a resonance of the delay network. Plots the
    input/output signals, their variances and spectrograms at the end.
    """
    # print "args", args
    plt.ion()
    # system: very simple resonance model with one delay line per dimension of space
    # space dimension
    dim_space = 2 # standard
    # state dimension
    dim_state = 1 # monoaural microphone
    # motor dimension
    dim_motor = 1 # monoaural speaker
    delay_max = 200
    delay_buf = np.zeros((delay_max, 1)) # dim_state))
    # NOTE(review): np.array((50)) is a 0-d scalar array, not a 1-element
    # vector -- works as a single tap index but differs from the commented
    # multi-tap variants below.
    delay_tap = np.array((50))
    # delay_tap = np.array((13, 17, 39))
    # delay_tap = np.array((20, 25))
    # delay_tap = np.array((20, 25, 39, 120, 137, 67))
    # W_i = np.zeros(())
    # exploration: sweep, uniform noise, ...
    sr = 1e4
    numsec = 1
    numstep = int(sr * numsec)
    T = np.linspace(0, 1, numstep) #
    # # input
    # X = chirp(T, 0, 1.0, sr/2) #
    # Y = np.zeros((numstep, 1))
    # for i, t in enumerate(T):
    #     # update delay lines
    #     delay_buf[0,0] = X[i]
    #     delay_buf = np.roll(delay_buf, shift = 1, axis = 0)
    #     # update state from taps (end of line)
    #     Y[i,0] = X[i] + np.sum(delay_buf[delay_tap,0] * 0.96)
    # Xf, Xt, Xspec = spectrogram(X.T, fs = sr)
    # Yf, Yt, Yspec = spectrogram(Y.T, fs = sr)
    # print "X", Xf.shape, Xt.shape, Xspec.shape
    # print "Y", Yf.shape, Yt.shape, Yspec.shape
    # # output
    # fig = plt.figure()
    # ax1 = fig.add_subplot(2,2,1)
    # ax1.plot(X)
    # ax2 = fig.add_subplot(2,2,3)
    # ax2.pcolor(Xt, Xf, Xspec)
    # ax3 = fig.add_subplot(2,2,2)
    # ax3.plot(Y)
    # ax4 = fig.add_subplot(2,2,4)
    # ax4.pcolor(Yt, Yf, Yspec[0])
    # plt.draw()
    # interactive: random starting frequency for the probe sinusoid
    param_f = np.random.uniform(0, sr/2.0)
    A = np.zeros((numstep, 1))
    DT_ = np.zeros((numstep, 1))
    X = np.zeros((numstep, 1))
    Y = np.zeros((numstep, 1))
    A_var = np.zeros((numstep, 1))
    X_var = np.zeros((numstep, 1))
    Y_var = np.zeros((numstep, 1))
    X_var_z = np.zeros((numstep, 1))
    Y_var_z = np.zeros((numstep, 1))
    # eta = 0.5
    eta = 1
    # low-pass filter used to smooth the running variance estimates
    b, a = signal.butter(10, 0.01)
    print "b = %s, a = %s" % (b, a)
    zi = signal.lfilter_zi(b, a)
    # zi = zi.reshape((1, -1))
    print "zi", zi.shape
    t_ = 0.0
    dt_ = (param_f/sr)*np.pi
    updatecnt = 0
    for i, t in enumerate(T):
        if i % 100 == 0:
            # random exploration around the current increment dt_
            lim_expl = 5e-3
            A[i,0] = dt_ + np.random.uniform(-lim_expl, lim_expl)
        else:
            A[i,0] = A[i-1,0]
        DT_[i,0] = dt_
        # t_ += dt_
        t_ += A[i,0]
        X[i,0] = np.sin(t_)
        # update delay lines
        delay_buf[0,0] = X[i,0]
        delay_buf = np.roll(delay_buf, shift = 1, axis = 0)
        # update state from taps (end of line)
        Y[i,0] = X[i] + np.sum(delay_buf[delay_tap,0] * 0.96)
        if i < 100: continue
        # filtered version: variance over a 100-sample sliding window
        X_var[i,0] = np.var(X[i-100:i])
        Y_var[i,0] = np.var(Y[i-100:i])
        if i == 100:
            X_zi = zi*X_var[i-1]
            Y_zi = zi*Y_var[i-1]
        else:
            X_zi = X_var_zi_f
            Y_zi = Y_var_zi_f
        # print "X[i-100:i]", X[i-100:i].shape
        # print "zi*X[i-100]", (zi*X[i-100]).shape
        # X_var_z[i,0] = signal.lfilter(b, a, X_var[i-100:i]) #, zi=zi*X[i-100])
        # Y_var_z[i,0] = signal.lfilter(b, a, Y_var[i-100:i]) #, zi=zi*Y[i-100])
        # print "X_var[i]", X_var[i-100:i,0].T, "X_zi", X_zi
        # print "Y_var[i]", Y_var[i-100:i,0].T, "Y_zi", Y_zi
        # single step: carry lfilter state forward between windows
        X_var_z_f, X_var_zi_f = signal.lfilter(b, a, X_var[i-100:i,0].T, zi=X_zi)
        Y_var_z_f, Y_var_zi_f = signal.lfilter(b, a, Y_var[i-100:i,0].T, zi=Y_zi)
        # print "X_var_zi_f", X_var_zi_f
        X_var_z[i,0] = X_var_z_f[-1]
        Y_var_z[i,0] = Y_var_z_f[-1]
        # X_var[i,0] = np.var(Xz)
        # Y_var[i,0] = np.var(Yz)
        # if np.sum(Y_var[i-10:i,0]) > np.sum(Y_var[i-110:i-100,0]):
        #     dt_ += eta * (A[i,0] - dt_)
        if i % 100 == 0:
            print "resfind[%d] checking var(X) = %s, var(Y) = %s" % (i, X_var[i], Y_var[i])
            # if Y_var[i-1,0] > Y_var[i-101,0]:
            # # v1: initial
            # if (Y_var[i-1,0] - Y_var[i-101,0]) > 0.001:
            #     print "resfind[%d] updating dt_ = %f" % (i, dt_)
            #     dt_ += eta * (A[i-1,0] - dt_)
            # accept the explored increment if the smoothed output variance rose
            if (Y_var_z[i,0] - Y_var_z[i-100,0]) > 0.00:
            # if (np.mean(Y_var_z[i-100:i,0]) - np.mean(Y_var_z[i-200:i-100,0])) > 0.00:
                print "resfind[%d] updating dt_ = %f" % (i, dt_)
                # dt_ += eta * (A[i-1,0] - dt_)
                dt_ += eta * (A[i-1,0] - dt_)
            else:
                pass
            if i < 2000: continue
            # long-horizon variance change; only logged / used by the
            # commented-out stagnation re-init below
            dvar_long = np.abs(np.mean(Y_var[i-1000:i,0]) - np.mean(Y_var[i-2000:i-1000,0]))
            # if dvar_long < 1e-2:
            # # if i % 10000 == 11000:
            #     print "dvar_long = %f" % (dvar_long)
            #     param_f = np.random.uniform(0, sr/2.0)
            #     # param_f = np.random.uniform(-1e-2, 1e-2)
            #     dt_ = (param_f/sr)*np.pi
            #     print "converged / stagnated - reinit dt_ = %f" % (dt_, )
        # measure Y[i,0] with extensiveness N
        # update param_f toward max meas(Y)
    Xf, Xt, Xspec = spectrogram(X.T, fs = sr)
    Yf, Yt, Yspec = spectrogram(Y.T, fs = sr)
    fig2 = plt.figure()
    ax1 = fig2.add_subplot(4,2,1)
    ax1.plot(X)
    ax2 = fig2.add_subplot(4,2,3)
    ax2.plot(X_var)
    ax3 = fig2.add_subplot(4,2,5)
    ax3.pcolor(Xt, Xf, Xspec[0])
    ax4 = fig2.add_subplot(4,2,2)
    ax4.plot(Y)
    ax5 = fig2.add_subplot(4,2,4)
    ax5.plot(Y_var)
    ax6 = fig2.add_subplot(4,2,6)
    ax6.pcolor(Yt, Yf, Yspec[0])
    ax8 = fig2.add_subplot(4,2,8)
    ax8.plot(DT_)
    plt.draw()
    plt.ioff()
    plt.show()
    # measure input power
    # measure output power
    # update freq params towards i/o gain gradient
    # sys =
if __name__ == '__main__':
    # CLI entry point: only the 'single' mode is implemented
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--mode', type=str, default='single')
    args = parser.parse_args()
    if args.mode in ['single']:
        main_single(args)
    else:
        print 'Unknown mode %s' % (args.mode,)
| |
"""Component to integrate the Home Assistant cloud."""
import logging
from hass_nabucasa import Cloud
import voluptuous as vol
from homeassistant.components.alexa import const as alexa_const
from homeassistant.components.google_assistant import const as ga_c
from homeassistant.const import (
CONF_MODE,
CONF_NAME,
CONF_REGION,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv, entityfilter
from homeassistant.loader import bind_hass
from homeassistant.util.aiohttp import MockRequest
from . import account_link, http_api
from .client import CloudClient
from .const import (
CONF_ACCOUNT_LINK_URL,
CONF_ACME_DIRECTORY_SERVER,
CONF_ALEXA,
CONF_ALEXA_ACCESS_TOKEN_URL,
CONF_ALIASES,
CONF_CLOUDHOOK_CREATE_URL,
CONF_COGNITO_CLIENT_ID,
CONF_ENTITY_CONFIG,
CONF_FILTER,
CONF_GOOGLE_ACTIONS,
CONF_GOOGLE_ACTIONS_REPORT_STATE_URL,
CONF_RELAYER,
CONF_REMOTE_API_URL,
CONF_SUBSCRIPTION_INFO_URL,
CONF_USER_POOL_ID,
CONF_VOICE_API_URL,
DOMAIN,
MODE_DEV,
MODE_PROD,
)
from .prefs import CloudPreferences
_LOGGER = logging.getLogger(__name__)
# Default operating mode when no cloud: section is configured
DEFAULT_MODE = MODE_PROD
# Admin services registered by async_setup
SERVICE_REMOTE_CONNECT = "remote_connect"
SERVICE_REMOTE_DISCONNECT = "remote_disconnect"
# Per-entity overrides accepted under alexa.entity_config
ALEXA_ENTITY_SCHEMA = vol.Schema(
    {
        vol.Optional(alexa_const.CONF_DESCRIPTION): cv.string,
        vol.Optional(alexa_const.CONF_DISPLAY_CATEGORIES): cv.string,
        vol.Optional(CONF_NAME): cv.string,
    }
)
# Per-entity overrides accepted under google_actions.entity_config
GOOGLE_ENTITY_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_NAME): cv.string,
        vol.Optional(CONF_ALIASES): vol.All(cv.ensure_list, [cv.string]),
        vol.Optional(ga_c.CONF_ROOM_HINT): cv.string,
    }
)
# Shared base: both assistants take an entity filter
ASSISTANT_SCHEMA = vol.Schema(
    {vol.Optional(CONF_FILTER, default=dict): entityfilter.FILTER_SCHEMA}
)
ALEXA_SCHEMA = ASSISTANT_SCHEMA.extend(
    {vol.Optional(CONF_ENTITY_CONFIG): {cv.entity_id: ALEXA_ENTITY_SCHEMA}}
)
GACTIONS_SCHEMA = ASSISTANT_SCHEMA.extend(
    {vol.Optional(CONF_ENTITY_CONFIG): {cv.entity_id: GOOGLE_ENTITY_SCHEMA}}
)
# pylint: disable=no-value-for-parameter
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Optional(CONF_MODE, default=DEFAULT_MODE): vol.In(
                    [MODE_DEV, MODE_PROD]
                ),
                vol.Optional(CONF_COGNITO_CLIENT_ID): str,
                vol.Optional(CONF_USER_POOL_ID): str,
                vol.Optional(CONF_REGION): str,
                vol.Optional(CONF_RELAYER): str,
                vol.Optional(CONF_SUBSCRIPTION_INFO_URL): vol.Url(),
                vol.Optional(CONF_CLOUDHOOK_CREATE_URL): vol.Url(),
                vol.Optional(CONF_REMOTE_API_URL): vol.Url(),
                vol.Optional(CONF_ACME_DIRECTORY_SERVER): vol.Url(),
                vol.Optional(CONF_ALEXA): ALEXA_SCHEMA,
                vol.Optional(CONF_GOOGLE_ACTIONS): GACTIONS_SCHEMA,
                vol.Optional(CONF_ALEXA_ACCESS_TOKEN_URL): vol.Url(),
                vol.Optional(CONF_GOOGLE_ACTIONS_REPORT_STATE_URL): vol.Url(),
                vol.Optional(CONF_ACCOUNT_LINK_URL): vol.Url(),
                vol.Optional(CONF_VOICE_API_URL): vol.Url(),
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
class CloudNotAvailable(HomeAssistantError):
    """Error raised when a cloud-backed action is requested while the cloud is unavailable."""
@bind_hass
@callback
def async_is_logged_in(hass) -> bool:
    """Return True when a cloud instance exists and is logged in."""
    cloud = hass.data.get(DOMAIN)
    return cloud is not None and cloud.is_logged_in
@bind_hass
@callback
def async_active_subscription(hass) -> bool:
    """Return True when the user is logged in with a non-expired subscription."""
    if not async_is_logged_in(hass):
        return False
    return not hass.data[DOMAIN].subscription_expired
@bind_hass
async def async_create_cloudhook(hass, webhook_id: str) -> str:
    """Create a cloudhook for *webhook_id* and return its public URL.

    Raises CloudNotAvailable when the user is not logged in.
    """
    if not async_is_logged_in(hass):
        raise CloudNotAvailable
    created = await hass.data[DOMAIN].cloudhooks.async_create(webhook_id, True)
    return created["cloudhook_url"]
@bind_hass
async def async_delete_cloudhook(hass, webhook_id: str) -> None:
    """Delete the cloudhook registered for *webhook_id*.

    Raises CloudNotAvailable when the cloud integration is not set up.
    """
    cloud = hass.data.get(DOMAIN)
    if cloud is None:
        raise CloudNotAvailable
    await cloud.cloudhooks.async_delete(webhook_id)
@bind_hass
@callback
def async_remote_ui_url(hass) -> str:
    """Return the remote UI URL, raising CloudNotAvailable when it cannot be built."""
    if not async_is_logged_in(hass):
        raise CloudNotAvailable
    cloud = hass.data[DOMAIN]
    if not cloud.client.prefs.remote_enabled:
        raise CloudNotAvailable
    instance_domain = cloud.remote.instance_domain
    if not instance_domain:
        raise CloudNotAvailable
    return f"https://{instance_domain}"
def is_cloudhook_request(request):
    """Return True when *request* originated from a cloudhook.

    Cloudhook deliveries arrive wrapped in a MockRequest. Async friendly.
    """
    return isinstance(request, MockRequest)
async def async_setup(hass, config):
    """Initialize the Home Assistant cloud.

    Builds the Cloud object from YAML config (or defaults), wires it to the
    Home Assistant lifecycle events, registers the remote connect/disconnect
    admin services, and defers platform discovery until the cloud IoT
    connection is first established.
    """
    # Process configs; fall back to the default mode when no cloud: section
    if DOMAIN in config:
        kwargs = dict(config[DOMAIN])
    else:
        kwargs = {CONF_MODE: DEFAULT_MODE}
    # Alexa/Google custom config is popped so the remaining kwargs can be
    # forwarded verbatim to the Cloud constructor
    alexa_conf = kwargs.pop(CONF_ALEXA, None) or ALEXA_SCHEMA({})
    google_conf = kwargs.pop(CONF_GOOGLE_ACTIONS, None) or GACTIONS_SCHEMA({})
    # Cloud settings
    prefs = CloudPreferences(hass)
    await prefs.async_initialize()
    # Initialize Cloud
    websession = hass.helpers.aiohttp_client.async_get_clientsession()
    client = CloudClient(hass, prefs, websession, alexa_conf, google_conf)
    cloud = hass.data[DOMAIN] = Cloud(client, **kwargs)
    async def _startup(event):
        """Start the cloud connection on Home Assistant start."""
        await cloud.start()
    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, _startup)
    async def _shutdown(event):
        """Stop the cloud connection on Home Assistant stop."""
        await cloud.stop()
    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _shutdown)
    async def _service_handler(service):
        """Handle the remote_connect / remote_disconnect admin services."""
        if service.service == SERVICE_REMOTE_CONNECT:
            await cloud.remote.connect()
            await prefs.async_update(remote_enabled=True)
        elif service.service == SERVICE_REMOTE_DISCONNECT:
            await cloud.remote.disconnect()
            await prefs.async_update(remote_enabled=False)
    hass.helpers.service.async_register_admin_service(
        DOMAIN, SERVICE_REMOTE_CONNECT, _service_handler
    )
    hass.helpers.service.async_register_admin_service(
        DOMAIN, SERVICE_REMOTE_DISCONNECT, _service_handler
    )
    loaded = False
    async def _on_connect():
        """Discover the cloud platforms on first IoT connect only."""
        nonlocal loaded
        # Prevent multiple discovery: the IoT connection may reconnect
        if loaded:
            return
        loaded = True
        hass.async_create_task(
            hass.helpers.discovery.async_load_platform(
                "binary_sensor", DOMAIN, {}, config
            )
        )
        hass.async_create_task(
            hass.helpers.discovery.async_load_platform("stt", DOMAIN, {}, config)
        )
        hass.async_create_task(
            hass.helpers.discovery.async_load_platform("tts", DOMAIN, {}, config)
        )
    cloud.iot.register_on_connect(_on_connect)
    await http_api.async_setup(hass)
    account_link.async_setup(hass)
    return True
| |
"""
Kernel Call Details
===================
When calling sas computational kernels with polydispersity there are a
number of details that need to be sent to the caller. This includes the
list of polydisperse parameters, the number of points in the polydispersity
weight distribution, and which parameter is the "theta" parameter for
polar coordinate integration. The :class:`CallDetails` object maintains
this data. Use :func:`make_details` to build a *details* object which
can be passed to one of the computational kernels.
"""
from __future__ import print_function
import numpy as np # type: ignore
from numpy import cos, sin, radians
from .modelinfo import NUM_MAGNETIC_PARS, NUM_COMMON_PARS
# Use np.meshgrid directly when it supports a single vector; older numpy
# required at least two vectors, so fall back to a small shim.
try:
    np.meshgrid([])
    meshgrid = np.meshgrid
except Exception:
    # CRUFT: np.meshgrid requires multiple vectors
    def meshgrid(*args):
        """See docs from a recent version of numpy"""
        if len(args) > 1:
            return np.meshgrid(*args)
        else:
            return [np.asarray(v) for v in args]
# Typing imports are only needed for static analysis; ignore when absent.
# pylint: disable=unused-import
try:
    from typing import List, Tuple, Sequence
    from .modelinfo import ModelInfo, ParameterTable
    from .kernel import Kernel
except ImportError:
    pass
# pylint: enable=unused-import
class CallDetails(object):
    """
    Manage the polydispersity information for the kernel call.
    Conceptually, a polydispersity calculation is an integral over a mesh
    in n-D space where n is the number of polydisperse parameters. In order
    to keep the program responsive, and not crash the GPU, only a portion
    of the mesh is computed at a time. Meshes with a large number of points
    will therefore require many calls to the polydispersity loop. Restarting
    a nested loop in the middle requires that the indices of the individual
    mesh dimensions can be computed for the current loop location. This
    is handled by the *pd_stride* vector, with n//stride giving the loop
    index and n%stride giving the position in the sub loops.
    One of the parameters may be the latitude. When integrating in polar
    coordinates, the total circumference decreases as latitude varies from
    pi r^2 at the equator to 0 at the pole, and the weight associated
    with a range of latitude values needs to be scaled by this circumference.
    This scale factor needs to be updated each time the theta value
    changes. *theta_par* indicates which of the values in the parameter
    vector is the latitude parameter, or -1 if there is no latitude
    parameter in the model. In practice, the normalization term cancels
    if the latitude is not a polydisperse parameter.
    """
    # Sub-details for composite (sum/product) models; None for simple models.
    parts = None # type: List["CallDetails"]
    def __init__(self, model_info):
        # type: (ModelInfo) -> None
        parameters = model_info.parameters
        max_pd = parameters.max_pd
        # Structure of the call details buffer:
        # pd_par[max_pd] pd params in order of length
        # pd_length[max_pd] length of each pd param
        # pd_offset[max_pd] offset of pd values in parameter array
        # pd_stride[max_pd] index of pd value in loop = n//stride[k]
        # num_eval total length of pd loop
        # num_weights total length of the weight vector
        # num_active number of pd params
        # theta_par parameter number for theta parameter
        self.buffer = np.empty(4*max_pd + 4, 'i4')
        # generate views on different parts of the array; these alias the
        # buffer, so writing through a view updates what is sent to the kernel
        self._pd_par = self.buffer[0 * max_pd:1 * max_pd]
        self._pd_length = self.buffer[1 * max_pd:2 * max_pd]
        self._pd_offset = self.buffer[2 * max_pd:3 * max_pd]
        self._pd_stride = self.buffer[3 * max_pd:4 * max_pd]
        # theta_par is fixed
        self.theta_par = parameters.theta_offset
        # offset and length are for all parameters, not just pd parameters
        # They are not sent to the kernel function, though they could be.
        # They are used by the composite models (sum and product) to
        # figure out offsets into the combined value list.
        self.offset = None # type: np.ndarray
        self.length = None # type: np.ndarray
        # keep hold of info for show() so we can break a values vector
        # into the individual components
        self.info = model_info
    @property
    def pd_par(self):
        """List of polydisperse parameters"""
        return self._pd_par
    @property
    def pd_length(self):
        """Number of weights for each polydisperse parameter"""
        return self._pd_length
    @property
    def pd_offset(self):
        """Offsets for the individual weight vectors in the set of weights"""
        return self._pd_offset
    @property
    def pd_stride(self):
        """Stride in the pd mesh for each pd dimension"""
        return self._pd_stride
    @property
    def num_eval(self):
        """Total size of the pd mesh"""
        return self.buffer[-4]
    @num_eval.setter
    def num_eval(self, v):
        """Total size of the pd mesh"""
        self.buffer[-4] = v
    @property
    def num_weights(self):
        """Total length of all the weight vectors"""
        return self.buffer[-3]
    @num_weights.setter
    def num_weights(self, v):
        """Total length of all the weight vectors"""
        self.buffer[-3] = v
    @property
    def num_active(self):
        """Number of active polydispersity loops"""
        return self.buffer[-2]
    @num_active.setter
    def num_active(self, v):
        """Number of active polydispersity loops"""
        self.buffer[-2] = v
    @property
    def theta_par(self):
        """Location of the theta parameter in the parameter vector"""
        return self.buffer[-1]
    @theta_par.setter
    def theta_par(self, v):
        """Location of the theta parameter in the parameter vector"""
        self.buffer[-1] = v
    def show(self, values=None):
        """Print the polydispersity call details to the console"""
        print("===== %s details ===="%self.info.name)
        print("num_active:%d num_eval:%d num_weights:%d theta=%d"
              % (self.num_active, self.num_eval, self.num_weights, self.theta_par))
        if self.pd_par.size:
            print("pd_par", self.pd_par)
            print("pd_length", self.pd_length)
            print("pd_offset", self.pd_offset)
            print("pd_stride", self.pd_stride)
        if values is not None:
            nvalues = self.info.parameters.nvalues
            print("scale, background", values[:2])
            print("val", values[2:nvalues])
            print("pd", values[nvalues:nvalues+self.num_weights])
            print("wt", values[nvalues+self.num_weights:nvalues+2*self.num_weights])
            print("offsets", self.offset)
def make_details(model_info, length, offset, num_weights):
    # type: (ModelInfo, np.ndarray, np.ndarray, int) -> CallDetails
    """
    Build a :class:`CallDetails` object for a polydisperse calculation of
    the model described by *model_info*.

    *length* gives the number of weights in each parameter's polydispersity
    distribution and *offset* its starting position in the weight array;
    monodisperse parameters use length one with weight 1.0. *num_weights*
    is the total length of the weight array.
    """
    max_pd = model_info.parameters.max_pd
    num_active = np.sum(length > 1)
    if num_active > max_pd:
        raise ValueError("Too many polydisperse parameters")
    # Longest distributions first; the reversed argsort view is copy-free.
    order = np.argsort(length)[::-1][:max_pd]
    # Cumulative product of lengths gives the stride for each pd dimension,
    # with the final entry being the total number of evaluations.
    stride = np.cumprod(np.hstack((1, length[order])))
    details = CallDetails(model_info)
    details.pd_par[:max_pd] = order
    details.pd_length[:max_pd] = length[order]
    details.pd_offset[:max_pd] = offset[order]
    details.pd_stride[:max_pd] = stride[:-1]
    details.num_eval = stride[-1]
    details.num_weights = num_weights
    details.num_active = num_active
    # Full per-parameter length/offset, kept for composite (sum/product) models.
    details.length = length
    details.offset = offset
    return details
# Padding source: up to 31 zeros are appended in make_kernel_args to round
# the value array up to a 32-element boundary.
ZEROS = tuple([0.]*31)
def make_kernel_args(kernel, mesh):
    # type: (Kernel, Tuple[List[np.ndarray], List[np.ndarray]]) -> Tuple[CallDetails, np.ndarray, bool]
    """
    Converts (value, dispersity, weight) for each parameter into kernel pars.
    Returns a CallDetails object indicating the polydispersity, a data object
    containing the different values, and the magnetic flag indicating whether
    any magnetic magnitudes are non-zero. Magnetic vectors (M0, phi, theta) are
    converted to rectangular coordinates (mx, my, mz).
    """
    npars = kernel.info.parameters.npars
    nvalues = kernel.info.parameters.nvalues
    # central value of every parameter, including scale and background
    scalars = [value for value, dispersity, weight in mesh]
    # skipping scale and background when building values and weights
    _, dispersity, weight = (
        zip(*mesh[NUM_COMMON_PARS:npars+NUM_COMMON_PARS]) if npars
        else ((), (), ()))
    #weight = correct_theta_weights(kernel.info.parameters, dispersity, weight)
    length = np.array([len(w) for w in weight])
    # offset[:-1] is the start of each weight vector; offset[-1] the total length
    offset = np.cumsum(np.hstack((0, length)))
    call_details = make_details(kernel.info, length, offset[:-1], offset[-1])
    # Pad value array to a 32 value boundary
    data_len = nvalues + 2*sum(len(v) for v in dispersity)
    extra = (32 - data_len%32)%32
    # layout: [scalars | dispersity vectors | weight vectors | zero padding]
    data = np.hstack((scalars,) + dispersity + weight + ZEROS[:extra])
    data = data.astype(kernel.dtype)
    # converts the magnetic values inside *data* in place
    is_magnetic = convert_magnetism(kernel.info.parameters, data)
    #call_details.show()
    #print("data", data)
    return call_details, data, is_magnetic
def correct_theta_weights(parameters, dispersity, weights):
    # type: (ParameterTable, Sequence[np.ndarray], Sequence[np.ndarray]) -> Sequence[np.ndarray]
    """
    **Deprecated** Theta weights will be computed in the kernel wrapper if
    they are needed.

    When the model has a theta parameter, rescale that parameter's weights
    by |cos(theta)| so that polar integration preserves the circumference
    weighting. The *dispersity* and *weights* sequences exclude scale and
    background, so they are indexed directly by parameters.theta_offset.
    Returns the (possibly updated) weights sequence.
    """
    theta_index = parameters.theta_offset
    if theta_index < 0:
        # no latitude parameter in this model; weights pass through untouched
        return weights
    # abs() keeps the weight positive; exact poles would otherwise zero it out
    scale = abs(cos(radians(dispersity[theta_index])))
    corrected = []
    for position, w in enumerate(weights):
        corrected.append(scale * w if position == theta_index else w)
    return tuple(corrected)
def convert_magnetism(parameters, values):
    # type: (ParameterTable, Sequence[np.ndarray]) -> bool
    """
    Convert magnetism values from polar (M0, theta, phi) to rectangular
    (mx, my, mz) coordinates, in place at the tail of *values*.
    Returns True if any magnetic amplitude is non-zero.
    """
    num_mag_values = NUM_MAGNETIC_PARS * parameters.nmagnetic
    mag = values[parameters.nvalues - num_mag_values:parameters.nvalues]
    mag = mag.reshape(-1, NUM_MAGNETIC_PARS)
    if not np.any(mag[:, 0] != 0.0):
        return False
    # copy the amplitudes before the in-place overwrite of column 0
    amplitude = mag[:, 0].copy()
    theta, phi = radians(mag[:, 1]), radians(mag[:, 2])
    mag[:, 0] = amplitude * sin(theta) * cos(phi)  # mx
    mag[:, 1] = amplitude * sin(theta) * sin(phi)  # my
    mag[:, 2] = amplitude * cos(theta)             # mz
    return True
def dispersion_mesh(model_info, mesh):
    # type: (ModelInfo, List[Tuple[float, np.ndarray, np.ndarray]]) -> Tuple[List[np.ndarray], List[np.ndarray]]
    """
    Create a mesh grid of dispersion parameters and weights.

    *mesh* is a list of (value, dispersity, weights) triples, one per
    volume parameter; the central values are ignored. Returns
    ([p1, p2, ...], w) where each pj is the flattened grid of values for
    parameter j and w holds the product of the weights at each grid point.
    Vector-valued parameters (length > 1) are regrouped into stacked arrays.
    """
    _, dispersity, weight = zip(*mesh)
    # outer product of the weight vectors, flattened to one weight per point
    weight_grids = meshgrid(*weight)
    weight = np.prod(np.vstack([g.flatten() for g in weight_grids]), axis=0)
    dispersity = [g.flatten() for g in meshgrid(*dispersity)]
    lengths = [par.length for par in model_info.parameters.kernel_parameters
               if par.type == 'volume']
    if any(n > 1 for n in lengths):
        # stack the components of each vector-valued parameter together
        regrouped = []
        start = 0
        for n in lengths:
            if n > 1:
                regrouped.append(np.vstack(dispersity[start:start+n]))
            else:
                regrouped.append(dispersity[start])
            start += n
        dispersity = regrouped
    return dispersity, weight
| |
import settings,facebook,urllib,cgi,datetime
from timestack import utils
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse, HttpResponse
from django.conf import settings
from django.contrib import auth
from django.contrib.auth.decorators import login_required
from timestack.models import *
#render_to_rensponse
def index(request):
    """Render the landing page with the Facebook OAuth login link."""
    scope = 'read_stream,offline_access,user_photos,friends_photos,user_photo_video_tags,friends_photo_video_tags'
    login_url = ('https://www.facebook.com/dialog/oauth?client_id=%s'
                 '&redirect_uri=%s&scope=%s') % (settings.FACEBOOK_APP_ID,
                                                 urllib.quote(settings.FACEBOOK_REDIRECT_URI),
                                                 scope)
    params = dict(LOGIN_URL=login_url)
    return render_to_response('timestack/index.html', params,
                              context_instance=RequestContext(request))
def login(request, next='/home/'):
    """Handle the Facebook OAuth redirect flow.

    With a ?code= parameter, exchanges it for an access token, authenticates
    through the Facebook auth backend, logs the user in, and redirects to
    *next*. Without a code (and no error), redirects to the Facebook OAuth
    dialog. On any failure, redirects to '/' with a session error message.
    """
    error = ''
    if request.method == 'GET':
        if 'code' in request.GET:
            args = {
                'client_id': settings.FACEBOOK_APP_ID,
                'redirect_uri': (settings.FACEBOOK_REDIRECT_URI),
                'client_secret': settings.FACEBOOK_APP_SECRET,
                'code': request.GET['code'],
            }
            url = 'https://graph.facebook.com/oauth/access_token?' + urllib.urlencode(args)
            response = cgi.parse_qs(urllib.urlopen(url).read())
            access_token = response['access_token'][0]
            # authenticate via the custom Facebook auth backend
            user = auth.authenticate(token=access_token)
            if user:
                if user.is_active:
                    auth.login(request, user)
                    # treat accounts younger than two days as first-time users
                    if (datetime.datetime.now()-user.date_joined).days < 2:
                        request.session['first_time'] = True
                    # FIX: the old code called settings.STAFFS.index() inside a
                    # bare except, using exceptions for control flow and silently
                    # swallowing any unrelated error; a membership test is
                    # equivalent and explicit.
                    user.is_staff = user.username in getattr(settings, 'STAFFS', ())
                    user.save()
                    return redirect(next)
                else:
                    error = 'AUTH_DISABLED'
            else:
                error = 'AUTH_FAILED'
        elif 'error_reason' in request.GET:
            error = 'AUTH_DENIED'
        else:
            login_url='https://www.facebook.com/dialog/oauth?client_id=' + str(settings.FACEBOOK_APP_ID) + \
                    '&redirect_uri='+urllib.quote(settings.FACEBOOK_REDIRECT_URI) + \
                    '&scope=read_stream,offline_access,user_photos,friends_photos,user_photo_video_tags,friends_photo_video_tags'
            return HttpResponseRedirect(login_url)
    request.session['error']="Cannot login"
    return HttpResponseRedirect('/')
def logout(request):
    """Log the user out locally, then bounce through Facebook's logout page."""
    if request.user.is_authenticated():
        auth.logout(request)
        request.session['msg']="Logged out"
    return redirect('http://m.facebook.com/logout.php?confirm=1&next='+ urllib.quote(settings.URI))
def unauthorized(request):
    """Render the 'not authorized' page."""
    return render_to_response('timestack/unauthorized.html', {},
                              context_instance=RequestContext(request))
@login_required
def home(request):
    """Render the staff-only home page, seeded with the active song playlist.

    Non-staff users are redirected to /unauthorized/.
    """
    if request.user.username not in settings.STAFFS:
        return redirect('/unauthorized/')
    songs = Song.objects.filter(activate=True)
    first_song = songs[0] if len(songs) > 0 else None
    # FIX: build the comma-separated playlist of the remaining song ids with
    # join instead of manual delimiter bookkeeping
    playlist = ','.join(str(song.sid) for song in songs[1:])
    params = {
        'song_enable': len(songs) > 0,
        'first_song': '' if first_song is None else first_song,
        'playlist': playlist,
    }
    return render_to_response('timestack/home.html', params,
                              context_instance=RequestContext(request))
@login_required
def get_photos(request, page=1, limit=25, person=0):
    """Return a JSON page of photos (staff only).

    Pages 1..len(albums) serve photos from the stored albums; later pages
    interleave recent tagged photos of each staff member, skipping photos
    tagged with a banned id. *person* is accepted for URL compatibility but
    unused.
    """
    if request.user.username in settings.STAFFS:
        albums = Album.objects.all()
        pictures = []
        sources = []
        # FIX: was `len(albums) >= int(page) & int(page)>0`, mixing the
        # bitwise & into a chained comparison; it only worked by precedence
        # accident. An explicit chained comparison says what was meant.
        if 0 < int(page) <= len(albums):
            args = {'limit': 200}
            album = albums[int(page)-1]
            photos = facebook.GraphAPI(album.owner.profile.access_token).request(str(album)+'/photos',args)
            for photo in photos['data']:
                pictures.append(photo['picture'])
                sources.append(photo['source'])
            res = dict(pictures=pictures,sources=sources,page=page,limit=limit,albums=len(albums))
        else:
            args = {
                'offset': (int(page)-len(albums)-1)*int(limit),
                'limit': limit,
            }
            i = 0
            for staff in settings.STAFFS:
                photos = facebook.GraphAPI(request.user.profile.access_token).request(staff+'/photos',args)
                i=i+1
                index = 0
                for photo in photos['data']:
                    for tag in photo['tags']['data']:
                        # FIX: `True if ... else False` collapsed to the test itself
                        banned = tag['id'] in settings.BANNED
                        if not banned:
                            # insert interleaves this staff member's photos
                            # among those gathered from earlier members
                            pictures.insert(index,photo['picture'])
                            sources.insert(index,photo['source'])
                    index = index+i
            res = dict(pictures=pictures,sources=sources,page=page,limit=limit,albums=len(albums))
        return HttpResponse(utils._build_json(res),mimetype='application/json')
    # FIX: non-staff callers previously fell through and returned None (a 500
    # in Django); answer with the same result=False JSON as the sibling views.
    return HttpResponse(utils._build_json(dict(result=False)),mimetype='application/json')
@login_required
def add_song(request):
    """Add a YouTube song from a POSTed 'link' URL (staff only).

    Extracts the video code from a www.youtube.com/watch?v=... link, looks
    up the title via the YouTube GData feed, and stores an active Song.
    Returns JSON with the result (and the title on success).
    """
    if request.user.username in settings.STAFFS:
        if request.method == 'POST':
            youtube_url = request.POST.get('link')
            # FIX: membership test instead of find() >= 0; also guard against a
            # missing 'link' field (calling .find on None raised AttributeError).
            if youtube_url and 'www.youtube.com/watch?v=' in youtube_url:
                start = youtube_url.find('v=') + 2
                end = youtube_url.find('&',start-2)
                code = youtube_url[start:] if end<0 else youtube_url[start:end]
                # fetch youtube feed json data
                # NOTE(review): the GData v2 feed is long deprecated; this will
                # need porting to the YouTube Data API v3.
                json_string = urllib.urlopen('http://gdata.youtube.com/feeds/api/videos/%s?v=2&alt=json' % code).read()
                feed = utils._parse_json(json_string)
                title = feed['entry']['title']['$t']
                Song(sid=code, owner=request.user, title=title, activate=True).save()
                return HttpResponse(utils._build_json(dict(result=True,title=title)),mimetype='application/javascript')
            return HttpResponse(utils._build_json(dict(result=False)),mimetype='application/json')
    return HttpResponse(utils._build_json(dict(result=False)),mimetype='application/json')
@login_required
def get_playlist(request):
    """Return the active song playlist as JSON (staff only)."""
    if request.user.username not in settings.STAFFS:
        return HttpResponse(utils._build_json(dict(result=False)),mimetype='application/json')
    playlist = [
        dict(name=str(song.owner.profile),
             uid=song.owner.username,
             id=song.pk,
             sid=song.sid,
             title=song.title,
             date=song.date_added.strftime("%b %d, %Y"))
        for song in Song.objects.filter(activate=True)
    ]
    return HttpResponse(utils._build_json(dict(result=True,playlist=playlist)),mimetype='application/json')
@login_required
def remove_song(request,pk):
    """Soft-delete the song with primary key *pk* by clearing its
    ``activate`` flag (staff members only)."""
    if request.user.username not in settings.STAFFS:
        return HttpResponse(utils._build_json(dict(result=False)),mimetype='application/json')
    song = Song.objects.get(pk=pk)
    song.activate = False
    song.save()
    return HttpResponse(utils._build_json(dict(result=True)),mimetype='application/json')
@login_required
def add_message(request):
    """Create a new active Message owned by the posting staff member."""
    if request.user.username in settings.STAFFS and request.method == 'POST':
        text = request.POST.get('message')
        # Reject the untouched placeholder text from the message form.
        if text == 'Add your message here...':
            return HttpResponse(utils._build_json(dict(result=False)),mimetype='application/json')
        Message(owner=request.user,detail=text,activate=True).save()
        return HttpResponse(utils._build_json(dict(result=True)),mimetype='application/json')
    return HttpResponse(utils._build_json(dict(result=False)),mimetype='application/json')
@login_required
def edit_message(request):
    """Replace the text of an existing Message (staff POST only)."""
    if request.user.username in settings.STAFFS and request.method == 'POST':
        posted = request.POST
        msg = Message.objects.get(pk=posted.get('id'))
        msg.detail = posted.get('message')
        # Record when the edit happened.
        msg.date_updated = datetime.datetime.now()
        msg.save()
        return HttpResponse(utils._build_json(dict(result=True)),mimetype='application/json')
    return HttpResponse(utils._build_json(dict(result=False)),mimetype='application/json')
@login_required
def remove_message(request,pk):
    """Soft-delete a Message by flagging it inactive (staff members only)."""
    if request.user.username not in settings.STAFFS:
        return HttpResponse(utils._build_json(dict(result=False)),mimetype='application/json')
    msg = Message.objects.get(pk=pk)
    # Stamp the deactivation time alongside the flag change.
    msg.date_updated = datetime.datetime.now()
    msg.activate = False
    msg.save()
    return HttpResponse(utils._build_json(dict(result=True)),mimetype='application/json')
# RR_TOKEN removed: a hard-coded Facebook Graph API access token must not be
# kept in source control, even commented out; load it from settings or the
# environment instead.
@login_required
def list_message(request):
    """List all active messages as JSON (staff members only).

    An earlier variant of this view pulled the feed from the Facebook
    Graph API; messages are now read from the local Message table.
    """
    if request.user.username not in settings.STAFFS:
        return HttpResponse(utils._build_json(dict(result=False)),mimetype='application/json')
    active = Message.objects.filter(activate=True)
    messages = [
        dict(
            id=m.pk,
            uid=m.owner.username,
            name=str(m.owner.profile),
            message=m.detail,
            date=m.date_added.strftime("%b %d, %Y - %H:%M:%S"),
            # Messages authored by the requesting user are marked "primary".
            primary=(request.user.username == m.owner.username),
        )
        for m in active
    ]
    return HttpResponse(utils._build_json(dict(messages=messages,count=len(active))),mimetype='application/json')
@login_required
def add_album(request,aid):
    """Register Facebook album *aid* for the requesting staff member.

    Returns ``{"result": true}`` on success.  Previously a non-staff
    request fell through the ``if`` and returned ``None`` (an invalid
    Django response); it now returns ``{"result": false}`` exactly like
    the sibling staff-only views (add_song, get_playlist, ...).
    """
    if request.user.username in settings.STAFFS:
        album = Album(owner=request.user,aid=aid)
        album.save()
        return HttpResponse(utils._build_json(dict(result=True)),mimetype='application/json')
    # Consistent failure payload for non-staff users.
    return HttpResponse(utils._build_json(dict(result=False)),mimetype='application/json')
| |
# liblouis Braille Translation and Back-Translation Library
#
# Copyright (C) 2017 Bert Frees
#
# This file is part of liblouis.
#
# liblouis is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 2.1 of the License, or
# (at your option) any later version.
#
# liblouis is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with liblouis. If not, see <http://www.gnu.org/licenses/>.
#
from ctypes import *
from itertools import takewhile, zip_longest, chain, tee
izip = zip
izip_longest = zip_longest
from louis import _loader, liblouis, outlenMultiplier
import re
import sqlite3
import sys
def exit_if_not(expression):
    """Raise RuntimeError when *expression* is falsy; otherwise do nothing."""
    if expression:
        return
    raise RuntimeError()
# Sanity check: this script requires a liblouis built with 4-byte (UCS-4)
# wide characters.
exit_if_not(liblouis.lou_charSize() == 4)
# Declare ctypes signatures for the internal liblouis helpers used below.
liblouis.isLetter.argtypes = (c_wchar,)
liblouis.toLowercase.argtypes = (c_wchar,)
liblouis.toLowercase.restype = c_wchar
def println(line=""):
    """Write *line* followed by a newline to standard output."""
    sys.stdout.write("%s\n" % line)
def printerrln(line=""):
    """Write *line* followed by a newline to standard error."""
    sys.stderr.write("%s\n" % line)
def validate_chunks(chunked_text):
    """Return True when *chunked_text* is a well-formed sequence of chunks.

    A chunk is either plain text (no '(' ')' '|') or a parenthesized
    group of at least two characters; chunks may be separated by '|'.
    """
    pattern = r"^([^)(|]+|\([^)(|][^)(|]+\))(\|?([^)(|]+|\([^)(|][^)(|]+\)))*$"
    return re.search(pattern, chunked_text) is not None
def print_chunks(text, hyphen_string):
    """Render *text* with chunk markup derived from *hyphen_string*.

    *hyphen_string* has one character per inter-letter position of *text*:
    '0' = no break allowed, '1' = chunk boundary, 'x' = don't care.
    Runs of '0' are wrapped in parentheses and '1' becomes '|'.
    """
    exit_if_not(len(hyphen_string) == len(text) - 1 and re.search("^[01x]+$", hyphen_string))
    chunked_text = []
    k = 0
    prev_c = None
    for c in hyphen_string:
        # Entering a run of '0': open a parenthesized group.
        if c != prev_c and c == "0":
            chunked_text.append("(")
        chunked_text.append(text[k])
        # Leaving a run of '0': close the group.
        if c != prev_c and prev_c == "0":
            chunked_text.append(")")
        if c == "1":
            chunked_text.append("|")
        prev_c = c
        k += 1
    # Emit the final character (text has one more char than boundaries).
    chunked_text.append(text[k])
    if (prev_c == "0"):
        chunked_text.append(")")
    return "".join(chunked_text)
def parse_chunks(chunked_text):
    """Inverse of print_chunks(): recover (text, hyphen_string) from markup.

    '(' ... ')' groups become runs of '0', '|' becomes '1', and all other
    positions default to 'x' (don't care).
    """
    exit_if_not(validate_chunks(chunked_text))
    text, _ = read_text(chunked_text)
    # One boundary character per adjacent pair of characters, default 'x'.
    hyphen_string = ["x"] * (len(text) - 1)
    k = 0
    for c in chunked_text:
        if c == "(":
            # An opening paren forbids breaks from here until overwritten
            # by the matching ')' below.
            hyphen_string[k:] = ["0"] * (len(text) - 1 - k)
        elif c == ")":
            # The closing paren restores "don't care" for the remainder.
            hyphen_string[k-1:] = ["x"] * (len(text) - 1 - (k-1))
        elif c == "|":
            hyphen_string[k-1] = "1"
        else:
            # Plain character: advance the position in the unmarked text.
            k += 1
            if k > len(text):
                break
    return text, "".join(hyphen_string)
def read_text(maybe_chunked_text):
    """Split the input into (plain_text, chunked_text_or_None).

    When the input contains chunk markup ('(' ')' '|'), the markup is
    stripped for the first element and the original string is returned as
    the second; otherwise the second element is None.
    """
    has_markup = re.search("[)(|]", maybe_chunked_text) is not None
    if has_markup:
        return re.sub("[)(|]", "", maybe_chunked_text), maybe_chunked_text
    return maybe_chunked_text, None
def compare_chunks(expected_hyphen_string, actual_hyphen_string, text):
    """Diff two hyphen strings against *text*.

    Interleaves each character of *text* with a marker per boundary:
    '*' = break present and permitted ('1' or 'x' expected),
    '.' = unexpected break, '-' = missing break.
    Returns the annotated string, or None when there are no mismatches.
    """
    exit_if_not(len(expected_hyphen_string) == len(text) - 1 and re.search("^[01x]+$", expected_hyphen_string))
    exit_if_not(len(actual_hyphen_string) == len(text) - 1 and re.search("^[01]+$", actual_hyphen_string))
    chunk_errors = my_zip(text,
                          map(lambda e, a: "*" if e in "1x" and a == "1" else
                                           "." if a == "1" else
                                           "-" if e == "1" else None,
                              expected_hyphen_string, actual_hyphen_string))
    # Only '.' and '-' count as real errors; '*' alone means agreement.
    return chunk_errors if re.search(r"[-\.]", chunk_errors) else None
# split a string into words consisting of only letters (at least two)
# return an empty list if the provided hyphen string does not have zeros at all positions before and after non-letters
def split_into_words(text, hyphen_string):
    """Split *text* into letter-only words of length >= 2, pairing each word
    with the matching slice of *hyphen_string*.

    Returns an iterator of (word, word_hyphen_string) pairs, or [] when a
    non-letter character is flanked by a boundary that is not '1' or 'x'.
    """
    exit_if_not(len(hyphen_string) == len(text) - 1 and re.search("^[01x]+$", hyphen_string))
    words = []
    word_hyphen_strings = []
    word = []
    word_hyphen_string = []
    # Pad with '1' on both ends so every character has a (left, right)
    # boundary pair to inspect.
    for c,(h1,h2) in izip(text, pairwise('1' + hyphen_string + '1')):
        if is_letter(c):
            word.append(c)
            word_hyphen_string.append(h1)
        elif h1 not in "1x" or h2 not in "1x":
            # Non-letter adjacent to a forbidden break position: bail out.
            return []
        else:
            # Non-letter terminates the current word; keep it if long enough.
            if len(word) > 1:
                words.append("".join(word))
                # Drop the leading boundary; it lies outside the word.
                word_hyphen_strings.append("".join(word_hyphen_string[1:]))
            word = []
            word_hyphen_string = []
    # Flush the trailing word, if any.
    if len(word) > 1:
        words.append("".join(word))
        word_hyphen_strings.append("".join(word_hyphen_string[1:]))
    return izip(words, word_hyphen_strings)
# Currently loaded liblouis table name (as bytes); set by load_table().
table = None
def load_table(new_table):
    """Load a liblouis translation table and remember its encoded name."""
    global table
    table = new_table
    # liblouis expects a byte string: encode str, copy any other buffer type.
    table = table.encode("ASCII") if isinstance(table, str) else bytes(table)
    liblouis.loadTable(table);
def is_letter(text):
    """True when every character of *text* is a letter according to liblouis
    (vacuously True for the empty string)."""
    for ch in text:
        if not liblouis.isLetter(ch):
            return False
    return True
def to_lowercase(text):
    """Lowercase *text* one character at a time via liblouis."""
    lowered = [liblouis.toLowercase(ch) for ch in text]
    return "".join(lowered)
def to_dot_pattern(braille):
    """Convert a unicode braille string to liblouis dot-pattern notation."""
    braille_buf = create_unicode_buffer(braille)
    # Output buffer sized at 9 bytes per braille cell.
    dots_buf = create_string_buffer(9 * len(braille))
    liblouis.toDotPattern(braille_buf, dots_buf)
    return dots_buf.value.decode('ascii')
def hyphenate(text):
    """Return liblouis hyphenation points for *text* as a '0'/'1' string,
    one character per inter-letter position."""
    c_text = create_unicode_buffer(text)
    c_text_len = c_int(len(text))
    c_hyphen_string = create_string_buffer(len(text) + 1)
    exit_if_not(liblouis.lou_hyphenate(table, c_text, c_text_len, c_hyphen_string, 0))
    # Odd digits mark break points; the leading position is skipped because
    # a word can never break before its first letter.
    return "".join(['1' if int(p) % 2 else '0' for p in c_hyphen_string.value[1:]])
def translate(text):
    """Translate *text* to braille using the loaded table.

    Returns (braille_string, applied_rule_pointers); at most 16 rule
    pointers are captured.
    """
    c_text = create_unicode_buffer(text)
    c_text_len = c_int(len(text))
    # Worst-case output length as advertised by liblouis.
    braille_len = len(text) * outlenMultiplier
    c_braille = create_unicode_buffer(braille_len)
    c_braille_len = c_int(braille_len)
    max_rules = 16
    c_rules = (c_void_p * max_rules)()
    c_rules_len = c_int(max_rules)
    exit_if_not(liblouis._lou_translate(table, c_text, byref(c_text_len), c_braille, byref(c_braille_len),
                                        None, None, None, None, None, 0, c_rules, byref(c_rules_len)))
    # Only the first c_rules_len entries were filled in by liblouis.
    return c_braille.value, c_rules[0:c_rules_len.value]
def get_rule(c_rule_pointer):
    """Format a liblouis rule pointer as a tuple of space-separated fields,
    or None when the rule cannot be printed."""
    rule_buf = create_unicode_buffer(u"", 128)
    if liblouis.printRule(cast(c_rule_pointer, c_void_p), rule_buf):
        return tuple(rule_buf.value.split(" "))
    return None
def suggest_chunks(text, braille):
    """Ask liblouis to suggest a chunking of *text* that maps onto *braille*.

    Returns a hyphen string over '0'/'1'/'x' with one character per
    inter-letter position, or None when no suggestion could be made.
    """
    c_text = create_unicode_buffer(text)
    c_braille = create_unicode_buffer(braille)
    # Two extra slots: liblouis writes sentinel positions before the first
    # and after the last character.
    c_hyphen_string = create_string_buffer(len(text) + 2)
    if not liblouis.suggestChunks(c_text, c_braille, c_hyphen_string):
        return None;
    hyphen_string = c_hyphen_string.value.decode('ascii')
    # Strip the two sentinel positions.
    hyphen_string = hyphen_string[1:len(hyphen_string)-1]
    assert len(hyphen_string) == len(text) - 1 and re.search("^[01x]+$", hyphen_string)
    return hyphen_string
def find_relevant_rules(text):
    """Return the liblouis rules relevant to *text*, each as a tuple of
    space-separated fields (lazily, via map)."""
    c_text = create_unicode_buffer(text)
    max_rules = 16
    # Build a NULL-terminated array of 128-character output buffers.
    c_rules = [u""] * max_rules + [None]
    for i in range(0, max_rules):
        c_rules[i] = create_unicode_buffer(c_rules[i], 128)
        c_rules[i] = cast(c_rules[i], c_wchar_p)
    c_rules = (c_wchar_p * (max_rules + 1))(*c_rules)
    liblouis.findRelevantRules(c_text, c_rules)
    # takewhile stops at the first empty slot (unused buffer).
    return map(lambda x: tuple(x.split(" ")), takewhile(lambda x: x, c_rules))
def open_dictionary(dictionary):
    """Open the SQLite dictionary file and return (connection, cursor)."""
    connection = sqlite3.connect(dictionary)
    return connection, connection.cursor()
def filterfalse(predicate, iterable):
    """Eager complement of filter(): the list of items for which
    *predicate* is falsy, preserving order."""
    return list(filter(lambda item: not predicate(item), iterable))
def partition(pred, iterable):
    """Split *iterable* into (items failing *pred*, items passing *pred*).

    The first element is a list, the second a lazy filter object.
    """
    left, right = tee(iterable)
    return filterfalse(pred, left), filter(pred, right)
def pairwise(iterable):
    """Yield consecutive overlapping pairs: s -> (s0, s1), (s1, s2), ...

    Uses the builtin ``zip`` directly instead of the legacy ``izip``
    alias (under Python 3 the alias is simply ``zip``).
    """
    a, b = tee(iterable)
    # Advance the second iterator by one so the streams are offset.
    next(b, None)
    return zip(a, b)
def my_zip(*iterables):
    """Interleave the given iterables of characters round-robin into one
    string, skipping the None padding that zip_longest inserts for
    exhausted iterables.

    Uses ``itertools.zip_longest`` (imported at the top of this file)
    directly instead of the legacy ``izip_longest`` alias.
    """
    interleaved = chain(*zip_longest(*iterables))
    return "".join([x for x in interleaved if x is not None])
class future:
    """Lazily evaluate a zero-argument callable, memoizing its result.

    The wrapped callable runs at most once, on the first call; subsequent
    calls return the cached value.
    """
    def __init__(self, f):
        self.f = f
        # Before realization, `fut` just holds the callable itself.
        self.fut = f
        self.is_realized = False
    def __call__(self):
        if self.is_realized:
            return self.fut
        self.fut = self.f()
        self.is_realized = True
        return self.fut
| |
#!/usr/bin/env python
# encoding: utf-8
"""The ipcluster application."""
from __future__ import print_function
import errno
import logging
import os
import re
import signal
from subprocess import check_call, CalledProcessError, PIPE
import zmq
from traitlets.config.application import catch_config_error
from IPython.core.application import BaseIPythonApplication
from IPython.core.profiledir import ProfileDir
from ipython_genutils.importstring import import_item
from ipython_genutils.py3compat import string_types
from IPython.utils.sysinfo import num_cpus
from traitlets import (Integer, Unicode, Bool, CFloat, Dict, List, Any,
DottedObjectName)
from .baseapp import (
BaseParallelApplication,
PIDFileError,
base_flags, base_aliases
)
from .daemonize import daemonize
#-----------------------------------------------------------------------------
# Module level variables
#-----------------------------------------------------------------------------
# Long help text shown by `ipcluster --help`.
_description = """Start an IPython cluster for parallel computing.
An IPython cluster consists of 1 controller and 1 or more engines.
This command automates the startup of these processes using a wide range of
startup methods (SSH, local processes, PBS, mpiexec, SGE, LSF, HTCondor,
Windows HPC Server 2008). To start a cluster with 4 engines on your
local host simply do 'ipcluster start --n=4'. For more complex usage
you will typically do 'ipython profile create mycluster --parallel', then edit
configuration files, followed by 'ipcluster start --profile=mycluster --n=4'.
"""

# Example invocations appended to the relevant (sub)command help text.
_main_examples = """
ipcluster start --n=4 # start a 4 node cluster on localhost
ipcluster start -h # show the help string for the start subcmd
ipcluster stop -h # show the help string for the stop subcmd
ipcluster engines -h # show the help string for the engines subcmd
"""

_start_examples = """
ipython profile create mycluster --parallel # create mycluster profile
ipcluster start --profile=mycluster --n=4 # start mycluster with 4 nodes
"""

_stop_examples = """
ipcluster stop --profile=mycluster # stop a running cluster by profile name
"""

_engines_examples = """
ipcluster engines --profile=mycluster --n=4 # start 4 engines only
"""

# Exit codes for ipcluster

# This will be the exit code if the ipcluster appears to be running because
# a .pid file exists
ALREADY_STARTED = 10

# This will be the exit code if ipcluster stop is run, but there is not .pid
# file to be found.
ALREADY_STOPPED = 11

# This will be the exit code if ipcluster engines is run, but there is not .pid
# file to be found.
NO_CLUSTER = 12
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
def find_launcher_class(clsname, kind):
    """Return a launcher class for a given clsname and kind.

    Parameters
    ==========
    clsname : str
        The full name of the launcher class, either with or without the
        module path, or an abbreviation (MPI, SSH, SGE, PBS, LSF, HTCondor
        WindowsHPC).
    kind : str
        Either 'EngineSet' or 'Controller'.
    """
    if '.' in clsname:
        # Already a dotted module path: import it as-is.
        return import_item(clsname)
    # Bare abbreviation ('MPI', 'PBS', ...): expand it to the full class
    # name inside ipyparallel.apps.launcher.
    if kind and kind not in clsname:
        clsname = clsname + kind + 'Launcher'
    return import_item('ipyparallel.apps.launcher.' + clsname)
#-----------------------------------------------------------------------------
# Main application
#-----------------------------------------------------------------------------
# Help text for the `start` subcommand.
start_help = """Start an IPython cluster for parallel computing
Start an ipython cluster by its profile name or cluster
directory. Cluster directories contain configuration, log and
security related files and are named using the convention
'profile_<name>' and should be creating using the 'start'
subcommand of 'ipcluster'. If your cluster directory is in
the cwd or the ipython directory, you can simply refer to it
using its profile name, 'ipcluster start --n=4 --profile=<profile>`,
otherwise use the 'profile-dir' option.
"""

# Help text for the `stop` subcommand.
stop_help = """Stop a running IPython cluster
Stop a running ipython cluster by its profile name or cluster
directory. Cluster directories are named using the convention
'profile_<name>'. If your cluster directory is in
the cwd or the ipython directory, you can simply refer to it
using its profile name, 'ipcluster stop --profile=<profile>`, otherwise
use the '--profile-dir' option.
"""

# Help text for the `engines` subcommand.
engines_help = """Start engines connected to an existing IPython cluster
Start one or more engines to connect to an existing Cluster
by profile name or cluster directory.
Cluster directories contain configuration, log and
security related files and are named using the convention
'profile_<name>' and should be creating using the 'start'
subcommand of 'ipcluster'. If your cluster directory is in
the cwd or the ipython directory, you can simply refer to it
using its profile name, 'ipcluster engines --n=4 --profile=<profile>`,
otherwise use the 'profile-dir' option.
"""

# Command-line aliases specific to `ipcluster stop`.
stop_aliases = dict(
    signal='IPClusterStop.signal',
)
stop_aliases.update(base_aliases)
class IPClusterStop(BaseParallelApplication):
    """`ipcluster stop`: stop a running cluster located via its PID file."""
    name = u'ipcluster'
    description = stop_help
    examples = _stop_examples
    # Signal sent to the cluster process (POSIX path; Windows uses taskkill).
    signal = Integer(signal.SIGINT, config=True,
        help="signal to use for stopping processes.")
    aliases = Dict(stop_aliases)
    def start(self):
        """Start the app for the stop subcommand."""
        try:
            pid = self.get_pid_from_file()
        except PIDFileError:
            self.log.critical(
                'Could not read pid file, cluster is probably not running.'
            )
            # Here I exit with a unusual exit status that other processes
            # can watch for to learn how I exited.
            self.remove_pid_file()
            self.exit(ALREADY_STOPPED)
        if not self.check_pid(pid):
            self.log.critical(
                'Cluster [pid=%r] is not running.' % pid
            )
            # Stale pid file: the recorded process no longer exists.
            self.remove_pid_file()
            # Here I exit with a unusual exit status that other processes
            # can watch for to learn how I exited.
            self.exit(ALREADY_STOPPED)
        elif os.name=='posix':
            sig = self.signal
            self.log.info(
                "Stopping cluster [pid=%r] with [signal=%r]" % (pid, sig)
            )
            try:
                os.kill(pid, sig)
            except OSError:
                self.log.error("Stopping cluster failed, assuming already dead.",
                    exc_info=True)
            self.remove_pid_file()
        elif os.name=='nt':
            try:
                # kill the whole tree
                check_call(['taskkill', '-pid', str(pid), '-t', '-f'], stdout=PIPE,stderr=PIPE)
            except (CalledProcessError, OSError):
                self.log.error("Stopping cluster failed, assuming already dead.",
                    exc_info=True)
            self.remove_pid_file()
# Command-line aliases and flags for `ipcluster engines` (reused by `start`).
engine_aliases = {}
engine_aliases.update(base_aliases)
engine_aliases.update(dict(
    n='IPClusterEngines.n',
    engines = 'IPClusterEngines.engine_launcher_class',
    daemonize = 'IPClusterEngines.daemonize',
))
engine_flags = {}
engine_flags.update(base_flags)
engine_flags.update(dict(
    daemonize=(
        {'IPClusterEngines' : {'daemonize' : True}},
        """run the cluster into the background (not available on Windows)""",
    )
))
class IPClusterEngines(BaseParallelApplication):
    """`ipcluster engines`: start a set of engines for an existing cluster."""
    name = u'ipcluster'
    description = engines_help
    examples = _engines_examples
    usage = None
    default_log_level = logging.INFO
    classes = List()
    def _classes_default(self):
        # Configurable classes: the profile dir plus every EngineSet launcher.
        from ipyparallel.apps import launcher
        launchers = launcher.all_launchers
        eslaunchers = [ l for l in launchers if 'EngineSet' in l.__name__]
        return [ProfileDir]+eslaunchers
    n = Integer(num_cpus(), config=True,
        help="""The number of engines to start. The default is to use one for each
        CPU on your machine""")
    engine_launcher = Any(config=True, help="Deprecated, use engine_launcher_class")
    def _engine_launcher_changed(self, name, old, new):
        # Backwards compatibility: a plain string means old 0.11-style config.
        if isinstance(new, string_types):
            self.log.warn("WARNING: %s.engine_launcher is deprecated as of 0.12,"
                " use engine_launcher_class" % self.__class__.__name__)
            self.engine_launcher_class = new
    engine_launcher_class = DottedObjectName('LocalEngineSetLauncher',
        config=True,
        help="""The class for launching a set of Engines. Change this value
        to use various batch systems to launch your engines, such as PBS,SGE,MPI,etc.
        Each launcher class has its own set of configuration options, for making sure
        it will work in your environment.
        You can also write your own launcher, and specify it's absolute import path,
        as in 'mymodule.launcher.FTLEnginesLauncher`.
        IPython's bundled examples include:
            Local : start engines locally as subprocesses [default]
            MPI : use mpiexec to launch engines in an MPI environment
            PBS : use PBS (qsub) to submit engines to a batch queue
            SGE : use SGE (qsub) to submit engines to a batch queue
            LSF : use LSF (bsub) to submit engines to a batch queue
            SSH : use SSH to start the controller
                        Note that SSH does *not* move the connection files
                        around, so you will likely have to do this manually
                        unless the machines are on a shared file system.
            HTCondor : use HTCondor to submit engines to a batch queue
            WindowsHPC : use Windows HPC
        If you are using one of IPython's builtin launchers, you can specify just the
        prefix, e.g:
            c.IPClusterEngines.engine_launcher_class = 'SSH'
        or:
            ipcluster start --engines=MPI
        """
        )
    daemonize = Bool(False, config=True,
        help="""Daemonize the ipcluster program. This implies --log-to-file.
        Not available on Windows.
        """)
    def _daemonize_changed(self, name, old, new):
        # Daemonizing detaches the terminal, so logging must go to a file.
        if new:
            self.log_to_file = True
    early_shutdown = Integer(30, config=True, help="The timeout (in seconds)")
    _stopping = False
    aliases = Dict(engine_aliases)
    flags = Dict(engine_flags)
    @catch_config_error
    def initialize(self, argv=None):
        """Parse config/argv, then wire up signals and launchers."""
        super(IPClusterEngines, self).initialize(argv)
        self.init_signal()
        self.init_launchers()
    def init_launchers(self):
        """Instantiate the engine-set launcher from its configured class."""
        self.engine_launcher = self.build_launcher(self.engine_launcher_class, 'EngineSet')
    def init_signal(self):
        # Setup signals
        signal.signal(signal.SIGINT, self.sigint_handler)
    def build_launcher(self, clsname, kind=None):
        """import and instantiate a Launcher based on importstring"""
        try:
            klass = find_launcher_class(clsname, kind)
        except (ImportError, KeyError):
            self.log.fatal("Could not import launcher class: %r"%clsname)
            self.exit(1)
        launcher = klass(
            work_dir=u'.', parent=self, log=self.log,
            profile_dir=self.profile_dir.location, cluster_id=self.cluster_id,
        )
        return launcher
    def engines_started_ok(self):
        # Called once `early_shutdown` seconds pass without an engine exit.
        self.log.info("Engines appear to have started successfully")
        self.early_shutdown = 0
    def start_engines(self):
        # Some EngineSetLaunchers ignore `n` and use their own engine count, such as SSH:
        n = getattr(self.engine_launcher, 'engine_count', self.n)
        # NOTE(review): `n` above is only used in the log message; self.n is
        # what is actually passed to start() below -- confirm intended.
        self.log.info("Starting %s Engines with %s", n, self.engine_launcher_class)
        try:
            self.engine_launcher.start(self.n)
        except:
            self.log.exception("Engine start failed")
            raise
        self.engine_launcher.on_stop(self.engines_stopped_early)
        if self.early_shutdown:
            self.loop.add_timeout(self.loop.time() + self.early_shutdown, self.engines_started_ok)
    def engines_stopped_early(self, r):
        """Engines exiting within the early-shutdown window usually means
        they failed to connect to the controller."""
        if self.early_shutdown and not self._stopping:
            self.log.error("""
            Engines shutdown early, they probably failed to connect.
            Check the engine log files for output.
            If your controller and engines are not on the same machine, you probably
            have to instruct the controller to listen on an interface other than localhost.
            You can set this by adding "--ip='*'" to your ControllerLauncher.controller_args.
            Be sure to read our security docs before instructing your controller to listen on
            a public interface.
            """)
            self.stop_launchers()
        return self.engines_stopped(r)
    def engines_stopped(self, r):
        # All engines gone: nothing left to run, stop the event loop.
        return self.loop.stop()
    def stop_engines(self):
        """Ask the launcher to stop its engines; returns the stop deferred
        or None when nothing is running."""
        if self.engine_launcher.running:
            self.log.info("Stopping Engines...")
            d = self.engine_launcher.stop()
            return d
        else:
            return None
    def stop_launchers(self, r=None):
        """Initiate shutdown of all launchers exactly once."""
        if not self._stopping:
            self._stopping = True
            self.log.error("IPython cluster: stopping")
            self.stop_engines()
            # Wait a few seconds to let things shut down.
            self.loop.add_timeout(self.loop.time() + 3, self.loop.stop)
    def sigint_handler(self, signum, frame):
        self.log.debug("SIGINT received, stopping launchers...")
        self.stop_launchers()
    def start_logging(self):
        # Remove old log files of the controller and engine
        if self.clean_logs:
            log_dir = self.profile_dir.log_dir
            for f in os.listdir(log_dir):
                if re.match(r'ip(engine|controller)-.+\.(log|err|out)',f):
                    os.remove(os.path.join(log_dir, f))
    def start(self):
        """Start the app for the engines subcommand."""
        self.log.info("IPython cluster: started")
        # First see if the cluster is already running
        # Now log and daemonize
        self.log.info(
            'Starting engines with [daemon=%r]' % self.daemonize
        )
        # TODO: Get daemonize working on Windows or as a Windows Server.
        if self.daemonize:
            if os.name=='posix':
                daemonize()
        self.loop.add_callback(self.start_engines)
        # Now write the new pid file AFTER our new forked pid is active.
        # self.write_pid_file()
        try:
            self.loop.start()
        except KeyboardInterrupt:
            pass
        except zmq.ZMQError as e:
            # EINTR just means a signal interrupted the poll; exit quietly.
            if e.errno == errno.EINTR:
                pass
            else:
                raise
# `ipcluster start` accepts everything `engines` does, plus controller options.
start_aliases = {}
start_aliases.update(engine_aliases)
start_aliases.update(dict(
    delay='IPClusterStart.delay',
    controller='IPClusterStart.controller_launcher_class',
    ip='IPClusterStart.controller_ip',
))
start_aliases['clean-logs'] = 'IPClusterStart.clean_logs'
class IPClusterStart(IPClusterEngines):
    """`ipcluster start`: launch one controller plus a set of engines."""
    name = u'ipcluster'
    description = start_help
    examples = _start_examples
    default_log_level = logging.INFO
    auto_create = Bool(True, config=True,
        help="whether to create the profile_dir if it doesn't exist")
    classes = List()
    def _classes_default(self,):
        from ipyparallel.apps import launcher
        return [ProfileDir] + [IPClusterEngines] + launcher.all_launchers
    clean_logs = Bool(True, config=True,
        help="whether to cleanup old logs before starting")
    delay = CFloat(1., config=True,
        help="delay (in s) between starting the controller and the engines")
    controller_ip = Unicode(config=True, help="Set the IP address of the controller.")
    controller_launcher = Any(config=True, help="Deprecated, use controller_launcher_class")
    def _controller_launcher_changed(self, name, old, new):
        if isinstance(new, string_types):
            # old 0.11-style config
            self.log.warn("WARNING: %s.controller_launcher is deprecated as of 0.12,"
                " use controller_launcher_class" % self.__class__.__name__)
            self.controller_launcher_class = new
    controller_launcher_class = DottedObjectName('LocalControllerLauncher',
        config=True,
        help="""The class for launching a Controller. Change this value if you want
        your controller to also be launched by a batch system, such as PBS,SGE,MPI,etc.
        Each launcher class has its own set of configuration options, for making sure
        it will work in your environment.
        Note that using a batch launcher for the controller *does not* put it
        in the same batch job as the engines, so they will still start separately.
        IPython's bundled examples include:
            Local : start engines locally as subprocesses
            MPI : use mpiexec to launch the controller in an MPI universe
            PBS : use PBS (qsub) to submit the controller to a batch queue
            SGE : use SGE (qsub) to submit the controller to a batch queue
            LSF : use LSF (bsub) to submit the controller to a batch queue
            HTCondor : use HTCondor to submit the controller to a batch queue
            SSH : use SSH to start the controller
            WindowsHPC : use Windows HPC
        If you are using one of IPython's builtin launchers, you can specify just the
        prefix, e.g:
            c.IPClusterStart.controller_launcher_class = 'SSH'
        or:
            ipcluster start --controller=MPI
        """
        )
    reset = Bool(False, config=True,
        help="Whether to reset config files as part of '--create'."
        )
    # flags = Dict(flags)
    aliases = Dict(start_aliases)
    def init_launchers(self):
        """Build the controller launcher in addition to the engine set."""
        self.controller_launcher = self.build_launcher(self.controller_launcher_class, 'Controller')
        if self.controller_ip:
            self.controller_launcher.controller_args.append('--ip=%s' % self.controller_ip)
        self.engine_launcher = self.build_launcher(self.engine_launcher_class, 'EngineSet')
    def engines_stopped(self, r):
        """prevent parent.engines_stopped from stopping everything on engine shutdown"""
        pass
    def start_controller(self):
        """Launch the controller process and wire stop_launchers to its exit."""
        self.log.info("Starting Controller with %s", self.controller_launcher_class)
        self.controller_launcher.on_stop(self.stop_launchers)
        try:
            self.controller_launcher.start()
        except:
            self.log.exception("Controller start failed")
            raise
    def stop_controller(self):
        # self.log.info("In stop_controller")
        if self.controller_launcher and self.controller_launcher.running:
            return self.controller_launcher.stop()
    def stop_launchers(self, r=None):
        """Stop the controller, then fall back to the parent shutdown."""
        if not self._stopping:
            self.stop_controller()
            super(IPClusterStart, self).stop_launchers()
    def start(self):
        """Start the app for the start subcommand."""
        # First see if the cluster is already running
        try:
            pid = self.get_pid_from_file()
        except PIDFileError:
            pass
        else:
            if self.check_pid(pid):
                self.log.critical(
                    'Cluster is already running with [pid=%s]. '
                    'use "ipcluster stop" to stop the cluster.' % pid
                )
                # Here I exit with a unusual exit status that other processes
                # can watch for to learn how I exited.
                self.exit(ALREADY_STARTED)
            else:
                # Stale pid file: the recorded process no longer exists.
                self.remove_pid_file()
        # Now log and daemonize
        self.log.info(
            'Starting ipcluster with [daemon=%r]' % self.daemonize
        )
        # TODO: Get daemonize working on Windows or as a Windows Server.
        if self.daemonize:
            if os.name=='posix':
                daemonize()
        def start():
            # Controller first; engines follow after `delay` seconds.
            self.start_controller()
            self.loop.add_timeout(self.loop.time() + self.delay, self.start_engines)
        self.loop.add_callback(start)
        # Now write the new pid file AFTER our new forked pid is active.
        self.write_pid_file()
        try:
            self.loop.start()
        except KeyboardInterrupt:
            pass
        except zmq.ZMQError as e:
            # EINTR just means a signal interrupted the poll; exit quietly.
            if e.errno == errno.EINTR:
                pass
            else:
                raise
        finally:
            self.remove_pid_file()
class IPClusterNBExtension(BaseIPythonApplication):
    """Enable/disable ipcluster tab extension in Jupyter notebook"""
    name = 'ipcluster-nbextension'
    description = """Enable/disable IPython clusters tab in Jupyter notebook"""
    examples = """
    ipcluster nbextension enable
    ipcluster nbextension disable
    """
    def start(self):
        """Dispatch on the single positional argument: 'enable' or 'disable'."""
        from ipyparallel.nbextension.install import install_server_extension
        if len(self.extra_args) != 1:
            self.exit("Must specify 'enable' or 'disable'")
        action = self.extra_args[0].lower()
        if action == 'enable':
            print("Enabling IPython clusters tab")
            install_server_extension(enable=True)
        elif action == 'disable':
            print("Disabling IPython clusters tab")
            install_server_extension(enable=False)
        else:
            self.exit("Must specify 'enable' or 'disable', not '%s'" % action)
# Common import-path prefix for the subcommand classes defined above.
base = 'ipyparallel.apps.ipclusterapp.IPCluster'

class IPClusterApp(BaseIPythonApplication):
    """Top-level `ipcluster` app: all real work happens in its subcommands."""
    name = u'ipcluster'
    description = _description
    examples = _main_examples
    subcommands = {
        'start' : (base+'Start', start_help),
        'stop' : (base+'Stop', stop_help),
        'engines' : (base+'Engines', engines_help),
        'nbextension': (base+'NBExtension', IPClusterNBExtension.description)
    }
    # no aliases or flags for parent App
    aliases = Dict()
    flags = Dict()
    def start(self):
        # Without a subcommand there is nothing to do; print usage and exit.
        if self.subapp is None:
            print("No subcommand specified. Must specify one of: %s"%(self.subcommands.keys()))
            print()
            self.print_description()
            self.print_subcommands()
            self.exit(1)
        else:
            return self.subapp.start()

# Module-level entry point used by console scripts.
launch_new_instance = IPClusterApp.launch_instance

if __name__ == '__main__':
    launch_new_instance()
| |
import sys
import os
import time
from array import array as Array
from nysa.host.driver.utils import *
from collections import OrderedDict
sys.path.append(os.path.join(os.path.dirname(__file__),
os.pardir))
from nysa.host.driver import driver
# SDB identification for the logic-analyzer core.
DEVICE_TYPE = "Logic Analyzer"
SDB_ABI_VERSION_MINOR = 1
SDB_VENDOR_ID = 0x800000000000C594

# Bit positions within the CONTROL register.
CONTROL_RESET = 0
CONTROL_ENABLE_INTERRUPT = 1
CONTROL_ENABLE_LA = 2
CONTROL_RESTART_LA = 3
CONTROL_FORCE_STB = 4
CONTROL_ENABLE_UART = 5

# Bit positions within the STATUS register.
STATUS_FINISHED = 0

#Addresses
# Register address map (word offsets) of the core.
CONTROL = 0x00
STATUS = 0x01
TRIGGER = 0x02
TRIGGER_MASK = 0x03
TRIGGER_AFTER = 0x04
TRIGGER_EDGE = 0x05
BOTH_EDGES = 0x06
REPEAT_COUNT = 0x07
DATA_COUNT = 0x08
START_POS = 0x09
CLOCK_RATE = 0x0A
READ_DATA = 0x0B
class LogicAnalyzerException(Exception):
    """Error raised by the LogicAnalyzer driver."""
    pass
class LogicAnalyzer(driver.Driver):
    """wb_logic_analyser

    Driver for a logic-analyzer core, accessed through the register map
    defined at module level (CONTROL, STATUS, TRIGGER, ...).
    """

    @staticmethod
    def get_abi_class():
        return 0

    @staticmethod
    def get_abi_major():
        return driver.get_device_id_from_name(DEVICE_TYPE)

    @staticmethod
    def get_abi_minor():
        return SDB_ABI_VERSION_MINOR

    @staticmethod
    def get_vendor_id():
        return SDB_VENDOR_ID

    def __init__(self, nysa, urn, debug = False):
        super(LogicAnalyzer, self).__init__(nysa, urn, debug)
        # Perform this strange read/write so that we can disable the UART
        # controller (the register read has a hardware side effect).
        control = self.read_register(CONTROL)
        # Cache the capture-buffer length for the read methods below.
        self.data_count = self.get_data_count()
        self.enable_uart_control(False)

    def reset(self):
        """Assert the core's reset bit."""
        self.set_register_bit(CONTROL, CONTROL_RESET)

    def enable_interrupts(self, enable):
        """Enable or disable the capture-finished interrupt."""
        self.enable_register_bit(CONTROL, CONTROL_ENABLE_INTERRUPT, enable)

    def enable_uart_control(self, enable):
        """Enable or disable the UART control interface of the core."""
        self.enable_register_bit(CONTROL, CONTROL_ENABLE_UART, enable)

    def is_uart_enabled(self):
        return self.is_register_bit_set(CONTROL, CONTROL_ENABLE_UART)

    def is_interrupts_enabled(self):
        return self.is_register_bit_set(CONTROL, CONTROL_ENABLE_INTERRUPT)

    def enable(self, enable):
        """Enable or disable the logic analyzer itself."""
        self.enable_register_bit(CONTROL, CONTROL_ENABLE_LA, enable)

    def is_enabled(self):
        return self.is_register_bit_set(CONTROL, CONTROL_ENABLE_LA)

    def restart(self):
        """Restart a capture."""
        self.set_register_bit(CONTROL, CONTROL_RESTART_LA)

    def is_finished(self):
        """True when the current capture has completed."""
        return self.is_register_bit_set(STATUS, STATUS_FINISHED)

    def force_trigger(self):
        """Force a capture trigger regardless of the trigger condition."""
        self.enable_register_bit(CONTROL, CONTROL_FORCE_STB, True)

    def set_trigger(self, trigger):
        self.write_register(TRIGGER, trigger)

    def get_trigger(self):
        return self.read_register(TRIGGER)

    def set_trigger_mask(self, trigger_mask):
        self.write_register(TRIGGER_MASK, trigger_mask)

    def get_trigger_mask(self):
        return self.read_register(TRIGGER_MASK)

    def set_trigger_after(self, trigger_after):
        self.write_register(TRIGGER_AFTER, trigger_after)

    def get_trigger_after(self):
        return self.read_register(TRIGGER_AFTER)

    def set_trigger_edge(self, trigger_edge):
        self.write_register(TRIGGER_EDGE, trigger_edge)

    def get_trigger_edge(self):
        return self.read_register(TRIGGER_EDGE)

    def set_both_edge(self, both_edges):
        self.write_register(BOTH_EDGES, both_edges)

    def get_both_edge(self):
        return self.read_register(BOTH_EDGES)

    def set_repeat_count(self, repeat_count):
        self.write_register(REPEAT_COUNT, repeat_count)

    def get_repeat_count(self):
        return self.read_register(REPEAT_COUNT)

    def get_data_count(self):
        """Number of bytes in the capture buffer."""
        return self.read_register(DATA_COUNT)

    def get_start_pos(self):
        """Index of the oldest sample in the circular capture buffer."""
        return self.read_register(START_POS)

    def read_raw_data(self):
        """Read the raw capture buffer without any reordering."""
        return self.read(READ_DATA, self.data_count, disable_auto_inc = True)

    def read_data(self):
        """Read the capture buffer and rotate it into chronological order.

        The core fills a circular buffer; START_POS is the index of the
        oldest 32-bit sample, so the buffer is rotated to begin there.

        Bug fix: previously the rotation `extend` calls also ran when
        start_pos == 0 (after `data = temp`), duplicating every sample in
        the returned array.  They are now in an `else` branch.
        """
        start_pos = self.read_register(START_POS)
        raw_data = self.read(READ_DATA, self.data_count, disable_auto_inc = True)
        # Need to reorder the data so it makes sense for the user:
        # pack the raw bytes into 32-bit words first.
        temp = Array('L')
        for i in range(0, len(raw_data), 4):
            temp.append(array_to_dword(raw_data[i: i + 4]))
        # print() call form works on both Python 2 and 3 (was a py2
        # print statement).
        print("Start Pos: 0x%04X" % start_pos)
        # Rotate so the oldest sample comes first.
        data = Array('L')
        if start_pos == 0:
            data = temp
        else:
            data.extend(temp[start_pos:])
            data.extend(temp[0:start_pos])
        return data

    def get_clock_rate(self):
        """Sample clock rate reported by the core."""
        return self.read_register(CLOCK_RATE)
def set_vcd_header():
    """Build the VCD file header: capture date, generator version and the
    1 ns timescale used by the waveform section."""
    header_lines = [
        "$date",
        time.strftime("%b %d, %Y %H:%M:%S"),
        "$end",
        "",
        "$version",
        "\tNysa Logic Analyzer V0.1",
        "$end",
        "",
        "$timescale",
        "\t1 ns",
        "$end",
        "",
    ]
    return "\n".join(header_lines) + "\n"
def set_signal_names(signal_dict, add_clock):
    """Emit the VCD declaration section ($scope ... $enddefinitions).

    Each signal in signal_dict (name -> bit width) is assigned a one-character
    VCD identifier starting at '!' (33); when add_clock is set, '!' is taken
    by the clock and the signals start at '"' (34).
    """
    parts = ["$scope\n", "$module logic_analyzer\n", "$end\n", "\n"]
    next_code = 33  # first printable character usable as a VCD identifier
    if add_clock:
        parts.append("$var wire 1 %c clk $end\n" % next_code)
        next_code = 34
    for position, name in enumerate(signal_dict):
        parts.append(
            "$var wire %d %c %s $end\n"
            % (signal_dict[name], next_code + position, name)
        )
    # Close the scope and terminate the definitions section.
    parts.extend(["\n", "$upscope\n", "$end\n", "\n", "$enddefinitions\n", "$end\n"])
    return "".join(parts)
def set_waveforms(data, signal_dict, add_clock, cycles_per_clock, debug = False):
    """Emit the VCD value-change section for the captured samples.

    data: one integer per sample; bit j of a sample is signal j.
    cycles_per_clock: nanoseconds per capture clock (1 ns VCD timescale).
    When add_clock is set, identifier 33 ('!') is the synthesized clock and
    the data signals start at 34; otherwise signals start at 33.
    """
    buf = ""
    buf += "#0\n"
    buf += "$dumpvars\n"
    timeval = 0  # NOTE(review): never used below
    if debug: print "Cycles per clock: %d" % cycles_per_clock
    index_offset = 33
    clock_character = 33
    if add_clock:
        index_offset = 34
    #Time 0
    #Add in the initial Clock Edge
    if add_clock:
        buf += "%d%c\n" % (0, clock_character)
    # All data signals start out unknown ('x') at time 0.
    for i in range(len(signal_dict)):
        buf += "x%c\n" % (index_offset + i)
    #Time 1/2 clock cycle
    if add_clock:
        buf += "#%d\n" % (cycles_per_clock / 2)
        buf += "%d%c\n" % (0, clock_character)
    # NOTE(review): 'i' here is left over from the loop above
    # (len(signal_dict) - 1), so this timestamp depends on the signal count,
    # not on sample position — looks unintended; TODO confirm.
    if add_clock:
        buf += "#%d\n" % ((i + 1) * cycles_per_clock)
        buf += "%d%c\n" % (1, clock_character)
    # First sample: dump every signal's initial value.
    for j in range (len(signal_dict)):
        buf += "%d%c\n" % (((data[0] >> j) & 0x01), (index_offset + j))
    #Time 1/2 clock cycle
    if add_clock:
        buf += "#%d\n" % (cycles_per_clock / 2)
        buf += "%d%c\n" % (0, clock_character)
    #Go through all the values for every time instance and look for changes
    if debug: print "Data Length: %d" % len(data)
    for i in range(1, len(data)):
        if add_clock:
            # Rising clock edge at the start of this sample period.
            buf += "#%d\n" % ((i + 1) * cycles_per_clock)
            buf += "%d%c\n" % (1, clock_character)
        #Read up to the second to the last piece of data
        if data[i - 1] != data[i]:
            if not add_clock:
                # Without a clock, only emit a timestamp when something changed.
                buf += "#%d\n" % ((i + 1) * cycles_per_clock)
            # Emit only the bits that toggled between consecutive samples.
            for j in range (len(signal_dict)):
                if ((data[i - 1] >> j) & 0x01) != ((data[i] >> j) & 0x01):
                    buf += "%d%c\n" % (((data[i] >> j) & 0x01), (index_offset + j))
        if add_clock:
            # Falling clock edge half a period later.
            buf += "#%d\n" % (((i + 1) * cycles_per_clock) + (cycles_per_clock / 2))
            buf += "%d%c\n" % (0, clock_character)
    # Final timestamp: restate every signal's last value.
    buf += "#%d\n" % (len(data) * cycles_per_clock)
    for i in range(len(signal_dict)):
        buf += "%d%c\n" % (((data[-1] >> i) & 0x01), (33 + i))
    return buf
def create_vcd_buffer(data, signal_dict = None, count = 32, clock_count = 100, add_clock = True, debug = False):
    """Assemble a complete VCD dump (header + declarations + waveforms).

    data:        captured samples, one integer per capture clock.
    signal_dict: ordered mapping of signal name -> bit width; padded with
                 generated names ("signal0"...) up to `count` entries when
                 fewer are supplied.
    count:       minimum number of declared signals.
    clock_count: capture clock frequency in Hz (0 is treated as 100 MHz).
    add_clock:   synthesize a clock waveform alongside the data signals.
    Returns the VCD file contents as a string.
    """
    # BUG FIX: the original signature used a mutable default argument
    # (signal_dict = OrderedDict()) and then mutated it below, so generated
    # signal names leaked across calls. Create a fresh dict per call instead.
    if signal_dict is None:
        signal_dict = OrderedDict()
    if debug:
        print("Create a VCD file")
    print("clock count: %d" % clock_count)
    ghertz_freq = 1000000000
    if clock_count == 0:
        clock_count = 100000000
    # Nanoseconds per capture clock at the file's 1 ns timescale.
    cycles_per_clock = int(ghertz_freq / clock_count)
    if debug:
        print("Clocks per cycle: %d" % cycles_per_clock)
    if len(signal_dict) < count:
        # Pad with generated one-bit signal names (mutates the caller's
        # dict when one was passed in — pre-existing behavior).
        for i in range(count):
            signal_dict["signal%d" % i] = 1
    buf = ""
    buf += set_vcd_header()
    buf += set_signal_names(signal_dict, add_clock)
    buf += set_waveforms(data, signal_dict, add_clock, cycles_per_clock, debug)
    return buf
| |
# -*- coding: utf-8 -*-
import logging
import subprocess

from pyramid.view import view_config
from sqlalchemy import exc

from ..app.sqla import DBSession
from ..maps import MapManager
from . import WorkspaceManager
from .models import Workspace
# Module-level logger, named after this module for log filtering.
log = logging.getLogger(__name__)
class APIWorkspace(object):
    """JSON API endpoints for creating, deleting and listing workspaces.

    Every view returns a dict with at least ``status`` (0 = failure,
    1 = success) and ``errors`` (list of human-readable messages), which
    Pyramid's json renderer serializes.
    """

    def __init__(self, request):
        self.request = request
        self.matchdict = request.matchdict

    def _workspace_directory(self, name):
        """On-disk directory backing the workspace called *name*."""
        return self.request.registry.settings.get('workspaces.directory', '') + '/' + name

    @view_config(
        route_name='workspaces.new',
        renderer='json',
        request_method='POST'
    )
    def new_workspace(self):
        """Create a workspace: validate the name, create its directory,
        then persist the database record (removing the directory again if
        persisting fails)."""
        response = {
            'status': 0,
            'errors': [],
            'id': ''
        }
        # BUG FIX: WebOb's POST.get() returns None for a missing key and
        # never raises KeyError, so the original try/except was dead code
        # and a missing name slipped through as None.
        name = self.request.POST.get('name')
        if not name:
            response['errors'].append('A name is required.')
        password = self.request.POST.get('password')
        if len(response['errors']) == 0:
            if not WorkspaceManager.is_valid_name(name):
                response['errors'].append('Name is not valid.')
            if Workspace.by_name(name):
                response['errors'].append('A workspace with that name already exists.')
        workspace_directory = None
        if len(response['errors']) == 0:
            workspace_directory = self._workspace_directory(name)
            try:
                # BUG FIX: subprocess.call() never raises
                # CalledProcessError; check_call() does, which makes this
                # handler reachable.
                subprocess.check_call(['mkdir', workspace_directory])
            except subprocess.CalledProcessError as e:
                response['errors'].append(str(e))
        if len(response['errors']) == 0:
            kwargs = {
                'name': name,
                'password': Workspace.encode_password(password) if password else None
            }
            workspace = Workspace(**kwargs)
            try:
                DBSession.add(workspace)
            except exc.SQLAlchemyError as e:
                # str(e): exception objects are not JSON-serializable.
                response['errors'].append(str(e))
            if len(response['errors']) == 0:
                response['status'] = 1
            else:
                # Roll back the directory we just created; best effort.
                try:
                    subprocess.check_call(['rm', '-r', workspace_directory])
                except subprocess.CalledProcessError:
                    pass
        return response

    @view_config(
        route_name='workspaces.delete',
        renderer='json',
        request_method='POST'
    )
    def delete_workspace(self):
        """Delete a workspace after authenticating: remove its directory,
        each of its maps (plus their elfinder connector scripts), then the
        workspace record itself."""
        response = {
            'status': 0,
            'errors': []
        }
        # See new_workspace: POST.get() never raises, test for None instead.
        name = self.request.POST.get('name')
        if not name:
            response['errors'].append("You're not telling me which workspace to delete.")
        password = self.request.POST.get('password')
        if len(response['errors']) == 0:
            if name == 'default':
                # The default workspace is not deletable.
                response['errors'].append('The default workspace cannot be deleted.')
            else:
                workspace = Workspace.authenticate(name, password)
                if workspace:
                    workspace_directory = self._workspace_directory(name)
                    try:
                        subprocess.check_call(['rm', '-r', workspace_directory])
                    except subprocess.CalledProcessError as e:
                        response['errors'].append(str(e))
                    if len(response['errors']) == 0:
                        # Remove each map's connector script and DB record.
                        for workspace_map in workspace.get_maps():
                            connector_file = self.request.registry.settings.get('cgi.directory', '') + \
                                '/elfinder-python/connector-' + name + '-' + workspace_map.name + '.py'
                            try:
                                subprocess.check_call(['rm', connector_file])
                            except subprocess.CalledProcessError as e:
                                response['errors'].append(str(e))
                            if len(response['errors']) == 0:
                                try:
                                    DBSession.delete(workspace_map)
                                except exc.SQLAlchemyError as e:
                                    response['errors'].append(str(e))
                    if len(response['errors']) == 0:
                        try:
                            DBSession.delete(workspace)
                            response['status'] = 1
                        except exc.SQLAlchemyError as e:
                            response['errors'].append(str(e))
                else:
                    response['errors'].append('Access denied.')
        return response

    #Return the list of workspaces
    @view_config(
        route_name='workspaces.all',
        permission='view',
        renderer='json',
        request_method='GET'
    )
    def all(self):
        """Return the id and name of every workspace, sorted by name."""
        workspaces = DBSession.query(Workspace.id, Workspace.name) \
            .order_by(Workspace.name).all()
        return {
            'status': 1,
            'errors': [],
            'workspaces': [
                {'id': workspace_id, 'name': workspace_name}
                for workspace_id, workspace_name in workspaces
            ],
        }

    @view_config(
        route_name='workspace.maps',
        permission='view',
        renderer='json',
        request_method='POST'
    )
    def maps(self):
        """List a workspace's maps, with their MapServer/thumbnail URLs.

        The 'default' workspace and the caller's own workspace skip the
        password check; any other workspace must authenticate.
        """
        response = {
            'status': 0,
            'errors': [],
            'maps': []
        }
        name = self.request.POST.get('name')
        if not name:
            response['errors'].append('A name is required.')
        password = self.request.POST.get('password')
        # Renamed from 'type' to avoid shadowing the builtin.
        map_type = self.request.POST.get('type')
        if len(response['errors']) == 0:
            if name != 'default' and name != self.request.userid:
                # Consistent with delete_workspace: authenticate is invoked
                # on the class, not on a throwaway instance.
                workspace = Workspace.authenticate(name, password)
                if not workspace:
                    response['errors'].append('Wrong credentials.')
            else:
                workspace = Workspace.by_name(name)
            if workspace:
                # Loop-invariant settings lookups hoisted out of the loop.
                mapserver_url = self.request.registry.settings.get('mapserver.url', '')
                workspaces_directory = self.request.registry.settings.get('workspaces.directory', '')
                for workspace_map in workspace.get_maps(map_type):
                    mapfile_directory = workspaces_directory + '/' + \
                        workspace.name + '/' + workspace_map.name + '/map/' + workspace_map.name + '.map'
                    (url, thumbnail_url) = MapManager.get_urls(
                        name=workspace_map.name,
                        extent=workspace_map.extent,
                        projection=workspace_map.projection,
                        mapfile_directory=mapfile_directory,
                        mapserver_url=mapserver_url
                    )
                    obj_map = dict(workspace_map)
                    obj_map['url'] = url
                    obj_map['thumbnail_url'] = thumbnail_url
                    response['maps'].append(obj_map)
                response['status'] = 1
        return response
| |
"""Test zha climate."""
import pytest
import zigpy.zcl.clusters
from zigpy.zcl.clusters.hvac import Thermostat
import zigpy.zcl.foundation as zcl_f
from homeassistant.components.climate.const import (
ATTR_CURRENT_TEMPERATURE,
ATTR_FAN_MODE,
ATTR_FAN_MODES,
ATTR_HVAC_ACTION,
ATTR_HVAC_MODE,
ATTR_HVAC_MODES,
ATTR_PRESET_MODE,
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_COOL,
CURRENT_HVAC_FAN,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
FAN_AUTO,
FAN_LOW,
FAN_ON,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_NONE,
SERVICE_SET_FAN_MODE,
SERVICE_SET_HVAC_MODE,
SERVICE_SET_PRESET_MODE,
SERVICE_SET_TEMPERATURE,
)
from homeassistant.components.zha.climate import (
DOMAIN,
HVAC_MODE_2_SYSTEM,
SEQ_OF_OPERATION,
)
from homeassistant.const import ATTR_ENTITY_ID, ATTR_TEMPERATURE, STATE_UNKNOWN
from .common import async_enable_traffic, find_entity_id, send_attributes_report
from tests.async_mock import patch
# Endpoint signatures handed to zigpy_device_mock: each dict describes one
# endpoint's device type and its input/output ZCL clusters.

# Plain thermostat (no Fan cluster).
CLIMATE = {
    1: {
        "device_type": zigpy.profiles.zha.DeviceType.THERMOSTAT,
        "in_clusters": [
            zigpy.zcl.clusters.general.Basic.cluster_id,
            zigpy.zcl.clusters.general.Identify.cluster_id,
            zigpy.zcl.clusters.hvac.Thermostat.cluster_id,
            zigpy.zcl.clusters.hvac.UserInterface.cluster_id,
        ],
        "out_clusters": [zigpy.zcl.clusters.general.Ota.cluster_id],
    }
}

# Thermostat with a Fan cluster.
CLIMATE_FAN = {
    1: {
        "device_type": zigpy.profiles.zha.DeviceType.THERMOSTAT,
        "in_clusters": [
            zigpy.zcl.clusters.general.Basic.cluster_id,
            zigpy.zcl.clusters.general.Identify.cluster_id,
            zigpy.zcl.clusters.hvac.Fan.cluster_id,
            zigpy.zcl.clusters.hvac.Thermostat.cluster_id,
            zigpy.zcl.clusters.hvac.UserInterface.cluster_id,
        ],
        "out_clusters": [zigpy.zcl.clusters.general.Ota.cluster_id],
    }
}

# Sinope thermostat: includes the manufacturer-specific cluster 0xFF01 (65281).
CLIMATE_SINOPE = {
    1: {
        "device_type": zigpy.profiles.zha.DeviceType.THERMOSTAT,
        "in_clusters": [
            zigpy.zcl.clusters.general.Basic.cluster_id,
            zigpy.zcl.clusters.general.Identify.cluster_id,
            zigpy.zcl.clusters.hvac.Thermostat.cluster_id,
            zigpy.zcl.clusters.hvac.UserInterface.cluster_id,
            65281,
        ],
        "out_clusters": [zigpy.zcl.clusters.general.Ota.cluster_id, 65281],
        "profile_id": 260,
    },
}

# Zen Within thermostat (same clusters as CLIMATE_FAN; distinguished by
# the manufacturer string below).
CLIMATE_ZEN = {
    1: {
        "device_type": zigpy.profiles.zha.DeviceType.THERMOSTAT,
        "in_clusters": [
            zigpy.zcl.clusters.general.Basic.cluster_id,
            zigpy.zcl.clusters.general.Identify.cluster_id,
            zigpy.zcl.clusters.hvac.Fan.cluster_id,
            zigpy.zcl.clusters.hvac.Thermostat.cluster_id,
            zigpy.zcl.clusters.hvac.UserInterface.cluster_id,
        ],
        "out_clusters": [zigpy.zcl.clusters.general.Ota.cluster_id],
    }
}

# Manufacturer names that trigger quirk-specific entity behavior.
MANUF_SINOPE = "Sinope Technologies"
MANUF_ZEN = "Zen Within"

# Default attribute values "plugged" into the mocked Thermostat cluster's
# read_attributes. Temperatures/setpoints are in hundredths of a degree
# (e.g. 2500 -> 25.0, matching the assertions in the tests below).
ZCL_ATTR_PLUG = {
    "abs_min_heat_setpoint_limit": 800,
    "abs_max_heat_setpoint_limit": 3000,
    "abs_min_cool_setpoint_limit": 2000,
    "abs_max_cool_setpoint_limit": 4000,
    "ctrl_seqe_of_oper": Thermostat.ControlSequenceOfOperation.Cooling_and_Heating,
    "local_temp": None,
    "max_cool_setpoint_limit": 3900,
    "max_heat_setpoint_limit": 2900,
    "min_cool_setpoint_limit": 2100,
    "min_heat_setpoint_limit": 700,
    "occupancy": 1,
    "occupied_cooling_setpoint": 2500,
    "occupied_heating_setpoint": 2200,
    "pi_cooling_demand": None,
    "pi_heating_demand": None,
    "running_mode": Thermostat.RunningMode.Off,
    "running_state": None,
    "system_mode": Thermostat.SystemMode.Off,
    "unoccupied_heating_setpoint": 2200,
    "unoccupied_cooling_setpoint": 2300,
}
@pytest.fixture
def device_climate_mock(hass, zigpy_device_mock, zha_device_joined):
    """Test regular thermostat device."""

    async def _dev(clusters, plug=None, manuf=None):
        # Overlay any per-test attribute overrides on the defaults.
        plugged_attrs = ZCL_ATTR_PLUG if plug is None else {**ZCL_ATTR_PLUG, **plug}

        async def _read_attr(attrs, *args, **kwargs):
            # Answer plugged attributes as successful reads and report the
            # rest as unsupported, like a real device would.
            success = {a: plugged_attrs[a] for a in attrs if a in plugged_attrs}
            failure = {
                a: zcl_f.Status.UNSUPPORTED_ATTRIBUTE
                for a in attrs
                if a not in plugged_attrs
            }
            return success, failure

        zigpy_device = zigpy_device_mock(clusters, manufacturer=manuf)
        zigpy_device.endpoints[1].thermostat.read_attributes.side_effect = _read_attr
        zha_device = await zha_device_joined(zigpy_device)
        await async_enable_traffic(hass, [zha_device])
        await hass.async_block_till_done()
        return zha_device

    return _dev
@pytest.fixture
async def device_climate(device_climate_mock):
    """Plain Climate device (no fan, default plugged attributes)."""
    return await device_climate_mock(CLIMATE)
@pytest.fixture
async def device_climate_fan(device_climate_mock):
    """Test thermostat with fan device."""
    return await device_climate_mock(CLIMATE_FAN)
@pytest.fixture
# Give cluster 65281 an ep_attribute name so it attaches to the endpoint
# as the Sinope manufacturer-specific cluster for the fixture's lifetime.
@patch.object(
    zigpy.zcl.clusters.manufacturer_specific.ManufacturerSpecificCluster,
    "ep_attribute",
    "sinope_manufacturer_specific",
)
async def device_climate_sinope(device_climate_mock):
    """Sinope thermostat."""
    return await device_climate_mock(CLIMATE_SINOPE, manuf=MANUF_SINOPE)
@pytest.fixture
async def device_climate_zen(device_climate_mock):
    """Zen Within thermostat."""
    return await device_climate_mock(CLIMATE_ZEN, manuf=MANUF_ZEN)
def test_sequence_mappings():
    """Test correct mapping between control sequence -> HVAC Mode -> Sysmode."""
    # Every HVAC mode reachable from any control sequence must translate to
    # a valid ZCL SystemMode.
    reachable_modes = {
        mode for modes in SEQ_OF_OPERATION.values() for mode in modes
    }
    for mode in reachable_modes:
        assert mode in HVAC_MODE_2_SYSTEM
        assert Thermostat.SystemMode(HVAC_MODE_2_SYSTEM[mode]) is not None
async def test_climate_local_temp(hass, device_climate):
    """Test local temperature."""
    thrm_cluster = device_climate.device.endpoints[1].thermostat
    entity_id = await find_entity_id(DOMAIN, device_climate, hass)

    # local_temp is plugged as None, so the entity starts without a reading.
    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_CURRENT_TEMPERATURE] is None

    # Attribute 0 reported as 2100 (hundredths of a degree) surfaces as 21.0.
    await send_attributes_report(hass, thrm_cluster, {0: 2100})
    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_CURRENT_TEMPERATURE] == 21.0
async def test_climate_hvac_action_running_state(hass, device_climate):
    """Test hvac action via running state.

    Attribute ids used below (values are Thermostat enum members):
    0x001C system mode, 0x001E running mode, 0x0029 running state.
    """
    thrm_cluster = device_climate.device.endpoints[1].thermostat
    entity_id = await find_entity_id(DOMAIN, device_climate, hass)

    # System mode Off + running mode Off -> action "off".
    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_OFF

    await send_attributes_report(
        hass, thrm_cluster, {0x001E: Thermostat.RunningMode.Off}
    )
    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_OFF

    # Once the system mode is not Off, an idle device reports "idle".
    await send_attributes_report(
        hass, thrm_cluster, {0x001C: Thermostat.SystemMode.Auto}
    )
    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_IDLE

    # Running mode drives the cooling/heating action.
    await send_attributes_report(
        hass, thrm_cluster, {0x001E: Thermostat.RunningMode.Cool}
    )
    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_COOL

    await send_attributes_report(
        hass, thrm_cluster, {0x001E: Thermostat.RunningMode.Heat}
    )
    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_HEAT

    await send_attributes_report(
        hass, thrm_cluster, {0x001E: Thermostat.RunningMode.Off}
    )
    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_IDLE

    # A running fan maps to the fan action even with running mode Off.
    await send_attributes_report(
        hass, thrm_cluster, {0x0029: Thermostat.RunningState.Fan_State_On}
    )
    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_FAN
async def test_climate_hvac_action_running_state_zen(hass, device_climate_zen):
    """Test Zen hvac action via running state.

    Zen derives the action purely from running-state bits (attr 0x0029):
    there is no action at all until the first report arrives.
    """
    thrm_cluster = device_climate_zen.device.endpoints[1].thermostat
    entity_id = await find_entity_id(DOMAIN, device_climate_zen, hass)

    state = hass.states.get(entity_id)
    assert ATTR_HVAC_ACTION not in state.attributes

    # Any cool/heat/fan stage bit maps to the corresponding action.
    await send_attributes_report(
        hass, thrm_cluster, {0x0029: Thermostat.RunningState.Cool_2nd_Stage_On}
    )
    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_COOL

    await send_attributes_report(
        hass, thrm_cluster, {0x0029: Thermostat.RunningState.Fan_State_On}
    )
    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_FAN

    await send_attributes_report(
        hass, thrm_cluster, {0x0029: Thermostat.RunningState.Heat_2nd_Stage_On}
    )
    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_HEAT

    await send_attributes_report(
        hass, thrm_cluster, {0x0029: Thermostat.RunningState.Fan_2nd_Stage_On}
    )
    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_FAN

    await send_attributes_report(
        hass, thrm_cluster, {0x0029: Thermostat.RunningState.Cool_State_On}
    )
    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_COOL

    await send_attributes_report(
        hass, thrm_cluster, {0x0029: Thermostat.RunningState.Fan_3rd_Stage_On}
    )
    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_FAN

    await send_attributes_report(
        hass, thrm_cluster, {0x0029: Thermostat.RunningState.Heat_State_On}
    )
    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_HEAT

    # Idle + system mode Off -> "off"; idle + a non-Off mode -> "idle".
    await send_attributes_report(
        hass, thrm_cluster, {0x0029: Thermostat.RunningState.Idle}
    )
    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_OFF

    await send_attributes_report(
        hass, thrm_cluster, {0x001C: Thermostat.SystemMode.Heat}
    )
    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_IDLE
async def test_climate_hvac_action_pi_demand(hass, device_climate):
    """Test hvac action based on pi_heating/cooling_demand attrs.

    Attribute ids: 0x0007 pi cooling demand, 0x0008 pi heating demand,
    0x001C system mode.
    """
    thrm_cluster = device_climate.device.endpoints[1].thermostat
    entity_id = await find_entity_id(DOMAIN, device_climate, hass)

    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_OFF

    # Non-zero demand selects the corresponding action.
    await send_attributes_report(hass, thrm_cluster, {0x0007: 10})
    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_COOL

    await send_attributes_report(hass, thrm_cluster, {0x0008: 20})
    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_HEAT

    # Zero demand with system mode Off -> "off".
    await send_attributes_report(hass, thrm_cluster, {0x0007: 0})
    await send_attributes_report(hass, thrm_cluster, {0x0008: 0})

    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_OFF

    # Zero demand with an active system mode -> "idle".
    await send_attributes_report(
        hass, thrm_cluster, {0x001C: Thermostat.SystemMode.Heat}
    )
    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_IDLE

    await send_attributes_report(
        hass, thrm_cluster, {0x001C: Thermostat.SystemMode.Cool}
    )
    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_IDLE
@pytest.mark.parametrize(
    "sys_mode, hvac_mode",
    (
        (Thermostat.SystemMode.Auto, HVAC_MODE_HEAT_COOL),
        (Thermostat.SystemMode.Cool, HVAC_MODE_COOL),
        (Thermostat.SystemMode.Heat, HVAC_MODE_HEAT),
        (Thermostat.SystemMode.Pre_cooling, HVAC_MODE_COOL),
        (Thermostat.SystemMode.Fan_only, HVAC_MODE_FAN_ONLY),
        (Thermostat.SystemMode.Dry, HVAC_MODE_DRY),
    ),
)
async def test_hvac_mode(hass, device_climate, sys_mode, hvac_mode):
    """Test HVAC mode: each reported ZCL system mode maps to an HA mode."""
    thrm_cluster = device_climate.device.endpoints[1].thermostat
    entity_id = await find_entity_id(DOMAIN, device_climate, hass)

    state = hass.states.get(entity_id)
    assert state.state == HVAC_MODE_OFF

    # Reporting system mode (attr 0x001C) switches the entity state.
    await send_attributes_report(hass, thrm_cluster, {0x001C: sys_mode})
    state = hass.states.get(entity_id)
    assert state.state == hvac_mode

    await send_attributes_report(
        hass, thrm_cluster, {0x001C: Thermostat.SystemMode.Off}
    )
    state = hass.states.get(entity_id)
    assert state.state == HVAC_MODE_OFF

    # An unmapped system-mode value leaves the entity state unknown.
    await send_attributes_report(hass, thrm_cluster, {0x001C: 0xFF})
    state = hass.states.get(entity_id)
    assert state.state == STATE_UNKNOWN
@pytest.mark.parametrize(
    "seq_of_op, modes",
    (
        # Unknown control sequence -> off only.
        (0xFF, {HVAC_MODE_OFF}),
        (0x00, {HVAC_MODE_OFF, HVAC_MODE_COOL}),
        (0x01, {HVAC_MODE_OFF, HVAC_MODE_COOL}),
        (0x02, {HVAC_MODE_OFF, HVAC_MODE_HEAT}),
        (0x03, {HVAC_MODE_OFF, HVAC_MODE_HEAT}),
        (0x04, {HVAC_MODE_OFF, HVAC_MODE_COOL, HVAC_MODE_HEAT, HVAC_MODE_HEAT_COOL}),
        (0x05, {HVAC_MODE_OFF, HVAC_MODE_COOL, HVAC_MODE_HEAT, HVAC_MODE_HEAT_COOL}),
    ),
)
async def test_hvac_modes(hass, device_climate_mock, seq_of_op, modes):
    """Test HVAC modes from sequence of operations."""
    # Plug the control-sequence attribute before the device joins.
    device_climate = await device_climate_mock(
        CLIMATE, {"ctrl_seqe_of_oper": seq_of_op}
    )
    entity_id = await find_entity_id(DOMAIN, device_climate, hass)
    state = hass.states.get(entity_id)
    assert set(state.attributes[ATTR_HVAC_MODES]) == modes
@pytest.mark.parametrize(
    "sys_mode, preset, target_temp",
    (
        # Occupied heating setpoint 2200 -> 22; away heating 1600 -> 16;
        # occupied cooling 2500 -> 25; away cooling 2700 -> 27.
        (Thermostat.SystemMode.Heat, None, 22),
        (Thermostat.SystemMode.Heat, PRESET_AWAY, 16),
        (Thermostat.SystemMode.Cool, None, 25),
        (Thermostat.SystemMode.Cool, PRESET_AWAY, 27),
    ),
)
async def test_target_temperature(
    hass, device_climate_mock, sys_mode, preset, target_temp
):
    """Test target temperature property."""
    # The Sinope quirk needs the manufacturer-specific cluster attached.
    with patch.object(
        zigpy.zcl.clusters.manufacturer_specific.ManufacturerSpecificCluster,
        "ep_attribute",
        "sinope_manufacturer_specific",
    ):
        device_climate = await device_climate_mock(
            CLIMATE_SINOPE,
            {
                "occupied_cooling_setpoint": 2500,
                "occupied_heating_setpoint": 2200,
                "system_mode": sys_mode,
                "unoccupied_heating_setpoint": 1600,
                "unoccupied_cooling_setpoint": 2700,
            },
            manuf=MANUF_SINOPE,
        )
    entity_id = await find_entity_id(DOMAIN, device_climate, hass)
    if preset:
        await hass.services.async_call(
            DOMAIN,
            SERVICE_SET_PRESET_MODE,
            {ATTR_ENTITY_ID: entity_id, ATTR_PRESET_MODE: preset},
            blocking=True,
        )
    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_TEMPERATURE] == target_temp
@pytest.mark.parametrize(
    "preset, unoccupied, target_temp",
    (
        # Occupied cooling 1700 -> 17; away uses the unoccupied setpoint,
        # and a missing (None) unoccupied setpoint yields no value.
        (None, 1800, 17),
        (PRESET_AWAY, 1800, 18),
        (PRESET_AWAY, None, None),
    ),
)
async def test_target_temperature_high(
    hass, device_climate_mock, preset, unoccupied, target_temp
):
    """Test target temperature high property."""
    with patch.object(
        zigpy.zcl.clusters.manufacturer_specific.ManufacturerSpecificCluster,
        "ep_attribute",
        "sinope_manufacturer_specific",
    ):
        device_climate = await device_climate_mock(
            CLIMATE_SINOPE,
            {
                "occupied_cooling_setpoint": 1700,
                "system_mode": Thermostat.SystemMode.Auto,
                "unoccupied_cooling_setpoint": unoccupied,
            },
            manuf=MANUF_SINOPE,
        )
    entity_id = await find_entity_id(DOMAIN, device_climate, hass)
    if preset:
        await hass.services.async_call(
            DOMAIN,
            SERVICE_SET_PRESET_MODE,
            {ATTR_ENTITY_ID: entity_id, ATTR_PRESET_MODE: preset},
            blocking=True,
        )
    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_TARGET_TEMP_HIGH] == target_temp
@pytest.mark.parametrize(
    "preset, unoccupied, target_temp",
    (
        # Occupied heating 2100 -> 21; away uses the unoccupied setpoint,
        # and a missing (None) unoccupied setpoint yields no value.
        (None, 1600, 21),
        (PRESET_AWAY, 1600, 16),
        (PRESET_AWAY, None, None),
    ),
)
async def test_target_temperature_low(
    hass, device_climate_mock, preset, unoccupied, target_temp
):
    """Test target temperature low property."""
    with patch.object(
        zigpy.zcl.clusters.manufacturer_specific.ManufacturerSpecificCluster,
        "ep_attribute",
        "sinope_manufacturer_specific",
    ):
        device_climate = await device_climate_mock(
            CLIMATE_SINOPE,
            {
                "occupied_heating_setpoint": 2100,
                "system_mode": Thermostat.SystemMode.Auto,
                "unoccupied_heating_setpoint": unoccupied,
            },
            manuf=MANUF_SINOPE,
        )
    entity_id = await find_entity_id(DOMAIN, device_climate, hass)
    if preset:
        await hass.services.async_call(
            DOMAIN,
            SERVICE_SET_PRESET_MODE,
            {ATTR_ENTITY_ID: entity_id, ATTR_PRESET_MODE: preset},
            blocking=True,
        )
    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_TARGET_TEMP_LOW] == target_temp
@pytest.mark.parametrize(
    "hvac_mode, sys_mode",
    (
        # sys_mode None: the plain CLIMATE device does not support that HA
        # mode, so the service call must be a no-op.
        (HVAC_MODE_AUTO, None),
        (HVAC_MODE_COOL, Thermostat.SystemMode.Cool),
        (HVAC_MODE_DRY, None),
        (HVAC_MODE_FAN_ONLY, None),
        (HVAC_MODE_HEAT, Thermostat.SystemMode.Heat),
        (HVAC_MODE_HEAT_COOL, Thermostat.SystemMode.Auto),
    ),
)
async def test_set_hvac_mode(hass, device_climate, hvac_mode, sys_mode):
    """Test setting hvac mode."""
    thrm_cluster = device_climate.device.endpoints[1].thermostat
    entity_id = await find_entity_id(DOMAIN, device_climate, hass)

    state = hass.states.get(entity_id)
    assert state.state == HVAC_MODE_OFF

    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_HVAC_MODE,
        {ATTR_ENTITY_ID: entity_id, ATTR_HVAC_MODE: hvac_mode},
        blocking=True,
    )
    state = hass.states.get(entity_id)
    if sys_mode is not None:
        # Supported mode: a single system_mode write is issued.
        assert state.state == hvac_mode
        assert thrm_cluster.write_attributes.call_count == 1
        assert thrm_cluster.write_attributes.call_args[0][0] == {
            "system_mode": sys_mode
        }
    else:
        # Unsupported mode: nothing written, state unchanged.
        assert thrm_cluster.write_attributes.call_count == 0
        assert state.state == HVAC_MODE_OFF

    # turn off
    thrm_cluster.write_attributes.reset_mock()
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_HVAC_MODE,
        {ATTR_ENTITY_ID: entity_id, ATTR_HVAC_MODE: HVAC_MODE_OFF},
        blocking=True,
    )
    state = hass.states.get(entity_id)
    assert state.state == HVAC_MODE_OFF
    assert thrm_cluster.write_attributes.call_count == 1
    assert thrm_cluster.write_attributes.call_args[0][0] == {
        "system_mode": Thermostat.SystemMode.Off
    }
async def test_preset_setting(hass, device_climate_sinope):
    """Test preset setting.

    The mocked write_attributes return value is a deserialized ZCL
    WriteAttributes response: b"\\x00" is an all-success record, while the
    three-byte payloads carry a failing status, so the preset must not
    change when the device rejects the occupancy write.
    """
    entity_id = await find_entity_id(DOMAIN, device_climate_sinope, hass)
    thrm_cluster = device_climate_sinope.device.endpoints[1].thermostat

    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_PRESET_MODE] == PRESET_NONE

    # unsuccessful occupancy change
    thrm_cluster.write_attributes.return_value = [
        zcl_f.WriteAttributesResponse.deserialize(b"\x01\x00\x00")[0]
    ]

    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_PRESET_MODE,
        {ATTR_ENTITY_ID: entity_id, ATTR_PRESET_MODE: PRESET_AWAY},
        blocking=True,
    )
    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_PRESET_MODE] == PRESET_NONE
    assert thrm_cluster.write_attributes.call_count == 1
    # Away preset is written as set_occupancy = 0 (unoccupied).
    assert thrm_cluster.write_attributes.call_args[0][0] == {"set_occupancy": 0}

    # successful occupancy change
    thrm_cluster.write_attributes.reset_mock()
    thrm_cluster.write_attributes.return_value = [
        zcl_f.WriteAttributesResponse.deserialize(b"\x00")[0]
    ]
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_PRESET_MODE,
        {ATTR_ENTITY_ID: entity_id, ATTR_PRESET_MODE: PRESET_AWAY},
        blocking=True,
    )
    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_PRESET_MODE] == PRESET_AWAY
    assert thrm_cluster.write_attributes.call_count == 1
    assert thrm_cluster.write_attributes.call_args[0][0] == {"set_occupancy": 0}

    # unsuccessful occupancy change
    thrm_cluster.write_attributes.reset_mock()
    thrm_cluster.write_attributes.return_value = [
        zcl_f.WriteAttributesResponse.deserialize(b"\x01\x01\x01")[0]
    ]
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_PRESET_MODE,
        {ATTR_ENTITY_ID: entity_id, ATTR_PRESET_MODE: PRESET_NONE},
        blocking=True,
    )
    state = hass.states.get(entity_id)
    # Rejected write: the entity stays in the away preset.
    assert state.attributes[ATTR_PRESET_MODE] == PRESET_AWAY
    assert thrm_cluster.write_attributes.call_count == 1
    assert thrm_cluster.write_attributes.call_args[0][0] == {"set_occupancy": 1}

    # successful occupancy change
    thrm_cluster.write_attributes.reset_mock()
    thrm_cluster.write_attributes.return_value = [
        zcl_f.WriteAttributesResponse.deserialize(b"\x00")[0]
    ]
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_PRESET_MODE,
        {ATTR_ENTITY_ID: entity_id, ATTR_PRESET_MODE: PRESET_NONE},
        blocking=True,
    )
    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_PRESET_MODE] == PRESET_NONE
    assert thrm_cluster.write_attributes.call_count == 1
    assert thrm_cluster.write_attributes.call_args[0][0] == {"set_occupancy": 1}
async def test_preset_setting_invalid(hass, device_climate_sinope):
    """Test invalid preset setting: no write is issued, preset unchanged."""
    entity_id = await find_entity_id(DOMAIN, device_climate_sinope, hass)
    thrm_cluster = device_climate_sinope.device.endpoints[1].thermostat

    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_PRESET_MODE] == PRESET_NONE

    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_PRESET_MODE,
        {ATTR_ENTITY_ID: entity_id, ATTR_PRESET_MODE: "invalid_preset"},
        blocking=True,
    )
    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_PRESET_MODE] == PRESET_NONE
    assert thrm_cluster.write_attributes.call_count == 0
async def test_set_temperature_hvac_mode(hass, device_climate):
    """Test setting HVAC mode in temperature service call."""
    entity_id = await find_entity_id(DOMAIN, device_climate, hass)
    thrm_cluster = device_climate.device.endpoints[1].thermostat

    state = hass.states.get(entity_id)
    assert state.state == HVAC_MODE_OFF

    # set_temperature may also carry an hvac_mode; the mode switch must
    # result in a system_mode write.
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_TEMPERATURE,
        {
            ATTR_ENTITY_ID: entity_id,
            ATTR_HVAC_MODE: HVAC_MODE_HEAT_COOL,
            ATTR_TEMPERATURE: 20,
        },
        blocking=True,
    )
    state = hass.states.get(entity_id)
    assert state.state == HVAC_MODE_HEAT_COOL
    assert thrm_cluster.write_attributes.await_count == 1
    assert thrm_cluster.write_attributes.call_args[0][0] == {
        "system_mode": Thermostat.SystemMode.Auto
    }
async def test_set_temperature_heat_cool(hass, device_climate_mock):
    """Test setting temperature service call in heating/cooling HVAC mode.

    In heat_cool the entity only accepts target_temp_low/high (a bare
    temperature is ignored); occupied vs unoccupied setpoints are written
    depending on the active preset.
    """
    with patch.object(
        zigpy.zcl.clusters.manufacturer_specific.ManufacturerSpecificCluster,
        "ep_attribute",
        "sinope_manufacturer_specific",
    ):
        device_climate = await device_climate_mock(
            CLIMATE_SINOPE,
            {
                "occupied_cooling_setpoint": 2500,
                "occupied_heating_setpoint": 2000,
                "system_mode": Thermostat.SystemMode.Auto,
                "unoccupied_heating_setpoint": 1600,
                "unoccupied_cooling_setpoint": 2700,
            },
            manuf=MANUF_SINOPE,
        )
    entity_id = await find_entity_id(DOMAIN, device_climate, hass)
    thrm_cluster = device_climate.device.endpoints[1].thermostat

    state = hass.states.get(entity_id)
    assert state.state == HVAC_MODE_HEAT_COOL

    # A plain temperature is rejected in heat_cool mode: nothing written.
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_TEMPERATURE,
        {ATTR_ENTITY_ID: entity_id, ATTR_TEMPERATURE: 21},
        blocking=True,
    )
    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_TARGET_TEMP_LOW] == 20.0
    assert state.attributes[ATTR_TARGET_TEMP_HIGH] == 25.0
    assert thrm_cluster.write_attributes.await_count == 0

    # Low/high pair updates the occupied heat and cool setpoints.
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_TEMPERATURE,
        {
            ATTR_ENTITY_ID: entity_id,
            ATTR_TARGET_TEMP_HIGH: 26,
            ATTR_TARGET_TEMP_LOW: 19,
        },
        blocking=True,
    )
    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_TARGET_TEMP_LOW] == 19.0
    assert state.attributes[ATTR_TARGET_TEMP_HIGH] == 26.0
    assert thrm_cluster.write_attributes.await_count == 2
    assert thrm_cluster.write_attributes.call_args_list[0][0][0] == {
        "occupied_heating_setpoint": 1900
    }
    assert thrm_cluster.write_attributes.call_args_list[1][0][0] == {
        "occupied_cooling_setpoint": 2600
    }

    # In the away preset the unoccupied setpoints are written instead.
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_PRESET_MODE,
        {ATTR_ENTITY_ID: entity_id, ATTR_PRESET_MODE: PRESET_AWAY},
        blocking=True,
    )
    thrm_cluster.write_attributes.reset_mock()

    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_TEMPERATURE,
        {
            ATTR_ENTITY_ID: entity_id,
            ATTR_TARGET_TEMP_HIGH: 30,
            ATTR_TARGET_TEMP_LOW: 15,
        },
        blocking=True,
    )
    state = hass.states.get(entity_id)
    assert state.attributes[ATTR_TARGET_TEMP_LOW] == 15.0
    assert state.attributes[ATTR_TARGET_TEMP_HIGH] == 30.0
    assert thrm_cluster.write_attributes.await_count == 2
    assert thrm_cluster.write_attributes.call_args_list[0][0][0] == {
        "unoccupied_heating_setpoint": 1500
    }
    assert thrm_cluster.write_attributes.call_args_list[1][0][0] == {
        "unoccupied_cooling_setpoint": 3000
    }
async def test_set_temperature_heat(hass, device_climate_mock):
    """Test setting temperature service call in heating HVAC mode."""
    plugged_attrs = {
        "occupied_cooling_setpoint": 2500,
        "occupied_heating_setpoint": 2000,
        "system_mode": Thermostat.SystemMode.Heat,
        "unoccupied_heating_setpoint": 1600,
        "unoccupied_cooling_setpoint": 2700,
    }
    # The Sinope quirk publishes its manufacturer cluster under this
    # ep_attribute name; patch it so the mocked endpoint matches.
    with patch.object(
        zigpy.zcl.clusters.manufacturer_specific.ManufacturerSpecificCluster,
        "ep_attribute",
        "sinope_manufacturer_specific",
    ):
        device_climate = await device_climate_mock(
            CLIMATE_SINOPE, plugged_attrs, manuf=MANUF_SINOPE
        )
    entity_id = await find_entity_id(DOMAIN, device_climate, hass)
    thermostat_cluster = device_climate.device.endpoints[1].thermostat

    assert hass.states.get(entity_id).state == HVAC_MODE_HEAT

    # A high/low range is ignored while in plain heat mode: nothing written.
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_TEMPERATURE,
        {
            ATTR_ENTITY_ID: entity_id,
            ATTR_TARGET_TEMP_HIGH: 30,
            ATTR_TARGET_TEMP_LOW: 15,
        },
        blocking=True,
    )
    entity_state = hass.states.get(entity_id)
    assert entity_state.attributes[ATTR_TARGET_TEMP_LOW] is None
    assert entity_state.attributes[ATTR_TARGET_TEMP_HIGH] is None
    assert entity_state.attributes[ATTR_TEMPERATURE] == 20.0
    assert thermostat_cluster.write_attributes.await_count == 0

    # A single target temperature updates the occupied heating setpoint.
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_TEMPERATURE,
        {ATTR_ENTITY_ID: entity_id, ATTR_TEMPERATURE: 21},
        blocking=True,
    )
    entity_state = hass.states.get(entity_id)
    assert entity_state.attributes[ATTR_TARGET_TEMP_LOW] is None
    assert entity_state.attributes[ATTR_TARGET_TEMP_HIGH] is None
    assert entity_state.attributes[ATTR_TEMPERATURE] == 21.0
    assert thermostat_cluster.write_attributes.await_count == 1
    assert thermostat_cluster.write_attributes.call_args_list[0][0][0] == {
        "occupied_heating_setpoint": 2100
    }

    # With the away preset active, the unoccupied setpoint is written instead.
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_PRESET_MODE,
        {ATTR_ENTITY_ID: entity_id, ATTR_PRESET_MODE: PRESET_AWAY},
        blocking=True,
    )
    thermostat_cluster.write_attributes.reset_mock()
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_TEMPERATURE,
        {ATTR_ENTITY_ID: entity_id, ATTR_TEMPERATURE: 22},
        blocking=True,
    )
    entity_state = hass.states.get(entity_id)
    assert entity_state.attributes[ATTR_TARGET_TEMP_LOW] is None
    assert entity_state.attributes[ATTR_TARGET_TEMP_HIGH] is None
    assert entity_state.attributes[ATTR_TEMPERATURE] == 22.0
    assert thermostat_cluster.write_attributes.await_count == 1
    assert thermostat_cluster.write_attributes.call_args_list[0][0][0] == {
        "unoccupied_heating_setpoint": 2200
    }
async def test_set_temperature_cool(hass, device_climate_mock):
    """Test setting temperature service call in cooling HVAC mode."""
    plugged_attrs = {
        "occupied_cooling_setpoint": 2500,
        "occupied_heating_setpoint": 2000,
        "system_mode": Thermostat.SystemMode.Cool,
        "unoccupied_cooling_setpoint": 1600,
        "unoccupied_heating_setpoint": 2700,
    }
    # Patch the quirk's ep_attribute so the mocked endpoint matches.
    with patch.object(
        zigpy.zcl.clusters.manufacturer_specific.ManufacturerSpecificCluster,
        "ep_attribute",
        "sinope_manufacturer_specific",
    ):
        device_climate = await device_climate_mock(
            CLIMATE_SINOPE, plugged_attrs, manuf=MANUF_SINOPE
        )
    entity_id = await find_entity_id(DOMAIN, device_climate, hass)
    thermostat_cluster = device_climate.device.endpoints[1].thermostat

    assert hass.states.get(entity_id).state == HVAC_MODE_COOL

    # A high/low range is ignored while in plain cool mode: nothing written.
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_TEMPERATURE,
        {
            ATTR_ENTITY_ID: entity_id,
            ATTR_TARGET_TEMP_HIGH: 30,
            ATTR_TARGET_TEMP_LOW: 15,
        },
        blocking=True,
    )
    entity_state = hass.states.get(entity_id)
    assert entity_state.attributes[ATTR_TARGET_TEMP_LOW] is None
    assert entity_state.attributes[ATTR_TARGET_TEMP_HIGH] is None
    assert entity_state.attributes[ATTR_TEMPERATURE] == 25.0
    assert thermostat_cluster.write_attributes.await_count == 0

    # A single target temperature updates the occupied cooling setpoint.
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_TEMPERATURE,
        {ATTR_ENTITY_ID: entity_id, ATTR_TEMPERATURE: 21},
        blocking=True,
    )
    entity_state = hass.states.get(entity_id)
    assert entity_state.attributes[ATTR_TARGET_TEMP_LOW] is None
    assert entity_state.attributes[ATTR_TARGET_TEMP_HIGH] is None
    assert entity_state.attributes[ATTR_TEMPERATURE] == 21.0
    assert thermostat_cluster.write_attributes.await_count == 1
    assert thermostat_cluster.write_attributes.call_args_list[0][0][0] == {
        "occupied_cooling_setpoint": 2100
    }

    # With the away preset active, the unoccupied setpoint is written instead.
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_PRESET_MODE,
        {ATTR_ENTITY_ID: entity_id, ATTR_PRESET_MODE: PRESET_AWAY},
        blocking=True,
    )
    thermostat_cluster.write_attributes.reset_mock()
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_TEMPERATURE,
        {ATTR_ENTITY_ID: entity_id, ATTR_TEMPERATURE: 22},
        blocking=True,
    )
    entity_state = hass.states.get(entity_id)
    assert entity_state.attributes[ATTR_TARGET_TEMP_LOW] is None
    assert entity_state.attributes[ATTR_TARGET_TEMP_HIGH] is None
    assert entity_state.attributes[ATTR_TEMPERATURE] == 22.0
    assert thermostat_cluster.write_attributes.await_count == 1
    assert thermostat_cluster.write_attributes.call_args_list[0][0][0] == {
        "unoccupied_cooling_setpoint": 2200
    }
async def test_set_temperature_wrong_mode(hass, device_climate_mock):
    """Test setting temperature service call for wrong HVAC mode."""
    plugged_attrs = {
        "occupied_cooling_setpoint": 2500,
        "occupied_heating_setpoint": 2000,
        "system_mode": Thermostat.SystemMode.Dry,
        "unoccupied_cooling_setpoint": 1600,
        "unoccupied_heating_setpoint": 2700,
    }
    with patch.object(
        zigpy.zcl.clusters.manufacturer_specific.ManufacturerSpecificCluster,
        "ep_attribute",
        "sinope_manufacturer_specific",
    ):
        device_climate = await device_climate_mock(
            CLIMATE_SINOPE, plugged_attrs, manuf=MANUF_SINOPE
        )
    entity_id = await find_entity_id(DOMAIN, device_climate, hass)
    thermostat_cluster = device_climate.device.endpoints[1].thermostat

    assert hass.states.get(entity_id).state == HVAC_MODE_DRY

    # Dry mode has no target temperature, so the call must be a no-op.
    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_TEMPERATURE,
        {ATTR_ENTITY_ID: entity_id, ATTR_TEMPERATURE: 24},
        blocking=True,
    )
    entity_state = hass.states.get(entity_id)
    assert entity_state.attributes[ATTR_TARGET_TEMP_LOW] is None
    assert entity_state.attributes[ATTR_TARGET_TEMP_HIGH] is None
    assert entity_state.attributes[ATTR_TEMPERATURE] is None
    assert thermostat_cluster.write_attributes.await_count == 0
async def test_occupancy_reset(hass, device_climate_sinope):
    """Test away preset reset."""
    entity_id = await find_entity_id(DOMAIN, device_climate_sinope, hass)
    thermostat_cluster = device_climate_sinope.device.endpoints[1].thermostat

    assert hass.states.get(entity_id).attributes[ATTR_PRESET_MODE] == PRESET_NONE

    await hass.services.async_call(
        DOMAIN,
        SERVICE_SET_PRESET_MODE,
        {ATTR_ENTITY_ID: entity_id, ATTR_PRESET_MODE: PRESET_AWAY},
        blocking=True,
    )
    thermostat_cluster.write_attributes.reset_mock()
    assert hass.states.get(entity_id).attributes[ATTR_PRESET_MODE] == PRESET_AWAY

    # A setpoint report while read_attributes is mocked to return an
    # "occupied" result clears the away preset back to "none".
    thermostat_cluster.read_attributes.return_value = [True], {}
    await send_attributes_report(
        hass, thermostat_cluster, {"occupied_heating_setpoint": 1950}
    )
    assert hass.states.get(entity_id).attributes[ATTR_PRESET_MODE] == PRESET_NONE
async def test_fan_mode(hass, device_climate_fan):
    """Test fan mode."""
    entity_id = await find_entity_id(DOMAIN, device_climate_fan, hass)
    thermostat_cluster = device_climate_fan.device.endpoints[1].thermostat

    entity_state = hass.states.get(entity_id)
    assert set(entity_state.attributes[ATTR_FAN_MODES]) == {FAN_AUTO, FAN_ON}
    assert entity_state.attributes[ATTR_FAN_MODE] == FAN_AUTO

    # Fan running states map to FAN_ON ...
    await send_attributes_report(
        hass, thermostat_cluster, {"running_state": Thermostat.RunningState.Fan_State_On}
    )
    assert hass.states.get(entity_id).attributes[ATTR_FAN_MODE] == FAN_ON

    # ... while the idle running state maps back to FAN_AUTO.
    await send_attributes_report(
        hass, thermostat_cluster, {"running_state": Thermostat.RunningState.Idle}
    )
    assert hass.states.get(entity_id).attributes[ATTR_FAN_MODE] == FAN_AUTO

    await send_attributes_report(
        hass, thermostat_cluster, {"running_state": Thermostat.RunningState.Fan_2nd_Stage_On}
    )
    assert hass.states.get(entity_id).attributes[ATTR_FAN_MODE] == FAN_ON
async def test_set_fan_mode_not_supported(hass, device_climate_fan):
    """Test fan setting unsupported mode."""
    entity_id = await find_entity_id(DOMAIN, device_climate_fan, hass)
    fan_cluster = device_climate_fan.device.endpoints[1].fan

    # FAN_LOW is not among the supported modes, so nothing gets written.
    service_data = {ATTR_ENTITY_ID: entity_id, ATTR_FAN_MODE: FAN_LOW}
    await hass.services.async_call(
        DOMAIN, SERVICE_SET_FAN_MODE, service_data, blocking=True
    )
    assert fan_cluster.write_attributes.await_count == 0
async def test_set_fan_mode(hass, device_climate_fan):
    """Test fan mode setting."""
    entity_id = await find_entity_id(DOMAIN, device_climate_fan, hass)
    fan_cluster = device_climate_fan.device.endpoints[1].fan

    assert hass.states.get(entity_id).attributes[ATTR_FAN_MODE] == FAN_AUTO

    async def set_fan_mode(mode):
        # Helper: issue a climate.set_fan_mode service call for this entity.
        await hass.services.async_call(
            DOMAIN,
            SERVICE_SET_FAN_MODE,
            {ATTR_ENTITY_ID: entity_id, ATTR_FAN_MODE: mode},
            blocking=True,
        )

    await set_fan_mode(FAN_ON)
    assert fan_cluster.write_attributes.await_count == 1
    assert fan_cluster.write_attributes.call_args[0][0] == {"fan_mode": 4}

    fan_cluster.write_attributes.reset_mock()
    await set_fan_mode(FAN_AUTO)
    assert fan_cluster.write_attributes.await_count == 1
    assert fan_cluster.write_attributes.call_args[0][0] == {"fan_mode": 5}
| |
# Purpose: Convert Veracode XML elements to Python objects.
from __future__ import print_function
import sys
import xml.etree.ElementTree as ETree
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO
import pytz
from dateutil import parser
from veracodetocsv.helpers import models
from veracodetocsv.helpers.exceptions import VeracodeError, VeracodeAPIError
def parse_and_remove_xml_namespaces(xml_string):
    """Parse ``xml_string`` and return the root element with the namespace
    prefix stripped from every element tag.
    """
    # ElementTree wants a binary file object on Python 3 (BytesIO) and a
    # StringIO-wrapped byte string on Python 2.
    if sys.version_info >= (3,):
        source = BytesIO(xml_string)
    else:
        source = StringIO(xml_string)
    iterator = ETree.iterparse(source)
    for _, element in iterator:
        # "{namespace}tag" -> "tag"; tags without a namespace are untouched.
        _, separator, local_name = element.tag.partition("}")
        if separator:
            element.tag = local_name
    return iterator.root
class DataLoader:
    """Fetches apps, sandboxes, builds and flaws from the Veracode XML APIs
    and converts them into the plain objects defined in
    ``veracodetocsv.helpers.models``.
    """

    def __init__(self, api, build_tools):
        # api: Veracode API client (get_app_list, get_build_list, ...).
        # build_tools: decides which builds still need to be processed.
        self.api = api
        self.build_tools = build_tools

    def _get_apps(self):
        """Returns a list of apps"""
        try:
            app_list_xml = self.api.get_app_list()
        except VeracodeAPIError as e:
            # Wrap transport/API errors in the package-level error type.
            raise VeracodeError(e)
        app_list_root_element = parse_and_remove_xml_namespaces(app_list_xml)
        app_elements = app_list_root_element.findall("app")
        apps = []
        for app_element in app_elements:
            apps.append(models.App(app_element.attrib["app_id"], app_element.attrib["app_name"]))
        return apps

    def _get_app_info(self, app_id):
        """Returns a dict holding app info"""
        try:
            app_info_xml = self.api.get_app_info(app_id)
        except VeracodeAPIError as e:
            raise VeracodeError(e)
        app_info_root_element = parse_and_remove_xml_namespaces(app_info_xml)
        # The <application> element's attributes carry the app metadata.
        return app_info_root_element.find("application").attrib

    def _get_sandboxes(self, app_id):
        """Returns a list of sandboxes"""
        try:
            sandbox_list_xml = self.api.get_sandbox_list(app_id)
        except VeracodeAPIError as e:
            raise VeracodeError(e)
        sandbox_list_root_element = parse_and_remove_xml_namespaces(sandbox_list_xml)
        sandbox_elements = sandbox_list_root_element.findall("sandbox")
        sandboxes = []
        for sandbox_element in sandbox_elements:
            sandboxes.append(models.Sandbox(sandbox_element.attrib["sandbox_id"], sandbox_element.attrib["sandbox_name"]))
        return sandboxes

    def _get_builds(self, app_id, include_static_builds, include_dynamic_builds, sandbox_id=None):
        """Returns a list of builds"""
        try:
            if sandbox_id is None:
                build_list_xml = self.api.get_build_list(app_id)
            else:
                build_list_xml = self.api.get_build_list(app_id, sandbox_id)
        except VeracodeAPIError as e:
            raise VeracodeError(e)
        build_list_root_element = parse_and_remove_xml_namespaces(build_list_xml)
        build_elements = build_list_root_element.findall("build")
        builds = []
        for build_element in build_elements:
            if sandbox_id is None:
                if "policy_updated_date" in build_element.attrib:
                    # Drop the character at index 22 — presumably the colon in
                    # the "+HH:MM" timezone offset — before parsing; confirm
                    # against the API's date format.
                    policy_updated_date_string = build_element.attrib["policy_updated_date"][:22] + \
                        build_element.attrib["policy_updated_date"][23:]
                    policy_updated_date = parser.parse(policy_updated_date_string).astimezone(pytz.utc)
                else:
                    # In this case it's a build that hasn't completed yet, as it's not a sandbox and should have a
                    # policy updated date if the build has been published.
                    continue
            else:
                # Sandbox builds carry no policy date.
                policy_updated_date = None
            # "dynamic_scan_type" in the attributes distinguishes dynamic
            # builds from static ones.
            if include_static_builds and "dynamic_scan_type" not in build_element.attrib:
                builds.append(models.StaticBuild(build_element.attrib["build_id"], build_element.attrib["version"], policy_updated_date))
            if include_dynamic_builds and "dynamic_scan_type" in build_element.attrib:
                builds.append(models.DynamicBuild(build_element.attrib["build_id"], build_element.attrib["version"], policy_updated_date))
        return builds

    def _get_build_info(self, app_id, build_id, sandbox_id=None):
        """Returns an XML element holding build info"""
        try:
            build_info_xml = self.api.get_build_info(app_id, build_id, sandbox_id)
        except VeracodeAPIError as e:
            raise VeracodeError(e)
        build_info_root_element = parse_and_remove_xml_namespaces(build_info_xml)
        return build_info_root_element.find("build")

    def _get_flaws(self, build_id, build_type):
        """Returns a list of flaws

        For static builds, returns a (flaws, analysis_size_bytes) tuple
        instead; for dynamic builds only the flaw list is returned.
        """
        try:
            detailed_report_xml = self.api.get_detailed_report(build_id)
        except VeracodeAPIError as e:
            raise VeracodeError(e)
        detailed_report_root_element = parse_and_remove_xml_namespaces(detailed_report_xml)
        # Use xpath to find all flaws in the detailed report
        # (build_type is "static" or "dynamic", giving e.g. "staticflaws/flaw").
        findall_string = "severity/category/cwe/" + build_type + "flaws/flaw"
        flaw_elements = detailed_report_root_element.findall(findall_string)
        # Sort by numeric issue id for a stable, human-friendly CSV order.
        flaw_elements.sort(key=lambda flaw: int(flaw.attrib["issueid"]))
        flaws = []
        for flaw_element in flaw_elements:
            date_first_occurrence = parser.parse(flaw_element.attrib["date_first_occurrence"]).astimezone(pytz.utc)
            if build_type == "static":
                flaws.append(models.StaticFlaw(flaw_element.attrib["issueid"], date_first_occurrence,
                                               flaw_element.attrib["severity"], flaw_element.attrib["cweid"],
                                               flaw_element.attrib["categoryname"], flaw_element.attrib["affects_policy_compliance"],
                                               flaw_element.attrib["remediationeffort"], flaw_element.attrib["remediation_status"],
                                               flaw_element.attrib["mitigation_status_desc"], flaw_element.attrib["exploitLevel"],
                                               flaw_element.attrib["module"], flaw_element.attrib["sourcefile"], flaw_element.attrib["line"]))
            elif build_type == "dynamic":
                flaws.append(models.DynamicFlaw(flaw_element.attrib["issueid"], date_first_occurrence,
                                                flaw_element.attrib["severity"], flaw_element.attrib["cweid"],
                                                flaw_element.attrib["categoryname"], flaw_element.attrib["affects_policy_compliance"],
                                                flaw_element.attrib["remediationeffort"], flaw_element.attrib["remediation_status"],
                                                flaw_element.attrib["mitigation_status_desc"], flaw_element.attrib["url"]))
        if build_type == "static":
            # Static reports also expose the scanned artifact size.
            static_analysis_element = detailed_report_root_element.find("static-analysis")
            analysis_size_bytes = None
            if static_analysis_element is not None:
                analysis_size_bytes = static_analysis_element.attrib["analysis_size_bytes"]
            return flaws, analysis_size_bytes
        else:
            return flaws

    def get_data(self, include_static_builds=True, include_dynamic_builds=True, app_include_list=None, include_sandboxes=False):
        """Returns a list of populated apps"""
        apps = self._get_apps()
        if app_include_list:
            # Restrict to the explicitly requested app names.
            apps = [app for app in apps if app.name in app_include_list]
        print("{} applications found in Veracode account".format(len(apps)))
        for app in apps:
            app_info = self._get_app_info(app.id)
            app.business_unit = app_info["business_unit"]
            builds = self._get_builds(app.id, include_static_builds, include_dynamic_builds)
            # Let build_tools skip builds that were already processed.
            app.builds = [build for build in builds if self.build_tools.build_should_be_processed(app.id, build.id, build.policy_updated_date)]
            print(u"{}: {} policy builds".format(app.name, len(app.builds)))
            for build in app.builds:
                analysis_unit_attrib = self._get_build_info(app.id, build.id).find("analysis_unit").attrib
                if "published_date" in analysis_unit_attrib:
                    # Same index-22 colon removal as in _get_builds — see the
                    # comment there.
                    published_date_string = analysis_unit_attrib["published_date"][:22] + analysis_unit_attrib["published_date"][23:]
                    build.published_date = parser.parse(published_date_string).astimezone(pytz.utc)
                if build.type == "static":
                    # Static builds additionally report the analysis size.
                    build.flaws, build.analysis_size_bytes = self._get_flaws(build.id, build.type)
                else:
                    build.flaws = self._get_flaws(build.id, build.type)
            if include_sandboxes:
                app.sandboxes = self._get_sandboxes(app.id)
                print(u"{}: {} sandboxes".format(app.name, len(app.sandboxes)))
                for sandbox in app.sandboxes:
                    builds = self._get_builds(app.id, include_static_builds, include_dynamic_builds, sandbox.id)
                    sandbox.builds = [build for build in builds if self.build_tools.build_should_be_processed(app.id, build.id, build.policy_updated_date)]
                    for build in sandbox.builds:
                        analysis_unit_attrib = self._get_build_info(app.id, build.id, sandbox.id).find("analysis_unit").attrib
                        if "published_date" in analysis_unit_attrib:
                            published_date_string = analysis_unit_attrib["published_date"][:22] + analysis_unit_attrib["published_date"][23:]
                            build.published_date = parser.parse(published_date_string).astimezone(pytz.utc)
                        build.flaws, build.analysis_size_bytes = self._get_flaws(build.id, build.type)
        return apps

    def get_headers(self, build_type, include_sandbox=False):
        """Returns headers for a csv file"""
        # Prefix each model's headers so columns from different models
        # cannot collide in the combined CSV row.
        app_headers = ["app_" + header for header in models.App.to_headers()]
        build_headers = ["build_" + header for header in (models.StaticBuild.to_headers() if build_type == "static" else models.DynamicBuild.to_headers())]
        flaw_headers = ["flaw_" + header for header in(models.StaticFlaw.to_headers() if build_type == "static" else models.DynamicFlaw.to_headers())]
        headers = app_headers + build_headers + flaw_headers
        if include_sandbox:
            headers += ["sandbox_" + header for header in models.Sandbox.to_headers()]
        return headers
| |
"""fsmonitor_inotify.py FSMonitor subclass for inotify on Linux kernel >= 2.6.13"""
__author__ = "Wim Leers (work@wimleers.com)"
__version__ = "$Rev$"
__date__ = "$Date$"
__license__ = "GPL"
from fsmonitor import *
import pyinotify
from pyinotify import WatchManager, \
ThreadedNotifier, \
ProcessEvent, \
WatchManagerError
import time
import os
import stat
import sys
# Define exceptions.
class FSMonitorInotifyError(FSMonitorError):
    """Error raised by the inotify-backed FSMonitor subclass."""
class FSMonitorInotify(FSMonitor):
    """inotify support for FSMonitor"""

    # Translation table from generic FSMonitor event masks to the
    # corresponding pyinotify masks.
    EVENTMAPPING = {
        FSMonitor.CREATED             : pyinotify.IN_CREATE,
        FSMonitor.MODIFIED            : pyinotify.IN_MODIFY | pyinotify.IN_ATTRIB,
        FSMonitor.DELETED             : pyinotify.IN_DELETE,
        FSMonitor.MONITORED_DIR_MOVED : pyinotify.IN_MOVE_SELF,
        FSMonitor.DROPPED_EVENTS      : pyinotify.IN_Q_OVERFLOW,
    }

    def __init__(self, callback, persistent=False, trigger_events_for_initial_scan=False, ignored_dirs=[], dbfile="fsmonitor.db", parent_logger=None):
        FSMonitor.__init__(self, callback, persistent, trigger_events_for_initial_scan, ignored_dirs, dbfile, parent_logger)
        self.logger.info("FSMonitor class used: FSMonitorInotify.")
        self.wm = None
        self.notifier = None
        # Batched PathScanner DB updates, appended to by the event
        # processor and flushed by __process_queues().
        self.pathscanner_files_created = []
        self.pathscanner_files_modified = []
        self.pathscanner_files_deleted = []

    def __fsmonitor_event_to_inotify_event(self, event_mask):
        """map an FSMonitor event mask to the equivalent inotify event mask"""
        inotify_event_mask = 0
        for fsmonitor_event_mask in self.__class__.EVENTMAPPING.keys():
            if event_mask & fsmonitor_event_mask:
                inotify_event_mask = inotify_event_mask | self.__class__.EVENTMAPPING[fsmonitor_event_mask]
        return inotify_event_mask

    def inotify_path_to_monitored_path(self, path):
        """map a pathname (as received in an inotify event) to its
        corresponding monitored path

        Returns None when the path is under no monitored path.
        """
        for monitored_path in self.monitored_paths.keys():
            if os.path.commonprefix([path, monitored_path]) == monitored_path:
                return monitored_path

    def __add_dir(self, path, event_mask):
        """override of FSMonitor.__add_dir()"""
        # Immediately start monitoring this directory.
        event_mask_inotify = self.__fsmonitor_event_to_inotify_event(event_mask)
        try:
            wdd = self.wm.add_watch(path, event_mask_inotify, proc_fun=self.process_event, rec=True, auto_add=True, quiet=False)
        except WatchManagerError as e:
            # Python-3-compatible exception syntax (was the Python-2-only
            # "except X, e" / "raise E, msg" forms); message unchanged.
            raise FSMonitorError("Could not monitor '%s', reason: %s" % (path, e))
        # Verify that inotify is able to monitor this directory and all of its
        # subdirectories: pyinotify reports failures as negative descriptors.
        for monitored_path in wdd:
            if wdd[monitored_path] < 0:
                code = wdd[monitored_path]
                raise FSMonitorError("Could not monitor %s (%d)" % (monitored_path, code))
        self.monitored_paths[path] = MonitoredPath(path, event_mask, wdd)
        self.monitored_paths[path].monitoring = True
        if self.persistent:
            # Generate the missed events. This implies that events that
            # occurred while File Conveyor was offline (or not yet in use)
            # will *always* be generated, whether this is the first run or the
            # thousandth.
            FSMonitor.generate_missed_events(self, path)
        else:
            # Perform an initial scan of the directory structure. If this has
            # already been done, then it will return immediately.
            self.pathscanner.initial_scan(path)
        return self.monitored_paths[path]

    def __remove_dir(self, path):
        """override of FSMonitor.__remove_dir()"""
        if path in self.monitored_paths.keys():
            # NOTE(review): WatchManager.rm_watch() normally takes a watch
            # descriptor, not a path; quiet=True hides any failure — confirm.
            self.wm.rm_watch(path, rec=True, quiet=True)
            del self.monitored_paths[path]

    def run(self):
        # Setup. Ensure that this isn't interleaved with any other thread, so
        # that the DB setup continues as expected.
        self.lock.acquire()
        FSMonitor.setup(self)
        self.process_event = FSMonitorInotifyProcessEvent(self)
        self.lock.release()
        # Set up inotify.
        self.wm = WatchManager()
        self.notifier = ThreadedNotifier(self.wm, self.process_event)
        self.notifier.start()
        # Service the add/remove queues until stop() flags us to die.
        while not self.die:
            self.__process_queues()
            time.sleep(0.5)
        self.notifier.stop()

    def stop(self):
        """override of FSMonitor.stop()"""
        # Let the thread know it should die.
        self.lock.acquire()
        self.die = True
        self.lock.release()
        # Stop monitoring each monitored path. Iterate over a snapshot of
        # the keys: __remove_dir() mutates the dict while we loop (on
        # Python 3, .keys() is a live view and mutation would raise).
        for path in list(self.monitored_paths.keys()):
            self.__remove_dir(path)

    def __process_pathscanner_updates(self, update_list, callback):
        # Flush one batch of queued PathScanner DB updates under the lock.
        self.lock.acquire()
        if len(update_list) > 0:
            callback(update_list)
            del update_list[:] # Clear the list with updates.
        self.lock.release()

    def __process_queues(self):
        # Process "add monitored path" queue.
        self.lock.acquire()
        if not self.add_queue.empty():
            (path, event_mask) = self.add_queue.get()
            self.lock.release()
            self.__add_dir(path, event_mask)
        else:
            self.lock.release()
        # Process "remove monitored path" queue.
        self.lock.acquire()
        if not self.remove_queue.empty():
            # Bug fix: this previously read from add_queue, so queued
            # removals were never serviced and a queued addition could be
            # consumed and handed to __remove_dir() instead.
            path = self.remove_queue.get()
            self.lock.release()
            self.__remove_dir(path)
        else:
            self.lock.release()
        # These calls to PathScanner is what ensures that FSMonitor.db
        # remains up-to-date. (These lists of files to add, update and delete
        # from the DB are applied to PathScanner.)
        self.__process_pathscanner_updates(self.pathscanner_files_created,  self.pathscanner.add_files)
        self.__process_pathscanner_updates(self.pathscanner_files_modified, self.pathscanner.update_files)
        self.__process_pathscanner_updates(self.pathscanner_files_deleted,  self.pathscanner.delete_files)
class FSMonitorInotifyProcessEvent(ProcessEvent):
    """Bridges pyinotify callbacks to FSMonitor events and queued
    PathScanner DB updates."""

    # On Linux, you can choose which encoding is used for your file system's
    # file names. Hence, we better detect the file system's encoding so we
    # know what to decode from in __ensure_unicode().
    encoding = sys.getfilesystemencoding()

    def __init__(self, fsmonitor):
        ProcessEvent.__init__(self)
        self.fsmonitor_ref = fsmonitor
        self.discovered_through = "inotify"

    def __update_pathscanner_db(self, pathname, event_type):
        """use PathScanner.(add|update|delete)_files() to queue updates for
        PathScanner's DB
        """
        (path, filename) = os.path.split(pathname)
        if event_type == FSMonitor.DELETED:
            # Build tuple for deletion of row in PathScanner's DB.
            t = (path, filename)
            self.fsmonitor_ref.pathscanner_files_deleted.append(t)
        else:
            # Build tuple for PathScanner's DB of the form (path, filename,
            # mtime), with mtime = -1 when it's a directory.
            st = os.stat(pathname)
            is_dir = stat.S_ISDIR(st.st_mode)
            if not is_dir:
                mtime = st[stat.ST_MTIME]
                t = (path, filename, mtime)
            else:
                t = (path, filename, -1)
            # Update PathScanner's DB.
            if event_type == FSMonitor.CREATED:
                self.fsmonitor_ref.pathscanner_files_created.append(t)
            else:
                self.fsmonitor_ref.pathscanner_files_modified.append(t)

    @classmethod
    def __ensure_unicode(cls, event):
        # Decode the byte paths reported by inotify with the file system's
        # encoding so the rest of the pipeline only sees unicode.
        event.path = event.path.decode(cls.encoding)
        event.pathname = event.pathname.decode(cls.encoding)
        return event

    def __handle_file_event(self, event, inotify_event_name, fsmonitor_event_type):
        """Shared body of the create/delete/modify/attrib callbacks:
        decode the paths, honor ignored directories, queue the PathScanner
        DB update and trigger the FSMonitor event. Factored out of four
        previously near-identical process_IN_* methods.
        """
        event = self.__ensure_unicode(event)
        if FSMonitor.is_in_ignored_directory(self.fsmonitor_ref, event.path):
            return
        monitored_path = self.fsmonitor_ref.inotify_path_to_monitored_path(event.path)
        self.fsmonitor_ref.logger.debug("inotify reports that an %s event has occurred for '%s'." % (inotify_event_name, event.pathname))
        self.__update_pathscanner_db(event.pathname, fsmonitor_event_type)
        FSMonitor.trigger_event(self.fsmonitor_ref, monitored_path, event.pathname, fsmonitor_event_type, self.discovered_through)

    def process_IN_CREATE(self, event):
        self.__handle_file_event(event, "IN_CREATE", FSMonitor.CREATED)

    def process_IN_DELETE(self, event):
        self.__handle_file_event(event, "IN_DELETE", FSMonitor.DELETED)

    def process_IN_MODIFY(self, event):
        self.__handle_file_event(event, "IN_MODIFY", FSMonitor.MODIFIED)

    def process_IN_ATTRIB(self, event):
        # Attribute changes are surfaced as plain modifications.
        self.__handle_file_event(event, "IN_ATTRIB", FSMonitor.MODIFIED)

    def process_IN_MOVE_SELF(self, event):
        # A monitored directory itself was moved; no DB update is queued.
        event = self.__ensure_unicode(event)
        if FSMonitor.is_in_ignored_directory(self.fsmonitor_ref, event.path):
            return
        self.fsmonitor_ref.logger.debug("inotify reports that an IN_MOVE_SELF event has occurred for '%s'." % (event.pathname))
        monitored_path = self.fsmonitor_ref.inotify_path_to_monitored_path(event.path)
        FSMonitor.trigger_event(self.fsmonitor_ref, monitored_path, event.pathname, FSMonitor.MONITORED_DIR_MOVED, self.discovered_through)

    def process_IN_Q_OVERFLOW(self, event):
        # The kernel's inotify queue overflowed: events have been dropped.
        event = self.__ensure_unicode(event)
        if FSMonitor.is_in_ignored_directory(self.fsmonitor_ref, event.path):
            return
        self.fsmonitor_ref.logger.debug("inotify reports that an IN_Q_OVERFLOW event has occurred for '%s'." % (event.pathname))
        monitored_path = self.fsmonitor_ref.inotify_path_to_monitored_path(event.path)
        FSMonitor.trigger_event(self.fsmonitor_ref, monitored_path, event.pathname, FSMonitor.DROPPED_EVENTS, self.discovered_through)

    def process_default(self, event):
        # Event not supported!
        self.fsmonitor_ref.logger.debug("inotify reports that an unsupported event (mask: %d, %s) has occurred for '%s'." % (event.mask, event.maskname, event.pathname))
| |
"""
Blaze storage backend, structured collection of unboxed memory. We
follow the same conventions as CPython in that we use the arena model of
memory.
It does the mallocs of data as needed to shuffle blocks efficiently for
data that is passed to Numba. It is also one place for safe management
of blocks of data from SQL, Disk, IOPro, etc to allocate on to rather
than having every adaptor define memory wherever it feels like calling
malloc.
:Inspiration:
http://svn.python.org/projects/python/trunk/Python/pyarena.c
:Design Principles:
- Will write a Cython wrapper that should be drop in
replacable for ``libc.stdlib.malloc``.
- Be able to do zero copy network transfers straight into arena
buffers from ZeroMQ and MPI.
- Be able store the data on the GPU while executing Numba GPU kernels.
The problem is the same as a "distributed memory" problem over since we
have two storage substrates Siu will probably have more insight on how
this should be designed.
- Will almost certainly migrate this to C / Cython
"""
import os
import sys
import mmap
import ctypes
import bisect
import weakref
import traceback
import threading
import itertools
import numpy as np
from blaze.cutils import buffer_pointer
def address_of_buffer(buf):
    # Return an (address, length) pair for the memory backing ``buf``.
    if isinstance(buf, memoryview):
        # NOTE(review): id() gives the address of the memoryview *object*
        # (a CPython detail), not of the underlying buffer — this branch
        # looks wrong; confirm against callers before relying on it.
        return id(buf), len(buf)
    elif isinstance(buf, mmap.mmap):
        return buffer_pointer(buf)
    # Any other buffer type implicitly returns None.
# Alignment/rounding granularities for arena sizes, in bytes.
ALIGN_L2 = 2**17            # 128 KiB — presumably sized to L2 cache; confirm
ALIGN_L3 = 2**20            # 1 MiB — presumably sized to L3 cache; confirm
ALIGN_PAGE = mmap.PAGESIZE  # the OS virtual-memory page size
#------------------------------------------------------------------------
# Arenas
#------------------------------------------------------------------------
class Arena(object):
    """A contiguous, zero-initialized slab of anonymous mmap'ed memory
    from which the Heap carves blocks."""

    def __init__(self, size, name=None):
        # mmap(-1, size) creates an anonymous mapping: like malloc, but
        # the pages come back filled with '\x00'.
        self.block = mmap.mmap(-1, size)
        self.size = size
        # Bug fix: the constructor previously discarded its ``name``
        # argument ("self.name = None"), so arenas could never be labeled.
        self.name = name

    def write_raw(self, by):
        # NOTE(review): stub — only validates the argument type; nothing
        # is actually written into the mapping yet.
        assert isinstance(by, bytes)
#------------------------------------------------------------------------
# Heap
#------------------------------------------------------------------------
class Heap(object):
    """Best-fit block allocator over a growing list of mmap-backed Arenas.

    Modeled on CPython's ``multiprocessing.heap``: a block is an
    ``(arena, start, stop)`` triple. Freed blocks are coalesced with free
    neighbours and indexed by length for best-fit reuse. All public
    operations are serialized by a single lock.
    """

    _alignment = 8  # minimum alignment of returned blocks, in bytes

    def __init__(self, size=mmap.PAGESIZE, align=ALIGN_PAGE):
        self.align = align
        self._lock = threading.Lock()
        self._size = size                # size of the next arena to create
        self._lengths = []               # sorted lengths of free blocks
        self._len_to_seq = {}            # length -> list of free blocks
        self._start_to_block = {}        # (arena, start) -> free block
        self._stop_to_block = {}         # (arena, stop) -> free block
        self._allocated_blocks = set()   # blocks handed out by malloc()
        self._arenas = []                # keeps arena mmaps alive
        self._finalizers = {}            # id(owner object) -> Finalizer

    @staticmethod
    def _roundup(n, alignment):
        # Round n up to the next multiple of alignment (a power of two).
        mask = alignment - 1
        return (n + mask) & ~mask

    def free(self, block):
        """Return a block obtained from malloc() to the free lists."""
        # free a block returned by malloc()
        self._lock.acquire()
        try:
            self._allocated_blocks.remove(block)
            self._free(block)
        finally:
            self._lock.release()

    def malloc(self, size):
        """Allocate and return an (arena, start, stop) block of at least
        ``size`` bytes (rounded up to the heap's alignment)."""
        # Bug fix: sys.maxsize replaces the Python-2-only sys.maxint
        # (removed in Python 3; maxsize exists on 2.6+ as well).
        assert 0 <= size < sys.maxsize
        self._lock.acquire()
        try:
            size = self._roundup(max(size, 1), self._alignment)
            (arena, start, stop) = self._malloc(size)
            new_stop = start + size
            if new_stop < stop:
                # Give the unused tail of the chosen block back.
                self._free((arena, new_stop, stop))
            block = (arena, start, new_stop)
            self._allocated_blocks.add(block)
            return block
        finally:
            self._lock.release()

    def _malloc(self, size):
        # Best-fit: find the smallest free block with length >= size.
        i = bisect.bisect_left(self._lengths, size)
        if i == len(self._lengths):
            # No free block is big enough — create a fresh arena.
            length = self._roundup(max(self._size, size), self.align)
            self._size *= 2  # grow geometrically to amortize arena creation
            arena = Arena(length)
            self._arenas.append(arena)
            return (arena, 0, length)
        else:
            length = self._lengths[i]
            seq = self._len_to_seq[length]
            block = seq.pop()
            if not seq:
                del self._len_to_seq[length], self._lengths[i]
            # Deregister the block from the free-neighbour indexes.
            (arena, start, stop) = block
            del self._start_to_block[(arena, start)]
            del self._stop_to_block[(arena, stop)]
            return block

    def _free(self, block):
        # free location and try to merge with neighbours
        (arena, start, stop) = block
        try:
            prev_block = self._stop_to_block[(arena, start)]
        except KeyError:
            pass
        else:
            # A free block ends exactly where we start: absorb it.
            start, _ = self._absorb(prev_block)
        try:
            next_block = self._start_to_block[(arena, stop)]
        except KeyError:
            pass
        else:
            # A free block starts exactly where we stop: absorb it.
            _, stop = self._absorb(next_block)
        block = (arena, start, stop)
        length = stop - start
        try:
            self._len_to_seq[length].append(block)
        except KeyError:
            self._len_to_seq[length] = [block]
            bisect.insort(self._lengths, length)
        self._start_to_block[(arena, start)] = block
        self._stop_to_block[(arena, stop)] = block

    def _absorb(self, block):
        # deregister this block so it can be merged with a neighbour
        (arena, start, stop) = block
        del self._start_to_block[(arena, start)]
        del self._stop_to_block[(arena, stop)]
        length = stop - start
        seq = self._len_to_seq[length]
        seq.remove(block)
        if not seq:
            del self._len_to_seq[length]
            self._lengths.remove(length)
        return start, stop
#------------------------------------------------------------------------
# Heap Objects
#------------------------------------------------------------------------
class Buffer(object):
    """A single heap allocation whose block is returned to the heap when
    the Buffer's finalizer runs."""

    def __init__(self, size, heap):
        # Bug fix: sys.maxsize replaces the Python-2-only sys.maxint
        # (removed in Python 3; maxsize exists on 2.6+ as well).
        assert 0 <= size < sys.maxsize
        block = heap.malloc(size)
        self._state = (block, size)
        # Register cleanup so the block goes back to the heap's free lists.
        Finalizer(heap, self, heap.free, args=(block,))

    def get_address(self):
        """Absolute address of the first byte of this buffer's block."""
        (arena, start, stop), size = self._state
        address, length = buffer_pointer(arena.block)
        assert size <= length
        return address + start

    def get_size(self):
        """Requested (un-rounded) size of the allocation, in bytes."""
        return self._state[1]
def allocate_raw(heap, nbytes):
    """Allocate ``nbytes`` on ``heap``.

    Returns a tuple of (absolute address, heap block, ctypes char array
    viewing the allocated bytes).
    """
    buf = Buffer(nbytes, heap)
    address = buf.get_address()
    # Only the block itself is needed by callers; the previous version also
    # unpacked (arena, start, stop) into unused locals.
    block, _size = buf._state
    return address, block, (ctypes.c_char * nbytes).from_address(address)
def allocate_numpy(heap, dtype, shape):
    """Allocate a NumPy array conforming to the given shape on the heap.

    Returns (absolute address, ndarray of ``count`` items of ``dtype``
    viewing the arena's buffer).
    """
    count = np.prod(shape)
    nbytes = dtype.itemsize * count
    buf = Buffer(nbytes, heap)
    address = buf.get_address()
    block, _size = buf._state
    arena = block[0]
    # NOTE(review): the view starts at the beginning of the arena, not at the
    # block's start offset -- looks wrong for blocks reused mid-arena; the
    # returned `address` does include the offset. TODO confirm with callers.
    return address, np.frombuffer(arena.block, dtype, count)
def allocate_carray(heap, dtype, chunksize):
    """Allocate a buffer capable of holding a carray chunk.

    Returns (absolute address, ndarray viewing the arena's buffer), matching
    the contract of allocate_numpy().
    """
    nbytes = dtype.itemsize * chunksize
    # BUG FIX: Buffer requires the heap argument; the old call Buffer(size)
    # always raised TypeError.
    buf = Buffer(nbytes, heap)
    address = buf.get_address()
    block, _size = buf._state
    arena = block[0]
    # Pass dtype and count as allocate_numpy does; the old call decoded the
    # whole arena as default float64.
    return address, np.frombuffer(arena.block, dtype, chunksize)
def numpy_pointer(numpy_array, ctype=ctypes.c_void_p):
    """Return the data pointer of ``numpy_array`` cast to ``ctype``."""
    interface = numpy_array.ctypes
    return interface.data_as(ctype)
#------------------------------------------------------------------------
# Finalizers
#------------------------------------------------------------------------
class Finalizer(object):
    """A one-shot callback registered on a heap, keyed by ``id(obj)``.

    Construction records the finalizer in ``heap._finalizers``; calling it
    deregisters the entry and then invokes the callback.  Calling a
    finalizer twice raises RuntimeError (the registry entry is gone).
    """

    def __init__(self, heap, obj, callback, args=(), kwargs=None):
        self._heap = heap
        self._callback = callback
        self._args = args
        self._kwargs = kwargs if kwargs else {}
        self.ident = id(obj)
        heap._finalizers[self.ident] = self

    def __call__(self, wr=None):
        try:
            del self._heap._finalizers[self.ident]
        except KeyError:
            # Already ran (or never registered): refuse to fire twice.
            raise RuntimeError()
        self._callback(*self._args, **self._kwargs)
        self._weakref = None
def finalize(heap):
    """Run every finalizer registered on ``heap``, then free its blocks.

    All finalizers are attempted even if one fails; the first exception is
    recorded.  On success the remaining ``heap._allocated_blocks`` are
    freed; on failure they are left alone and RuntimeError is raised.
    """
    items = [x for x in heap._finalizers.items()]
    error = None
    for key, finalizer in items:
        try:
            finalizer()
        except Exception as exc:
            # BUG FIX: `error` was never assigned, so the failure branch
            # below was dead code and one raising finalizer aborted the loop.
            if error is None:
                error = exc
    if not error:
        for block in heap._allocated_blocks:
            heap.free(block)
    else:
        raise RuntimeError("Could not free blocks because finalizer failed")
| |
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import subprocess
import ddt
import mock
import testtools
import zmq
from rally.plugins.agent import agent
# Dotted path of the module under test; used to build mock.patch targets.
BASE = "rally.plugins.agent.agent"
@ddt.ddt
class CommandExecutorTestCase(testtools.TestCase):
    """Unit tests for agent.CommandExecutor.

    subprocess, tempfile, threading and open() are patched inside the
    agent module (see BASE), so no real processes or files are created.
    """
    def test__thread_target(self):
        # _thread_target waits on the child process and records its exit code.
        mock_process = mock.Mock(**{"wait.return_value": "foobar"})
        executor = agent.CommandExecutor({}, {})
        executor._thread_target(mock_process)
        self.assertEqual("foobar", executor.exit_code)
        mock_process.wait.assert_called_once_with()
    # Each dict is one ddt scenario; "expected" is popped before the call and
    # the rest is passed to _get_redirection as keyword arguments.
    @ddt.data(
        {"config": "null",
         "expected": "null"},
        {"config": "",
         "expected": subprocess.PIPE},
        {"config": "tmpfile",
         "expected": "namedtemp"},
        {"config": "stdout", "is_stderr": True,
         "expected": subprocess.STDOUT},
        {"config": "", "thread": True,
         "expected": "namedtemp"},
        {"config": "null", "thread": True,
         "expected": "null"},
    )
    @mock.patch("%s.open" % BASE, return_value="null")
    @mock.patch("%s.tempfile.NamedTemporaryFile" % BASE,
                return_value="namedtemp")
    def test__get_redirection(self, param, mock_tempfile_named_temporary_file,
                              mock_agent_open):
        expected = param.pop("expected")
        actual = agent.CommandExecutor._get_redirection(**param)
        self.assertEqual(expected, actual)
        if expected == "null":
            # "null" redirection must open /dev/null for binary writing.
            mock_agent_open.assert_called_once_with("/dev/null", "wb")
        elif expected == "namedtemp":
            mock_tempfile_named_temporary_file.assert_called_once_with()
    def test__get_stdout_stderr(self):
        # _get_stdout_stderr must forward the request's stdout/stderr settings
        # to _get_redirection, marking the second call as stderr.
        executor = agent.CommandExecutor(
            {
                "foo": "bar", "thread": "sometimes",
                "stdout": "to", "stderr": "hell"
            },
            {
                "resp": "foobar"
            })
        executor._get_redirection = mock.Mock()
        executor._get_stdout_stderr()
        self.assertEqual(
            [
                mock.call("to", thread="sometimes"),
                mock.call("hell", thread="sometimes", is_stderr=True)
            ],
            executor._get_redirection.mock_calls)
    def _get_executor_for_run(self, thread=False):
        # Helper: build an executor plus the request/response dicts it holds.
        req = {
            "foo": "bar",
            "path": ["some", "path", "to", "executable"],
            "thread": thread
        }
        resp = {"resp": "foobar"}
        executor = agent.CommandExecutor(req, resp)
        return executor, req, resp
    @mock.patch("%s.subprocess.Popen" % BASE)
    def test_run(self, mock_subprocess_popen):
        # Synchronous run: output is captured via communicate() and merged
        # into the response dict along with the exit code.
        executor, req, resp = self._get_executor_for_run()
        executor._get_stdout_stderr = mock.Mock(
            return_value=("stdout_fh", "stderr_fh"))
        mock_subprocess_popen.return_value.communicate.return_value = (
            "stdout_out", "stderr_out")
        mock_subprocess_popen.return_value.wait.return_value = "barfoo"
        new_resp = executor.run()
        self.assertEqual(new_resp, resp)
        self.assertEqual(
            {
                "exit_code": "barfoo",
                "resp": "foobar",
                "stdout": "stdout_out".encode("utf-8"),
                "stderr": "stderr_out".encode("utf-8")
            },
            resp)
        mock_subprocess_popen.assert_called_once_with(
            req["path"], stdout="stdout_fh", stderr="stderr_fh",
            env=None)
        self.assertEqual(
            [
                mock.call.communicate(),
                mock.call.wait()
            ],
            mock_subprocess_popen.return_value.mock_calls)
    @mock.patch("%s.subprocess.Popen" % BASE)
    @mock.patch("%s.threading.Thread" % BASE)
    @mock.patch("%s.open" % BASE,
                side_effect=["stdout_new_fh", "stderr_new_fh"])
    def test_run_thread(self, mock_agent_open,
                        mock_threading_thread, mock_subprocess_popen):
        # Threaded run: a background thread waits on the process and the
        # temp-file names are reopened for reading and reported back.
        executor, req, resp = self._get_executor_for_run(thread=True)
        mock_stdout = mock.Mock(name="stdout")
        mock_stderr = mock.Mock(name="stderr")
        executor._get_stdout_stderr = mock.Mock(
            return_value=(mock_stdout, mock_stderr))
        mock_subprocess_popen.return_value.wait.return_value = "barfoo"
        new_resp = executor.run()
        self.assertEqual(new_resp, resp)
        self.assertEqual(
            {
                "resp": "foobar",
                "stdout_fh": mock_stdout.name,
                "stderr_fh": mock_stderr.name
            },
            resp)
        mock_subprocess_popen.assert_called_once_with(
            req["path"], stdout=mock_stdout, stderr=mock_stderr, env=None)
        mock_threading_thread.assert_called_once_with(
            target=executor._thread_target,
            args=(mock_subprocess_popen.return_value,))
        mock_threading_thread.return_value.start.assert_called_once_with()
        self.assertEqual(mock_threading_thread.return_value,
                         executor.thread)
        self.assertEqual(
            [
                mock.call(mock_stdout.name, "rb"),
                mock.call(mock_stderr.name, "rb"),
            ],
            mock_agent_open.mock_calls
        )
        self.assertEqual("stdout_new_fh", executor.stdout_fh)
        self.assertEqual("stderr_new_fh", executor.stderr_fh)
        self.assertEqual(mock_stdout, executor.child_stdout_fh)
        self.assertEqual(mock_stderr, executor.child_stderr_fh)
    def test_clear(self):
        # clear() must close all four file handles (parent and child sides).
        executor = agent.CommandExecutor({}, {})
        def get_names():
            for name in ("stdout_fh", "stderr_fh"):
                for prefix in ("", "child_"):
                    yield prefix + name
        for name in get_names():
            setattr(executor, name, mock.Mock())
        executor.clear()
        for name in get_names():
            getattr(executor, name).close.assert_called_once_with()
@ddt.ddt
class AgentTestCase(testtools.TestCase):
    """Unit tests for agent.Agent.

    ZeroMQ socket setup is replaced by patching Agent.init_subscribe_zmq and
    Agent.init_push_zmq (see _start_zmq_mocks), so no sockets are opened.
    """
    # Patchers started by _start_zmq_mocks; stopped in tearDown.
    mocks = []
    def _start_zmq_mocks(self):
        # FIXME(pboldin): this should be done clearer
        self.mocks = [
            mock.patch("%s.Agent.init_subscribe_zmq" % BASE),
            mock.patch("%s.Agent.init_push_zmq" % BASE)
        ]
        return [mock_.start() for mock_ in self.mocks]
    def _stop_zmq_mocks(self):
        for mock_ in self.mocks:
            mock_.stop()
    def tearDown(self):
        super(AgentTestCase, self).tearDown()
        self._stop_zmq_mocks()
    @mock.patch("%s.uuid.uuid4" % BASE, return_value="uuid4")
    def test___init__(self, mock_uuid_uuid4):
        # A fresh agent gets a uuid4 agent_id and wires up both sockets.
        mock_init_subscribe_zmq, mock_init_push_zmq = self._start_zmq_mocks()
        agent_instance = agent.Agent("subscribe_url", "push_url")
        self.assertEqual(mock_uuid_uuid4.return_value, agent_instance.agent_id)
        mock_init_subscribe_zmq.assert_called_once_with("subscribe_url")
        mock_init_push_zmq.assert_called_once_with("push_url")
    @mock.patch("%s.zmq.Context" % BASE)
    def test_init_zmq(self, mock_zmq_context):
        # Exercises the real init_*_zmq methods against a mocked zmq.Context.
        agent.Agent("subscribe_url", "push_url")
        self.assertEqual(
            [
                # SUB socket
                # zmq.Context()
                mock.call(),
                # context.socket(zmq.SUB)
                mock.call().socket(zmq.SUB),
                # socket.connect
                mock.call().socket().connect("subscribe_url"),
                # socket.setsockopt_string(zmq.SUBSCRIBE...)
                mock.call().socket().setsockopt_string(zmq.SUBSCRIBE, u""),
                # PUSH socket
                # zmq.Context()
                mock.call(),
                # context.socket(zmq.PUSH)
                mock.call().socket(zmq.PUSH),
                # socket.connect
                mock.call().socket().connect("push_url"),
            ],
            mock_zmq_context.mock_calls)
    # Scenarios: a request is returned only when its "target" matches the
    # agent_id (string or list membership); expected=True means "the request
    # itself is returned", None means "filtered out".
    @ddt.data(
        {
            "agent_id": "abc",
            "recv_json": {
                "target": "abc",
                "foo": "bar"
            },
        },
        {
            "agent_id": "abc",
            "recv_json": {
                "target": ["abc", "def"],
                "foo": "bar"
            },
        },
        {
            "recv_json": {
                "target": "abc",
                "foo": "bar"
            },
            "expected": None
        },
        {
            "recv_json": {"foo": "bar"},
        },
    )
    @ddt.unpack
    def test_recv_request(self, agent_id="foobar",
                          recv_json={}, expected=True):
        mock_init_subscribe_zmq, mock_init_push_zmq = self._start_zmq_mocks()
        subscribe_socket = mock_init_subscribe_zmq.return_value
        subscribe_socket.recv_json.return_value = recv_json
        agent_instance = agent.Agent("subscribe_url", "push_url",
                                     agent_id=agent_id)
        retval = agent_instance.recv_request()
        subscribe_socket.recv_json.assert_called_once_with()
        if expected is True:
            expected = recv_json
        self.assertEqual(expected, retval)
    def test_do_default(self):
        # Unknown actions are rejected by the default handler.
        self._start_zmq_mocks()
        agent_instance = agent.Agent("subscribe_url", "push_url")
        self.assertRaises(ValueError, agent_instance.do_default,
                          {"action": "foobar"}, None)
    def test_loop_none(self):
        # A filtered-out request (None) must not produce a reply.
        self._start_zmq_mocks()
        agent_instance = agent.Agent("subscribe_url", "push_url")
        agent_instance.recv_request = mock.Mock(return_value=None)
        agent_instance.loop()
        agent_instance.recv_request.assert_called_once_with()
    def test_loop_unknown_action(self):
        # Unknown actions are answered with an error payload on the PUSH side.
        _, mock_init_push_zmq = self._start_zmq_mocks()
        push_socket = mock_init_push_zmq.return_value
        agent_instance = agent.Agent("subscribe_url", "push_url")
        agent_instance.recv_request = mock.Mock(
            return_value={
                "action": "unknown",
                "req": "foobar"
            })
        agent_instance.loop()
        push_socket.send_json.assert_called_once_with(
            {
                "error": "Action 'unknown' unknown.",
                "req": "foobar", "agent": agent_instance.agent_id
            }
        )
    def test_loop_mock_action(self):
        # A handler returning a dict replaces the reply entirely.
        _, mock_init_push_zmq = self._start_zmq_mocks()
        push_socket = mock_init_push_zmq.return_value
        agent_instance = agent.Agent("subscribe_url", "push_url")
        agent_instance.recv_request = mock.Mock(
            return_value={
                "action": "mock",
                "req": "foobar"
            })
        agent_instance.do_mock = mock.Mock(return_value={"custom": "return"})
        agent_instance.loop()
        push_socket.send_json.assert_called_once_with(
            {"custom": "return"}
        )
    def test_loop_mock_action_return_none(self):
        # A handler returning None mutates the response in place; the reply
        # is then built from req/agent_id plus the mutations.
        _, mock_init_push_zmq = self._start_zmq_mocks()
        push_socket = mock_init_push_zmq.return_value
        agent_instance = agent.Agent("subscribe_url", "push_url")
        agent_instance.recv_request = mock.Mock(
            return_value={
                "action": "mock",
                "req": "foobar"
            })
        def do_mock(req, resp):
            resp["foo"] = "bar"
        agent_instance.do_mock = do_mock
        agent_instance.loop()
        push_socket.send_json.assert_called_once_with(
            {
                "req": "foobar",
                "agent": agent_instance.agent_id,
                "foo": "bar"
            }
        )
    @mock.patch("%s.datetime.datetime" % BASE)
    def test_do_ping(self, mock_datetime_datetime):
        # ping replies with the current UTC time and leaves the request alone.
        self._start_zmq_mocks()
        agent_instance = agent.Agent("subscribe_url", "push_url")
        mock_datetime_datetime.utcnow.return_value.isoformat.return_value = (
            "foobar"
        )
        req = {}
        resp = {}
        agent_instance.do_ping(req, resp)
        self.assertEqual("foobar", resp["time"])
        self.assertEqual({}, req)
    def test_do_tail_no_executor(self):
        self._start_zmq_mocks()
        agent_instance = agent.Agent("subscribe_url", "push_url")
        self.assertRaises(ValueError, agent_instance.do_tail, {}, {})
    def test_do_tail(self):
        # tail reads `size` bytes from both captured streams and decodes them.
        self._start_zmq_mocks()
        agent_instance = agent.Agent("subscribe_url", "push_url")
        executor = agent_instance.executor = mock.Mock()
        def mock_pipe_return(pipe, return_value):
            pipe.read.return_value.decode.return_value = return_value
        mock_pipe_return(executor.stdout_fh, "stdout")
        mock_pipe_return(executor.stderr_fh, "stderr")
        req = {"size": "4200"}
        resp = {}
        agent_instance.do_tail(req, resp)
        def assert_pipe(pipe):
            pipe.read.assert_called_once_with(4200)
            pipe.read.return_value.decode.assert_called_once_with("utf-8")
        assert_pipe(executor.stdout_fh)
        assert_pipe(executor.stderr_fh)
        self.assertEqual(
            {
                "stdout": "stdout",
                "stderr": "stderr"
            },
            resp)
    def test_do_check_no_executor(self):
        self._start_zmq_mocks()
        agent_instance = agent.Agent("subscribe_url", "push_url")
        self.assertRaises(ValueError, agent_instance.do_check, {}, {})
    @ddt.unpack
    @ddt.data(
        {"req": {"clear": "true"}},
        {"req": {"wait": "true"}},
        {"req": {"clear": "true", "wait": "true"}},
        {"req": {}},
    )
    def test_do_check(self, req):
        # wait/clear both join the worker thread; clear also releases the
        # executor and drops the reference.
        self._start_zmq_mocks()
        agent_instance = agent.Agent("subscribe_url", "push_url")
        executor = agent_instance.executor = mock.Mock(exit_code="foobar")
        resp = {}
        agent_instance.do_check(req, resp)
        if req.get("wait") or req.get("clear"):
            executor.thread.join.assert_called_once_with()
        self.assertEqual({"exit_code": "foobar"}, resp)
        if req.get("clear"):
            executor.clear.assert_called_once_with()
            self.assertIsNone(agent_instance.executor)
    def test_do_command_already(self):
        # A second command while one is still running must be rejected.
        self._start_zmq_mocks()
        agent_instance = agent.Agent("subscribe_url", "push_url")
        agent_instance.executor = mock.Mock(thread=True)
        self.assertRaises(ValueError, agent_instance.do_command, {}, {})
    @mock.patch("%s.CommandExecutor" % BASE)
    def test_do_command(self, mock_agent_command_executor):
        self._start_zmq_mocks()
        agent_instance = agent.Agent("subscribe_url", "push_url")
        req = {}
        resp = {}
        agent_instance.do_command(req, resp)
        mock_agent_command_executor.assert_called_once_with(
            req, resp, agent_instance.agent_id)
        self.assertEqual(mock_agent_command_executor.return_value,
                         agent_instance.executor)
        mock_agent_command_executor.return_value.run.assert_called_once_with()
| |
# License
# Copyright (c) 2008, Armin Ronacher All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# Neither the name of the nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Taken from https://github.com/andreif/codegen
"""
codegen
~~~~~~~
Extension to ast that allows ast -> python code generation.
:copyright: Copyright 2008 by Armin Ronacher.
:license: BSD.
"""
from ast import *
# Mapping of AST operator node types to their surface syntax.
BINOP_SYMBOLS = {
    Add: '+',
    Sub: '-',
    Mult: '*',
    Div: '/',
    Mod: '%',
    Pow: '**',
    LShift: '<<',
    RShift: '>>',
    BitOr: '|',
    BitXor: '^',
    BitAnd: '&',
    FloorDiv: '//',
}
BOOLOP_SYMBOLS = {
    And: 'and',
    Or: 'or',
}
CMPOP_SYMBOLS = {
    Eq: '==',
    NotEq: '!=',
    Lt: '<',
    LtE: '<=',
    Gt: '>',
    GtE: '>=',
    Is: 'is',
    IsNot: 'is not',
    In: 'in',
    NotIn: 'not in',
}
UNARYOP_SYMBOLS = {
    Invert: '~',
    Not: 'not',
    UAdd: '+',
    USub: '-',
}
def to_source(node, indent_with=' ' * 4, add_line_information=False):
    """Convert an AST node tree back into Python source code.

    Useful for debugging, especially with custom ASTs not generated by
    Python itself.  The generated source may be evaluable even when the AST
    itself is not compilable, because the AST carries extra information
    that is dropped during conversion.

    Each indentation level is rendered as `indent_with` (four spaces by
    default, per PEP 8).  When `add_line_information` is true, comments
    with the statement nodes' line numbers are emitted, which helps spot
    wrong lineno information.
    """
    generator = SourceGenerator(indent_with, add_line_information)
    generator.visit(node)
    pieces = generator.result
    return ''.join(pieces)
class SourceGenerator(NodeVisitor):
    """This visitor is able to transform a well formed syntax tree into python
    sourcecode. For more details have a look at the docstring of the
    `to_source` function.
    """
    def __init__(self, indent_with, add_line_information=False):
        # result accumulates string fragments; joined by to_source().
        self.result = []
        self.indent_with = indent_with
        self.add_line_information = add_line_information
        self.indentation = 0
        # Number of pending newlines to flush before the next write().
        self.new_lines = 0
    def write(self, x):
        # Flush pending newlines + current indentation, then emit x.
        if self.new_lines:
            if self.result:
                self.result.append('\n' * self.new_lines)
            self.result.append(self.indent_with * self.indentation)
            self.new_lines = 0
        self.result.append(x)
    def newline(self, node=None, extra=0):
        self.new_lines = max(self.new_lines, 1 + extra)
        if node is not None and self.add_line_information:
            self.write('# line: %s' % node.lineno)
            self.new_lines = 1
    def body(self, statements):
        # NOTE(review): `new_line` (singular) is never read anywhere; kept
        # as-is for compatibility with the upstream codegen module.
        self.new_line = True
        self.indentation += 1
        for stmt in statements:
            self.visit(stmt)
        self.indentation -= 1
    def body_or_else(self, node):
        self.body(node.body)
        if node.orelse:
            self.newline()
            self.write('else:')
            self.body(node.orelse)
    def signature(self, node):
        # Emit an argument list; want_comma is a mutable flag shared with
        # the nested helper (py2-compatible nonlocal).
        want_comma = []
        def write_comma():
            if want_comma:
                self.write(', ')
            else:
                want_comma.append(True)
        # Defaults align with the *last* len(defaults) positional args.
        padding = [None] * (len(node.args) - len(node.defaults))
        for arg, default in zip(node.args, padding + node.defaults):
            write_comma()
            self.visit(arg)
            if default is not None:
                self.write('=')
                self.visit(default)
        if node.vararg is not None:
            write_comma()
            self.write('*' + node.vararg)
        if node.kwarg is not None:
            write_comma()
            self.write('**' + node.kwarg)
    def decorators(self, node):
        for decorator in node.decorator_list:
            self.newline(decorator)
            self.write('@')
            self.visit(decorator)
    # Statements
    def visit_Assert(self, node):
        self.newline(node)
        self.write('assert ')
        self.visit(node.test)
        if node.msg is not None:
            self.write(', ')
            self.visit(node.msg)
    def visit_Assign(self, node):
        self.newline(node)
        for idx, target in enumerate(node.targets):
            if idx:
                self.write(', ')
            self.visit(target)
        self.write(' = ')
        self.visit(node.value)
    def visit_AugAssign(self, node):
        self.newline(node)
        self.visit(node.target)
        self.write(' ' + BINOP_SYMBOLS[type(node.op)] + '= ')
        self.visit(node.value)
    def visit_ImportFrom(self, node):
        self.newline(node)
        self.write('from %s%s import ' % ('.' * node.level, node.module))
        for idx, item in enumerate(node.names):
            if idx:
                self.write(', ')
            self.write(item)
    def visit_Import(self, node):
        self.newline(node)
        for item in node.names:
            self.write('import ')
            self.visit(item)
    def visit_Expr(self, node):
        self.newline(node)
        self.generic_visit(node)
    def visit_FunctionDef(self, node):
        self.newline(extra=1)
        self.decorators(node)
        self.newline(node)
        self.write('def %s(' % node.name)
        self.visit(node.args)
        self.write('):')
        self.body(node.body)
    def visit_ClassDef(self, node):
        # have_args tracks whether '(' has been opened yet.
        have_args = []
        def paren_or_comma():
            if have_args:
                self.write(', ')
            else:
                have_args.append(True)
                self.write('(')
        self.newline(extra=2)
        self.decorators(node)
        self.newline(node)
        self.write('class %s' % node.name)
        for base in node.bases:
            paren_or_comma()
            self.visit(base)
        # XXX: the if here is used to keep this module compatible
        # with python 2.6.
        if hasattr(node, 'keywords'):
            for keyword in node.keywords:
                paren_or_comma()
                self.write(keyword.arg + '=')
                self.visit(keyword.value)
            if node.starargs is not None:
                paren_or_comma()
                self.write('*')
                self.visit(node.starargs)
            if node.kwargs is not None:
                paren_or_comma()
                self.write('**')
                self.visit(node.kwargs)
        self.write(have_args and '):' or ':')
        self.body(node.body)
    def visit_If(self, node):
        self.newline(node)
        self.write('if ')
        self.visit(node.test)
        self.write(':')
        self.body(node.body)
        # Collapse nested `else: if ...` chains into `elif`.
        while True:
            else_ = node.orelse
            if len(else_) == 0:
                break
            elif len(else_) == 1 and isinstance(else_[0], If):
                node = else_[0]
                self.newline()
                self.write('elif ')
                self.visit(node.test)
                self.write(':')
                self.body(node.body)
            else:
                self.newline()
                self.write('else:')
                self.body(else_)
                break
    def visit_For(self, node):
        self.newline(node)
        self.write('for ')
        self.visit(node.target)
        self.write(' in ')
        self.visit(node.iter)
        self.write(':')
        self.body_or_else(node)
    def visit_While(self, node):
        self.newline(node)
        self.write('while ')
        self.visit(node.test)
        self.write(':')
        self.body_or_else(node)
    def visit_With(self, node):
        self.newline(node)
        self.write('with ')
        self.visit(node.context_expr)
        if node.optional_vars is not None:
            self.write(' as ')
            self.visit(node.optional_vars)
        self.write(':')
        self.body(node.body)
    def visit_Pass(self, node):
        self.newline(node)
        self.write('pass')
    def visit_Print(self, node):
        # XXX: python 2.6 only
        self.newline(node)
        self.write('print ')
        want_comma = False
        if node.dest is not None:
            self.write(' >> ')
            self.visit(node.dest)
            want_comma = True
        for value in node.values:
            if want_comma:
                self.write(', ')
            self.visit(value)
            want_comma = True
        if not node.nl:
            self.write(',')
    def visit_Delete(self, node):
        self.newline(node)
        self.write('del ')
        # BUG FIX: the targets live on node.targets; iterating the Delete
        # node itself raised TypeError (and lacked enumerate indices).
        for idx, target in enumerate(node.targets):
            if idx:
                self.write(', ')
            self.visit(target)
    def visit_TryExcept(self, node):
        self.newline(node)
        self.write('try:')
        self.body(node.body)
        for handler in node.handlers:
            self.visit(handler)
    def visit_TryFinally(self, node):
        self.newline(node)
        self.write('try:')
        self.body(node.body)
        self.newline(node)
        self.write('finally:')
        self.body(node.finalbody)
    def visit_Global(self, node):
        self.newline(node)
        self.write('global ' + ', '.join(node.names))
    def visit_Nonlocal(self, node):
        self.newline(node)
        self.write('nonlocal ' + ', '.join(node.names))
    def visit_Return(self, node):
        self.newline(node)
        if node.value is None:
            self.write('return')
        else:
            self.write('return ')
            self.visit(node.value)
    def visit_Break(self, node):
        self.newline(node)
        self.write('break')
    def visit_Continue(self, node):
        self.newline(node)
        self.write('continue')
    def visit_Raise(self, node):
        # XXX: Python 2.6 / 3.0 compatibility
        self.newline(node)
        self.write('raise')
        if hasattr(node, 'exc') and node.exc is not None:
            self.write(' ')
            self.visit(node.exc)
            if node.cause is not None:
                self.write(' from ')
                self.visit(node.cause)
        elif hasattr(node, 'type') and node.type is not None:
            self.visit(node.type)
            if node.inst is not None:
                self.write(', ')
                self.visit(node.inst)
            if node.tback is not None:
                self.write(', ')
                self.visit(node.tback)
    # Expressions
    def visit_Attribute(self, node):
        self.visit(node.value)
        self.write('.' + node.attr)
    def visit_Call(self, node):
        want_comma = []
        def write_comma():
            if want_comma:
                self.write(', ')
            else:
                want_comma.append(True)
        self.visit(node.func)
        self.write('(')
        for arg in node.args:
            write_comma()
            self.visit(arg)
        for keyword in node.keywords:
            write_comma()
            self.write(keyword.arg + '=')
            self.visit(keyword.value)
        if node.starargs is not None:
            write_comma()
            self.write('*')
            self.visit(node.starargs)
        if node.kwargs is not None:
            write_comma()
            self.write('**')
            self.visit(node.kwargs)
        self.write(')')
    def visit_Name(self, node):
        self.write(node.id)
    def visit_Str(self, node):
        self.write(repr(node.s))
    def visit_Bytes(self, node):
        self.write(repr(node.s))
    def visit_Num(self, node):
        self.write(repr(node.n))
    def visit_Tuple(self, node):
        self.write('(')
        idx = -1
        for idx, item in enumerate(node.elts):
            if idx:
                self.write(', ')
            self.visit(item)
        # One-element tuples need the trailing comma.
        self.write(idx and ')' or ',)')
    def sequence_visit(left, right):
        # Factory producing visitors for list/set literals.
        def visit(self, node):
            self.write(left)
            for idx, item in enumerate(node.elts):
                if idx:
                    self.write(', ')
                self.visit(item)
            self.write(right)
        return visit
    visit_List = sequence_visit('[', ']')
    visit_Set = sequence_visit('{', '}')
    del sequence_visit
    def visit_Dict(self, node):
        self.write('{')
        for idx, (key, value) in enumerate(zip(node.keys, node.values)):
            if idx:
                self.write(', ')
            self.visit(key)
            self.write(': ')
            self.visit(value)
        self.write('}')
    def visit_BinOp(self, node):
        self.visit(node.left)
        self.write(' %s ' % BINOP_SYMBOLS[type(node.op)])
        self.visit(node.right)
    def visit_BoolOp(self, node):
        self.write('(')
        for idx, value in enumerate(node.values):
            if idx:
                self.write(' %s ' % BOOLOP_SYMBOLS[type(node.op)])
            self.visit(value)
        self.write(')')
    def visit_Compare(self, node):
        self.write('(')
        self.visit(node.left)
        for op, right in zip(node.ops, node.comparators):
            self.write(' %s ' % CMPOP_SYMBOLS[type(op)])
            self.visit(right)
        self.write(')')
    def visit_UnaryOp(self, node):
        self.write('(')
        op = UNARYOP_SYMBOLS[type(node.op)]
        self.write(op)
        if op == 'not':
            self.write(' ')
        self.visit(node.operand)
        self.write(')')
    def visit_Subscript(self, node):
        self.visit(node.value)
        self.write('[')
        self.visit(node.slice)
        self.write(']')
    def visit_Slice(self, node):
        if node.lower is not None:
            self.visit(node.lower)
        self.write(':')
        if node.upper is not None:
            self.visit(node.upper)
        if node.step is not None:
            self.write(':')
            if not (isinstance(node.step, Name) and node.step.id == 'None'):
                self.visit(node.step)
    def visit_ExtSlice(self, node):
        # BUG FIX: the original iterated `node.dims` without enumerate(),
        # which failed to unpack (idx, item).
        for idx, item in enumerate(node.dims):
            if idx:
                self.write(', ')
            self.visit(item)
    def visit_Yield(self, node):
        self.write('yield ')
        self.visit(node.value)
    def visit_Lambda(self, node):
        self.write('lambda ')
        self.visit(node.args)
        self.write(': ')
        self.visit(node.body)
    def visit_Ellipsis(self, node):
        self.write('Ellipsis')
    def generator_visit(left, right):
        # Factory producing visitors for comprehension expressions.
        def visit(self, node):
            self.write(left)
            self.visit(node.elt)
            for comprehension in node.generators:
                self.visit(comprehension)
            self.write(right)
        return visit
    visit_ListComp = generator_visit('[', ']')
    visit_GeneratorExp = generator_visit('(', ')')
    visit_SetComp = generator_visit('{', '}')
    del generator_visit
    def visit_DictComp(self, node):
        self.write('{')
        self.visit(node.key)
        self.write(': ')
        self.visit(node.value)
        for comprehension in node.generators:
            self.visit(comprehension)
        self.write('}')
    def visit_IfExp(self, node):
        self.visit(node.body)
        self.write(' if ')
        self.visit(node.test)
        self.write(' else ')
        self.visit(node.orelse)
    def visit_Starred(self, node):
        self.write('*')
        self.visit(node.value)
    def visit_Repr(self, node):
        # XXX: python 2.6 only
        self.write('`')
        self.visit(node.value)
        self.write('`')
    # Helper Nodes
    def visit_alias(self, node):
        self.write(node.name)
        if node.asname is not None:
            self.write(' as ' + node.asname)
    def visit_comprehension(self, node):
        self.write(' for ')
        self.visit(node.target)
        self.write(' in ')
        self.visit(node.iter)
        if node.ifs:
            for if_ in node.ifs:
                self.write(' if ')
                self.visit(if_)
    def visit_excepthandler(self, node):
        self.newline(node)
        self.write('except')
        if node.type is not None:
            self.write(' ')
            self.visit(node.type)
            if node.name is not None:
                self.write(' as ')
                self.visit(node.name)
        self.write(':')
        self.body(node.body)
    def visit_arguments(self, node):
        self.signature(node)
| |
#!/usr/bin/env python
'''
Quantile regression model
Model parameters are estimated using iterated reweighted least squares. The
asymptotic covariance matrix estimated using kernel density estimation.
Author: Vincent Arel-Bundock
License: BSD-3
Created: 2013-03-19
The original IRLS function was written for Matlab by Shapour Mohammadi,
University of Tehran, 2008 (shmohammadi@gmail.com), with some lines based on
code written by James P. Lesage in Applied Econometrics Using MATLAB(1999).PP.
73-4. Translated to python with permission from original author by Christian
Prinoth (christian at prinoth dot name).
'''
from statsmodels.compat.python import range
import numpy as np
import warnings
import scipy.stats as stats
from scipy.linalg import pinv
from scipy.stats import norm
from statsmodels.tools.tools import chain_dot
from statsmodels.compat.numpy import np_matrix_rank
from statsmodels.tools.decorators import cache_readonly
from statsmodels.regression.linear_model import (RegressionModel,
RegressionResults,
RegressionResultsWrapper)
from statsmodels.tools.sm_exceptions import (ConvergenceWarning,
IterationLimitWarning)
class QuantReg(RegressionModel):
    '''Quantile Regression

    Estimate a quantile regression model using iterative reweighted least
    squares.

    Parameters
    ----------
    endog : array or dataframe
        endogenous/response variable
    exog : array or dataframe
        exogenous/explanatory variable(s)

    Notes
    -----
    The Least Absolute Deviation (LAD) estimator is a special case where
    quantile is set to 0.5 (q argument of the fit method).

    The asymptotic covariance matrix is estimated following the procedure in
    Greene (2008, p.407-408), using either the logistic or gaussian kernels
    (kernel argument of the fit method).

    References
    ----------
    General:

    * Birkes, D. and Y. Dodge(1993). Alternative Methods of Regression, John Wiley and Sons.
    * Green,W. H. (2008). Econometric Analysis. Sixth Edition. International Student Edition.
    * Koenker, R. (2005). Quantile Regression. New York: Cambridge University Press.
    * LeSage, J. P.(1999). Applied Econometrics Using MATLAB,

    Kernels (used by the fit method):

    * Green (2008) Table 14.2

    Bandwidth selection (used by the fit method):

    * Bofinger, E. (1975). Estimation of a density function using order statistics. Australian Journal of Statistics 17: 1-17.
    * Chamberlain, G. (1994). Quantile regression, censoring, and the structure of wages. In Advances in Econometrics, Vol. 1: Sixth World Congress, ed. C. A. Sims, 171-209. Cambridge: Cambridge University Press.
    * Hall, P., and S. Sheather. (1988). On the distribution of the Studentized quantile. Journal of the Royal Statistical Society, Series B 50: 381-391.

    Keywords: Least Absolute Deviation(LAD) Regression, Quantile Regression,
    Regression, Robust Estimation.
    '''

    def __init__(self, endog, exog, **kwargs):
        super(QuantReg, self).__init__(endog, exog, **kwargs)

    def whiten(self, data):
        """
        QuantReg model whitener does nothing: returns data.

        Defined only because the RegressionModel machinery expects a
        whiten method; quantile regression applies no transformation.
        """
        return data

    def fit(self, q=.5, vcov='robust', kernel='epa', bandwidth='hsheather',
            max_iter=1000, p_tol=1e-6, **kwargs):
        '''Solve by Iterative Weighted Least Squares

        Parameters
        ----------
        q : float
            Quantile must be between 0 and 1
        vcov : string, method used to calculate the variance-covariance matrix
            of the parameters. Default is ``robust``:

            - robust : heteroskedasticity robust standard errors (as suggested
              in Greene 6th edition)
            - iid : iid errors (as in Stata 12)
        kernel : string, kernel to use in the kernel density estimation for the
            asymptotic covariance matrix:

            - epa: Epanechnikov
            - cos: Cosine
            - gau: Gaussian
            - par: Parzen
        bandwidth : string, Bandwidth selection method in kernel density
            estimation for asymptotic covariance estimate (full
            references in QuantReg docstring):

            - hsheather: Hall-Sheather (1988)
            - bofinger: Bofinger (1975)
            - chamberlain: Chamberlain (1994)
        max_iter : int
            Maximum number of IRLS iterations.
        p_tol : float
            Convergence tolerance: iteration stops once the maximum
            absolute change in the parameter vector falls below this.
        '''
        # NOTE(review): message says 'p' but the argument is named q;
        # also a bare Exception — ValueError would be more precise.
        if q < 0 or q > 1:
            raise Exception('p must be between 0 and 1')
        kern_names = ['biw', 'cos', 'epa', 'gau', 'par']
        if kernel not in kern_names:
            raise Exception("kernel must be one of " + ', '.join(kern_names))
        else:
            # Replace the kernel name by the callable from the module-level
            # `kernels` dict.
            kernel = kernels[kernel]
        # Resolve the bandwidth-rule name to the module-level function.
        if bandwidth == 'hsheather':
            bandwidth = hall_sheather
        elif bandwidth == 'bofinger':
            bandwidth = bofinger
        elif bandwidth == 'chamberlain':
            bandwidth = chamberlain
        else:
            raise Exception("bandwidth must be in 'hsheather', 'bofinger', 'chamberlain'")
        endog = self.endog
        exog = self.exog
        nobs = self.nobs
        exog_rank = np_matrix_rank(self.exog)
        self.rank = exog_rank
        self.df_model = float(self.rank - self.k_constant)
        self.df_resid = self.nobs - self.rank
        n_iter = 0
        xstar = exog
        beta = np.ones(exog_rank)
        # TODO: better start, initial beta is used only for convergence check
        # Note the following doesn't work yet,
        # the iteration loop always starts with OLS as initial beta
        # if start_params is not None:
        #     if len(start_params) != rank:
        #         raise ValueError('start_params has wrong length')
        #     beta = start_params
        # else:
        #     # start with OLS
        #     beta = np.dot(np.linalg.pinv(exog), endog)
        diff = 10
        cycle = False
        history = dict(params = [], mse=[])
        # IRLS: repeatedly solve the weighted normal equations, reweighting
        # by the inverse of the asymmetrically weighted absolute residuals.
        while n_iter < max_iter and diff > p_tol and not cycle:
            n_iter += 1
            beta0 = beta
            xtx = np.dot(xstar.T, exog)
            xty = np.dot(xstar.T, endog)
            beta = np.dot(pinv(xtx), xty)
            resid = endog - np.dot(exog, beta)
            # Clip near-zero residuals away from 0 so the division in the
            # reweighting step below cannot blow up.
            mask = np.abs(resid) < .000001
            resid[mask] = np.sign(resid[mask]) * .000001
            # Asymmetric check-function weighting: q below, (1-q) above.
            resid = np.where(resid < 0, q * resid, (1-q) * resid)
            resid = np.abs(resid)
            xstar = exog / resid[:, np.newaxis]
            diff = np.max(np.abs(beta - beta0))
            history['params'].append(beta)
            history['mse'].append(np.mean(resid*resid))
            if (n_iter >= 300) and (n_iter % 100 == 0):
                # check for convergence circle, shouldn't happen
                for ii in range(2, 10):
                    if np.all(beta == history['params'][-ii]):
                        cycle = True
                        break
                # NOTE(review): this warning is issued every 100 iterations
                # past 300 even when no cycle was detected above — it is not
                # guarded by `cycle`.
                warnings.warn("Convergence cycle detected", ConvergenceWarning)
        if n_iter == max_iter:
            # NOTE(review): message hard-codes 1000 but max_iter is a
            # parameter — the printed count can be wrong.
            warnings.warn("Maximum number of iterations (1000) reached.",
                          IterationLimitWarning)
        # Residuals at the converged solution, used for the sparsity
        # (density) estimate below.
        e = endog - np.dot(exog, beta)
        # Greene (2008, p.407) writes that Stata 6 uses this bandwidth:
        # h = 0.9 * np.std(e) / (nobs**0.2)
        # Instead, we calculate bandwidth as in Stata 12
        iqre = stats.scoreatpercentile(e, 75) - stats.scoreatpercentile(e, 25)
        h = bandwidth(nobs, q)
        h = min(np.std(endog),
                iqre / 1.34) * (norm.ppf(q + h) - norm.ppf(q - h))
        # Kernel density estimate of the residual density at zero.
        fhat0 = 1. / (nobs * h) * np.sum(kernel(e / h))
        if vcov == 'robust':
            # Sandwich (heteroskedasticity-robust) covariance, Greene (2008).
            d = np.where(e > 0, (q/fhat0)**2, ((1-q)/fhat0)**2)
            xtxi = pinv(np.dot(exog.T, exog))
            xtdx = np.dot(exog.T * d[np.newaxis, :], exog)
            vcov = chain_dot(xtxi, xtdx, xtxi)
        elif vcov == 'iid':
            vcov = (1. / fhat0)**2 * q * (1 - q) * pinv(np.dot(exog.T, exog))
        else:
            raise Exception("vcov must be 'robust' or 'iid'")
        lfit = QuantRegResults(self, beta, normalized_cov_params=vcov)
        lfit.q = q
        lfit.iterations = n_iter
        lfit.sparsity = 1. / fhat0
        lfit.bandwidth = h
        lfit.history = history
        return RegressionResultsWrapper(lfit)
def _parzen(u):
z = np.where(np.abs(u) <= .5, 4./3 - 8. * u**2 + 8. * np.abs(u)**3,
8. * (1 - np.abs(u))**3 / 3.)
z[np.abs(u) > 1] = 0
return z
# Kernels from Greene (2008) Table 14.2, keyed by the `kernel` argument of
# QuantReg.fit.  Each maps an array of scaled residuals to density weights.
kernels = {
    'biw': lambda u: 15. / 16 * (1 - u**2)**2 * np.where(np.abs(u) <= 1, 1, 0),
    'cos': lambda u: np.where(np.abs(u) <= .5, 1 + np.cos(2 * np.pi * u), 0),
    'epa': lambda u: 3. / 4 * (1 - u**2) * np.where(np.abs(u) <= 1, 1, 0),
    'gau': lambda u: norm.pdf(u),
    'par': _parzen,
}
# Candidate kernels that are currently not exposed:
# kernels['bet'] = lambda u: np.where(np.abs(u) <= 1, .75 * (1 - u) * (1 + u), 0)
# kernels['log'] = lambda u: logistic.pdf(u) * (1 - logistic.pdf(u))
# kernels['tri'] = lambda u: np.where(np.abs(u) <= 1, 1 - np.abs(u), 0)
# kernels['trw'] = lambda u: 35. / 32 * (1 - u**2)**3 * np.where(np.abs(u) <= 1, 1, 0)
# kernels['uni'] = lambda u: 1. / 2 * np.where(np.abs(u) <= 1, 1, 0)
def hall_sheather(n, q, alpha=.05):
    """Hall-Sheather (1988) bandwidth rule for the sparsity estimate.

    Parameters: `n` observations, quantile `q`, significance level
    `alpha` (default .05).  Returns the scalar bandwidth.
    """
    z_q = norm.ppf(q)
    ratio = (1.5 * norm.pdf(z_q)**2.) / (2. * z_q**2. + 1.)
    z_alpha = norm.ppf(1. - alpha / 2.)
    return n**(-1. / 3) * z_alpha**(2./3) * ratio**(1./3)
def bofinger(n, q):
    """Bofinger (1975) bandwidth rule for the sparsity estimate."""
    z = norm.ppf(q)
    numer = 9. / 2 * norm.pdf(2 * z)**4
    denom = (2 * z**2 + 1)**2
    return n**(-1. / 5) * (numer / denom)**(1. / 5)
def chamberlain(n, q, alpha=.05):
    """Chamberlain (1994) bandwidth: normal critical value times the
    standard error of the sample quantile."""
    critical = norm.ppf(1 - alpha / 2)
    return critical * np.sqrt(q * (1 - q) / n)
class QuantRegResults(RegressionResults):
    '''Results instance for the QuantReg model.

    The Gaussian-likelihood statistics inherited from `RegressionResults`
    (llf, aic, bic, R-squared, the MSEs, ...) do not apply here and are
    reported as NaN to keep the common results API intact.
    '''

    @cache_readonly
    def prsquared(self):
        """Pseudo R-squared: 1 minus the ratio of the weighted absolute
        residual sum of the fit to that of the sample q-quantile alone."""
        q = self.q
        endog = self.model.endog
        # Check-function weighted absolute residuals of the fitted model.
        e = self.resid
        e = np.where(e < 0, (1 - q) * e, q * e)
        e = np.abs(e)
        # Same quantity for the "null" fit: deviations from the sample
        # q-quantile of endog.
        ered = endog - stats.scoreatpercentile(endog, q * 100)
        ered = np.where(ered < 0, (1 - q) * ered, q * ered)
        ered = np.abs(ered)
        return 1 - np.sum(e) / np.sum(ered)

    #@cache_readonly
    def scale(self):
        # NOTE(review): the decorator above is commented out upstream, so
        # `scale` is a plain method (callers must invoke it); confirm
        # before turning it into a cached attribute.
        return 1.

    # Least-squares summary statistics are not defined for quantile
    # regression; each reports NaN.
    @cache_readonly
    def bic(self):
        return np.nan

    @cache_readonly
    def aic(self):
        return np.nan

    @cache_readonly
    def llf(self):
        return np.nan

    @cache_readonly
    def rsquared(self):
        return np.nan

    @cache_readonly
    def rsquared_adj(self):
        return np.nan

    @cache_readonly
    def mse(self):
        return np.nan

    @cache_readonly
    def mse_model(self):
        return np.nan

    @cache_readonly
    def mse_total(self):
        return np.nan

    @cache_readonly
    def centered_tss(self):
        return np.nan

    @cache_readonly
    def uncentered_tss(self):
        return np.nan

    # Heteroskedasticity-consistent standard errors are not available for
    # this estimator.
    @cache_readonly
    def HC0_se(self):
        raise NotImplementedError

    @cache_readonly
    def HC1_se(self):
        raise NotImplementedError

    @cache_readonly
    def HC2_se(self):
        raise NotImplementedError

    @cache_readonly
    def HC3_se(self):
        raise NotImplementedError

    def summary(self, yname=None, xname=None, title=None, alpha=.05):
        """Summarize the Regression Results

        Parameters
        ----------
        yname : string, optional
            Default is `y`
        xname : list of strings, optional
            Default is `var_##` for ## in p the number of regressors
        title : string, optional
            Title for the top table. If not None, then this replaces the
            default title
        alpha : float
            significance level for the confidence intervals

        Returns
        -------
        smry : Summary instance
            this holds the summary tables and text, which can be printed or
            converted to various output formats.

        See Also
        --------
        statsmodels.iolib.summary.Summary : class to hold summary
            results
        """
        #TODO: import where we need it (for now), add as cached attributes
        from statsmodels.stats.stattools import (jarque_bera,
                                                 omni_normtest, durbin_watson)
        jb, jbpv, skew, kurtosis = jarque_bera(self.wresid)
        omni, omnipv = omni_normtest(self.wresid)
        eigvals = self.eigenvals
        condno = self.condition_number
        self.diagn = dict(jb=jb, jbpv=jbpv, skew=skew, kurtosis=kurtosis,
                          omni=omni, omnipv=omnipv, condno=condno,
                          mineigval=eigvals[0])
        top_left = [('Dep. Variable:', None),
                    ('Model:', None),
                    ('Method:', ['Least Squares']),
                    ('Date:', None),
                    ('Time:', None)
                    ]
        top_right = [('Pseudo R-squared:', ["%#8.4g" % self.prsquared]),
                     ('Bandwidth:', ["%#8.4g" % self.bandwidth]),
                     ('Sparsity:', ["%#8.4g" % self.sparsity]),
                     ('No. Observations:', None),
                     ('Df Residuals:', None),  #[self.df_resid]), #TODO: spelling
                     ('Df Model:', None)  #[self.df_model])
                     ]
        diagn_left = [('Omnibus:', ["%#6.3f" % omni]),
                      ('Prob(Omnibus):', ["%#6.3f" % omnipv]),
                      ('Skew:', ["%#6.3f" % skew]),
                      ('Kurtosis:', ["%#6.3f" % kurtosis])
                      ]
        diagn_right = [('Durbin-Watson:', ["%#8.3f" % durbin_watson(self.wresid)]),
                       ('Jarque-Bera (JB):', ["%#8.3f" % jb]),
                       ('Prob(JB):', ["%#8.3g" % jbpv]),
                       ('Cond. No.', ["%#8.3g" % condno])
                       ]
        if title is None:
            title = self.model.__class__.__name__ + ' ' + "Regression Results"
        # create summary table instance
        from statsmodels.iolib.summary import Summary
        smry = Summary()
        smry.add_table_2cols(self, gleft=top_left, gright=top_right,
                             yname=yname, xname=xname, title=title)
        # BUG FIX: the caller-supplied `alpha` was previously ignored
        # (hard-coded .05); honor it when building the parameter table.
        smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
                              use_t=True)
        # Diagnostics table currently disabled:
        # smry.add_table_2cols(self, gleft=diagn_left, gright=diagn_right,
        #                      yname=yname, xname=xname, title="")
        # add warnings/notes, added to text format only
        etext = []
        if eigvals[-1] < 1e-10:
            wstr = "The smallest eigenvalue is %6.3g. This might indicate "
            wstr += "that there are\n"
            wstr += "strong multicollinearity problems or that the design "
            wstr += "matrix is singular."
            wstr = wstr % eigvals[-1]
            etext.append(wstr)
        elif condno > 1000:  #TODO: what is recommended
            wstr = "The condition number is large, %6.3g. This might "
            wstr += "indicate that there are\n"
            wstr += "strong multicollinearity or other numerical "
            wstr += "problems."
            wstr = wstr % condno
            etext.append(wstr)
        if etext:
            smry.add_extra_txt(etext)
        return smry
| |
from __future__ import unicode_literals
import re
from django import forms
from django.contrib.auth.forms import (
AuthenticationForm, PasswordChangeForm, PasswordResetForm,
ReadOnlyPasswordHashField, ReadOnlyPasswordHashWidget, SetPasswordForm,
UserChangeForm, UserCreationForm,
)
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core import mail
from django.core.mail import EmailMultiAlternatives
from django.forms.fields import CharField, Field
from django.test import TestCase, override_settings
from django.utils import translation
from django.utils.encoding import force_text
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from .settings import AUTH_TEMPLATES
@override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
class UserCreationFormTest(TestCase):
    """Validation and save behaviour of UserCreationForm."""
    fixtures = ['authtestdata.json']

    def test_user_already_exists(self):
        # 'testclient' exists in the fixture, so the form must surface the
        # model field's "unique" error message on username.
        data = {
            'username': 'testclient',
            'password1': 'test123',
            'password2': 'test123',
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["username"].errors,
                         [force_text(User._meta.get_field('username').error_messages['unique'])])

    def test_invalid_data(self):
        # 'jsmith!' trips the username field's validator with code 'invalid'.
        data = {
            'username': 'jsmith!',
            'password1': 'test123',
            'password2': 'test123',
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        validator = next(v for v in User._meta.get_field('username').validators if v.code == 'invalid')
        self.assertEqual(form["username"].errors, [force_text(validator.message)])

    def test_password_verification(self):
        # The verification password is incorrect.
        data = {
            'username': 'jsmith',
            'password1': 'test123',
            'password2': 'test',
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["password2"].errors,
                         [force_text(form.error_messages['password_mismatch'])])

    def test_both_passwords(self):
        # One (or both) passwords weren't given
        data = {'username': 'jsmith'}
        form = UserCreationForm(data)
        required_error = [force_text(Field.default_error_messages['required'])]
        self.assertFalse(form.is_valid())
        self.assertEqual(form['password1'].errors, required_error)
        self.assertEqual(form['password2'].errors, required_error)
        # Supplying only password2 clears its error; password1 stays required.
        data['password2'] = 'test123'
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form['password1'].errors, required_error)
        self.assertEqual(form['password2'].errors, [])

    def test_success(self):
        # The success case.
        data = {
            'username': 'jsmith@example.com',
            'password1': 'test123',
            'password2': 'test123',
        }
        form = UserCreationForm(data)
        self.assertTrue(form.is_valid())
        u = form.save()
        self.assertEqual(repr(u), '<User: jsmith@example.com>')
@override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
class AuthenticationFormTest(TestCase):
    """Login validation in AuthenticationForm, including the
    confirm_login_allowed() customization hook and username labels."""
    fixtures = ['authtestdata.json']

    def test_invalid_username(self):
        # The user submits an invalid username.
        data = {
            'username': 'jsmith_does_not_exist',
            'password': 'test123',
        }
        form = AuthenticationForm(None, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.non_field_errors(),
                         [force_text(form.error_messages['invalid_login'] % {
                             'username': User._meta.get_field('username').verbose_name
                         })])

    def test_inactive_user(self):
        # The user is inactive.
        data = {
            'username': 'inactive',
            'password': 'password',
        }
        form = AuthenticationForm(None, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.non_field_errors(),
                         [force_text(form.error_messages['inactive'])])

    def test_inactive_user_i18n(self):
        # Same as above but under a non-default locale (pt-br).
        with self.settings(USE_I18N=True), translation.override('pt-br', deactivate=True):
            # The user is inactive.
            data = {
                'username': 'inactive',
                'password': 'password',
            }
            form = AuthenticationForm(None, data)
            self.assertFalse(form.is_valid())
            self.assertEqual(form.non_field_errors(),
                             [force_text(form.error_messages['inactive'])])

    def test_custom_login_allowed_policy(self):
        # The user is inactive, but our custom form policy allows them to log in.
        data = {
            'username': 'inactive',
            'password': 'password',
        }
        class AuthenticationFormWithInactiveUsersOkay(AuthenticationForm):
            def confirm_login_allowed(self, user):
                pass
        form = AuthenticationFormWithInactiveUsersOkay(None, data)
        self.assertTrue(form.is_valid())
        # If we want to disallow some logins according to custom logic,
        # we should raise a django.forms.ValidationError in the form.
        class PickyAuthenticationForm(AuthenticationForm):
            def confirm_login_allowed(self, user):
                if user.username == "inactive":
                    raise forms.ValidationError("This user is disallowed.")
                raise forms.ValidationError("Sorry, nobody's allowed in.")
        form = PickyAuthenticationForm(None, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.non_field_errors(), ['This user is disallowed.'])
        data = {
            'username': 'testclient',
            'password': 'password',
        }
        form = PickyAuthenticationForm(None, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.non_field_errors(), ["Sorry, nobody's allowed in."])

    def test_success(self):
        # The success case
        data = {
            'username': 'testclient',
            'password': 'password',
        }
        form = AuthenticationForm(None, data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.non_field_errors(), [])

    def test_username_field_label(self):
        # An explicit label on a custom username field is respected.
        class CustomAuthenticationForm(AuthenticationForm):
            username = CharField(label="Name", max_length=75)
        form = CustomAuthenticationForm()
        self.assertEqual(form['username'].label, "Name")

    def test_username_field_label_not_set(self):
        # Without an explicit label, the label falls back to the model
        # field's capitalized verbose_name.
        class CustomAuthenticationForm(AuthenticationForm):
            username = CharField()
        form = CustomAuthenticationForm()
        username_field = User._meta.get_field(User.USERNAME_FIELD)
        self.assertEqual(form.fields['username'].label, capfirst(username_field.verbose_name))

    def test_username_field_label_empty_string(self):
        # An empty-string label is kept as-is, not replaced by a default.
        class CustomAuthenticationForm(AuthenticationForm):
            username = CharField(label='')
        form = CustomAuthenticationForm()
        self.assertEqual(form.fields['username'].label, "")
@override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
class SetPasswordFormTest(TestCase):
    """SetPasswordForm: new-password pair validation (no old password)."""
    fixtures = ['authtestdata.json']

    def test_password_verification(self):
        # Mismatched new passwords must be reported on new_password2.
        target = User.objects.get(username='testclient')
        payload = {'new_password1': 'abc123', 'new_password2': 'abc'}
        bound = SetPasswordForm(target, payload)
        self.assertFalse(bound.is_valid())
        expected = [force_text(bound.error_messages['password_mismatch'])]
        self.assertEqual(bound["new_password2"].errors, expected)

    def test_success(self):
        # Matching new passwords validate.
        target = User.objects.get(username='testclient')
        payload = {'new_password1': 'abc123', 'new_password2': 'abc123'}
        self.assertTrue(SetPasswordForm(target, payload).is_valid())
@override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
class PasswordChangeFormTest(TestCase):
    """PasswordChangeForm: old-password check plus new-password match."""
    fixtures = ['authtestdata.json']

    def _form(self, payload):
        # All tests operate on the fixture user 'testclient'.
        return PasswordChangeForm(User.objects.get(username='testclient'), payload)

    def test_incorrect_password(self):
        # A wrong old password is reported on the old_password field.
        form = self._form({
            'old_password': 'test',
            'new_password1': 'abc123',
            'new_password2': 'abc123',
        })
        self.assertFalse(form.is_valid())
        self.assertEqual(form["old_password"].errors,
                         [force_text(form.error_messages['password_incorrect'])])

    def test_password_verification(self):
        # Mismatched new passwords are reported on new_password2.
        form = self._form({
            'old_password': 'password',
            'new_password1': 'abc123',
            'new_password2': 'abc',
        })
        self.assertFalse(form.is_valid())
        self.assertEqual(form["new_password2"].errors,
                         [force_text(form.error_messages['password_mismatch'])])

    def test_success(self):
        # Correct old password plus matching new passwords validates.
        form = self._form({
            'old_password': 'password',
            'new_password1': 'abc123',
            'new_password2': 'abc123',
        })
        self.assertTrue(form.is_valid())

    def test_field_order(self):
        # Regression check: old_password must be listed first.
        owner = User.objects.get(username='testclient')
        self.assertEqual(list(PasswordChangeForm(owner, {}).fields),
                         ['old_password', 'new_password1', 'new_password2'])
@override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
class UserChangeFormTest(TestCase):
    """UserChangeForm behaviour, mostly the read-only password hash
    display and several ticket-numbered regression scenarios."""
    fixtures = ['authtestdata.json']

    def test_username_validity(self):
        # 'not valid' trips the username validator with code 'invalid'.
        user = User.objects.get(username='testclient')
        data = {'username': 'not valid'}
        form = UserChangeForm(data, instance=user)
        self.assertFalse(form.is_valid())
        validator = next(v for v in User._meta.get_field('username').validators if v.code == 'invalid')
        self.assertEqual(form["username"].errors, [force_text(validator.message)])

    def test_bug_14242(self):
        # A regression test, introduced by adding an optimization for the
        # UserChangeForm.
        class MyUserForm(UserChangeForm):
            def __init__(self, *args, **kwargs):
                super(MyUserForm, self).__init__(*args, **kwargs)
                self.fields['groups'].help_text = 'These groups give users different permissions'
            class Meta(UserChangeForm.Meta):
                fields = ('groups',)
        # Just check we can create it
        MyUserForm({})

    def test_unusable_password(self):
        # An unusable password renders the "No password set." message
        # instead of a hash.
        user = User.objects.get(username='empty_password')
        user.set_unusable_password()
        user.save()
        form = UserChangeForm(instance=user)
        self.assertIn(_("No password set."), form.as_table())

    def test_bug_17944_empty_password(self):
        user = User.objects.get(username='empty_password')
        form = UserChangeForm(instance=user)
        self.assertIn(_("No password set."), form.as_table())

    def test_bug_17944_unmanageable_password(self):
        user = User.objects.get(username='unmanageable_password')
        form = UserChangeForm(instance=user)
        self.assertIn(_("Invalid password format or unknown hashing algorithm."),
                      form.as_table())

    def test_bug_17944_unknown_password_algorithm(self):
        user = User.objects.get(username='unknown_password')
        form = UserChangeForm(instance=user)
        self.assertIn(_("Invalid password format or unknown hashing algorithm."),
                      form.as_table())

    def test_bug_19133(self):
        "The change form does not return the password value"
        # Use the form to construct the POST data
        user = User.objects.get(username='testclient')
        form_for_data = UserChangeForm(instance=user)
        post_data = form_for_data.initial
        # The password field should be readonly, so anything
        # posted here should be ignored; the form will be
        # valid, and give back the 'initial' value for the
        # password field.
        post_data['password'] = 'new password'
        form = UserChangeForm(instance=user, data=post_data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['password'], 'sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161')

    def test_bug_19349_bound_password_field(self):
        user = User.objects.get(username='testclient')
        form = UserChangeForm(data={}, instance=user)
        # When rendering the bound password field,
        # ReadOnlyPasswordHashWidget needs the initial
        # value to render correctly
        self.assertEqual(form.initial['password'], form['password'].value())
@override_settings(
    PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
    TEMPLATES=AUTH_TEMPLATES,
    USE_TZ=False,
)
class PasswordResetFormTest(TestCase):
    """PasswordResetForm validation and the emails its save() sends."""
    fixtures = ['authtestdata.json']

    @classmethod
    def setUpClass(cls):
        super(PasswordResetFormTest, cls).setUpClass()
        # This cleanup is necessary because contrib.sites cache
        # makes tests interfere with each other, see #11505
        Site.objects.clear_cache()

    def create_dummy_user(self):
        """
        Create a user and return a tuple (user_object, username, email).
        """
        username = 'jsmith'
        email = 'jsmith@example.com'
        user = User.objects.create_user(username, email, 'test123')
        return (user, username, email)

    def test_invalid_email(self):
        # A malformed address is rejected by the email field itself.
        data = {'email': 'not valid'}
        form = PasswordResetForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form['email'].errors, [_('Enter a valid email address.')])

    def test_nonexistent_email(self):
        """
        Test nonexistent email address. This should not fail because it would
        expose information about registered users.
        """
        data = {'email': 'foo@bar.com'}
        form = PasswordResetForm(data)
        self.assertTrue(form.is_valid())
        self.assertEqual(len(mail.outbox), 0)

    def test_cleaned_data(self):
        # A known address validates, is echoed in cleaned_data, and
        # triggers exactly one reset email.
        (user, username, email) = self.create_dummy_user()
        data = {'email': email}
        form = PasswordResetForm(data)
        self.assertTrue(form.is_valid())
        form.save(domain_override='example.com')
        self.assertEqual(form.cleaned_data['email'], email)
        self.assertEqual(len(mail.outbox), 1)

    def test_custom_email_subject(self):
        data = {'email': 'testclient@example.com'}
        form = PasswordResetForm(data)
        self.assertTrue(form.is_valid())
        # Since we're not providing a request object, we must provide a
        # domain_override to prevent the save operation from failing in the
        # potential case where contrib.sites is not installed. Refs #16412.
        form.save(domain_override='example.com')
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, 'Custom password reset on example.com')

    def test_custom_email_constructor(self):
        # A subclass may build the email itself by overriding send_mail().
        data = {'email': 'testclient@example.com'}
        class CustomEmailPasswordResetForm(PasswordResetForm):
            def send_mail(self, subject_template_name, email_template_name,
                          context, from_email, to_email,
                          html_email_template_name=None):
                EmailMultiAlternatives(
                    "Forgot your password?",
                    "Sorry to hear you forgot your password.",
                    None, [to_email],
                    ['site_monitor@example.com'],
                    headers={'Reply-To': 'webmaster@example.com'},
                    alternatives=[("Really sorry to hear you forgot your password.",
                                   "text/html")]).send()
        form = CustomEmailPasswordResetForm(data)
        self.assertTrue(form.is_valid())
        # Since we're not providing a request object, we must provide a
        # domain_override to prevent the save operation from failing in the
        # potential case where contrib.sites is not installed. Refs #16412.
        form.save(domain_override='example.com')
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, 'Forgot your password?')
        self.assertEqual(mail.outbox[0].bcc, ['site_monitor@example.com'])
        self.assertEqual(mail.outbox[0].content_subtype, "plain")

    def test_preserve_username_case(self):
        """
        Preserve the case of the user name (before the @ in the email address)
        when creating a user (#5605).
        """
        user = User.objects.create_user('forms_test2', 'tesT@EXAMple.com', 'test')
        self.assertEqual(user.email, 'tesT@example.com')
        user = User.objects.create_user('forms_test3', 'tesT', 'test')
        self.assertEqual(user.email, 'tesT')

    def test_inactive_user(self):
        """
        Test that inactive user cannot receive password reset email.
        """
        (user, username, email) = self.create_dummy_user()
        user.is_active = False
        user.save()
        form = PasswordResetForm({'email': email})
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(len(mail.outbox), 0)

    def test_unusable_password(self):
        user = User.objects.create_user('testuser', 'test@example.com', 'test')
        data = {"email": "test@example.com"}
        form = PasswordResetForm(data)
        self.assertTrue(form.is_valid())
        user.set_unusable_password()
        user.save()
        form = PasswordResetForm(data)
        # The form itself is valid, but no email is sent
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(len(mail.outbox), 0)

    def test_save_plaintext_email(self):
        """
        Test the PasswordResetForm.save() method with no html_email_template_name
        parameter passed in.

        Test to ensure original behavior is unchanged after the parameter was added.
        """
        (user, username, email) = self.create_dummy_user()
        form = PasswordResetForm({"email": email})
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0].message()
        self.assertFalse(message.is_multipart())
        self.assertEqual(message.get_content_type(), 'text/plain')
        self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
        self.assertEqual(len(mail.outbox[0].alternatives), 0)
        self.assertEqual(message.get_all('to'), [email])
        self.assertTrue(re.match(r'^http://example.com/reset/[\w+/-]', message.get_payload()))

    def test_save_html_email_template_name(self):
        """
        Test the PasswordResetForm.save() method with html_email_template_name
        parameter specified.

        Test to ensure that a multipart email is sent with both text/plain
        and text/html parts.
        """
        (user, username, email) = self.create_dummy_user()
        form = PasswordResetForm({"email": email})
        self.assertTrue(form.is_valid())
        form.save(html_email_template_name='registration/html_password_reset_email.html')
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(len(mail.outbox[0].alternatives), 1)
        message = mail.outbox[0].message()
        self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
        self.assertEqual(len(message.get_payload()), 2)
        self.assertTrue(message.is_multipart())
        self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
        self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
        self.assertEqual(message.get_all('to'), [email])
        self.assertTrue(re.match(r'^http://example.com/reset/[\w/-]+', message.get_payload(0).get_payload()))
        self.assertTrue(
            re.match(r'^<html><a href="http://example.com/reset/[\w/-]+/">Link</a></html>$',
                     message.get_payload(1).get_payload())
        )
class ReadOnlyPasswordHashTest(TestCase):
    """Widget/field behaviour of the read-only password hash display."""

    def test_bug_19349_render_with_none_value(self):
        # Regression for #19349: a None value must render the
        # "no password" message instead of raising.
        rendered = ReadOnlyPasswordHashWidget().render(
            name='password', value=None, attrs={})
        self.assertIn(_("No password set."), rendered)

    def test_readonly_field_has_changed(self):
        # The field is read-only, so it never reports a change.
        self.assertFalse(ReadOnlyPasswordHashField().has_changed('aaa', 'bbb'))
| |
"""Let's Encrypt client interfaces."""
import abc
import zope.interface
# pylint: disable=no-self-argument,no-method-argument,no-init,inherit-non-class
# pylint: disable=too-few-public-methods
class AccountStorage(object):
    """Accounts storage interface.

    Implementations persist ACME account data and must provide all of
    the abstract methods below.
    """
    # Python 2-style ABC declaration (`__metaclass__` has no effect on
    # Python 3).
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def find_all(self): # pragma: no cover
        """Find all accounts.

        :returns: All found accounts.
        :rtype: list

        """
        raise NotImplementedError()

    @abc.abstractmethod
    def load(self, account_id): # pragma: no cover
        """Load an account by its id.

        :param account_id: Identifier of the account to load.

        :raises .AccountNotFound: if account could not be found
        :raises .AccountStorageError: if account could not be loaded

        """
        raise NotImplementedError()

    @abc.abstractmethod
    def save(self, account): # pragma: no cover
        """Save account.

        :param account: Account to persist.

        :raises .AccountStorageError: if account could not be saved

        """
        raise NotImplementedError()
class IPluginFactory(zope.interface.Interface):
    """IPlugin factory.

    Objects providing this interface will be called without satisfying
    any entry point "extras" (extra dependencies) you might have defined
    for your plugin, e.g (excerpt from ``setup.py`` script)::

      setup(
          ...
          entry_points={
              'letsencrypt.plugins': [
                  'name=example_project.plugin[plugin_deps]',
              ],
          },
          extras_require={
              'plugin_deps': ['dep1', 'dep2'],
          }
      )

    Therefore, make sure such objects are importable and usable without
    extras. This is necessary, because CLI does the following operations
    (in order):

      - loads an entry point,
      - calls `inject_parser_options`,
      - requires an entry point,
      - creates plugin instance (`__call__`).

    """

    description = zope.interface.Attribute("Short plugin description")

    def __call__(config, name):
        """Create new `IPlugin`.

        :param IConfig config: Configuration.
        :param str name: Unique plugin name.

        """

    def inject_parser_options(parser, name):
        """Inject argument parser options (flags).

        1. Be nice and prepend all options and destinations with
        `~.common.option_namespace` and `~.common.dest_namespace`.

        2. Inject options (flags) only. Positional arguments are not
        allowed, as this would break the CLI.

        :param ArgumentParser parser: (Almost) top-level CLI parser.
        :param str name: Unique plugin name.

        """
class IPlugin(zope.interface.Interface):
    """Let's Encrypt plugin.

    Base interface shared by all plugin kinds (see `IAuthenticator`
    and `IInstaller` below).

    """

    def prepare():
        """Prepare the plugin.

        Finish up any additional initialization.

        :raises .PluginError:
            when full initialization cannot be completed.
        :raises .MisconfigurationError:
            when full initialization cannot be completed. Plugin will
            be displayed on a list of available plugins.
        :raises .NoInstallationError:
            when the necessary programs/files cannot be located. Plugin
            will NOT be displayed on a list of available plugins.
        :raises .NotSupportedError:
            when the installation is recognized, but the version is not
            currently supported.

        """

    def more_info():
        """Human-readable string to help the user.

        Should describe the steps taken and any relevant info to help the user
        decide which plugin to use.

        :rtype: str

        """
class IAuthenticator(IPlugin):
    """Generic Let's Encrypt Authenticator.

    Class represents all possible tools processes that have the
    ability to perform challenges and attain a certificate.

    """

    def get_chall_pref(domain):
        """Return list of challenge preferences.

        :param str domain: Domain for which challenge preferences are sought.

        :returns: List of challenge types (subclasses of
            :class:`acme.challenges.Challenge`) with the most
            preferred challenges first. If a type is not specified, it means
            the Authenticator cannot perform the challenge.
        :rtype: list

        """

    def perform(achalls):
        """Perform the given challenge.

        :param list achalls: Non-empty (guaranteed) list of
            :class:`~letsencrypt.achallenges.AnnotatedChallenge`
            instances, such that it contains types found within
            :func:`get_chall_pref` only.

        :returns: List of ACME
            :class:`~acme.challenges.ChallengeResponse` instances
            or if the :class:`~acme.challenges.Challenge` cannot
            be fulfilled then:

            ``None``
              Authenticator can perform challenge, but not at this time.
            ``False``
              Authenticator will never be able to perform (error).

        :rtype: :class:`list` of
            :class:`acme.challenges.ChallengeResponse`

        :raises .PluginError: If challenges cannot be performed

        """

    def cleanup(achalls):
        """Revert changes and shutdown after challenges complete.

        :param list achalls: Non-empty (guaranteed) list of
            :class:`~letsencrypt.achallenges.AnnotatedChallenge`
            instances, a subset of those previously passed to
            :func:`perform`.

        :raises PluginError: if original configuration cannot be restored

        """
class IConfig(zope.interface.Interface):
    """Let's Encrypt user-supplied configuration.

    .. warning:: The values stored in the configuration have not been
        filtered, stripped or sanitized.

    """
    # Account / registration settings.
    server = zope.interface.Attribute("ACME Directory Resource URI.")
    email = zope.interface.Attribute(
        "Email used for registration and recovery contact.")
    rsa_key_size = zope.interface.Attribute("Size of the RSA key.")
    # On-disk directory layout.
    config_dir = zope.interface.Attribute("Configuration directory.")
    work_dir = zope.interface.Attribute("Working directory.")
    accounts_dir = zope.interface.Attribute(
        "Directory where all account information is stored.")
    backup_dir = zope.interface.Attribute("Configuration backups directory.")
    csr_dir = zope.interface.Attribute(
        "Directory where newly generated Certificate Signing Requests "
        "(CSRs) are saved.")
    in_progress_dir = zope.interface.Attribute(
        "Directory used before a permanent checkpoint is finalized.")
    key_dir = zope.interface.Attribute("Keys storage.")
    temp_checkpoint_dir = zope.interface.Attribute(
        "Temporary checkpoint directory.")
    renewer_config_file = zope.interface.Attribute(
        "Location of renewal configuration file.")
    # Network / challenge settings.
    no_verify_ssl = zope.interface.Attribute(
        "Disable SSL certificate verification.")
    dvsni_port = zope.interface.Attribute(
        "Port number to perform DVSNI challenge. "
        "Boulder in testing mode defaults to 5001.")
    http01_port = zope.interface.Attribute(
        "Port used in the SimpleHttp challenge.")
class IInstaller(IPlugin):
    """Generic Let's Encrypt Installer Interface.

    Represents any server that an X509 certificate can be placed.
    """
    def get_all_names():
        """Returns all names that may be authenticated.

        :rtype: `list` of `str`
        """
    def deploy_cert(domain, cert_path, key_path, chain_path, fullchain_path):
        """Deploy certificate.

        :param str domain: domain to deploy certificate file
        :param str cert_path: absolute path to the certificate file
        :param str key_path: absolute path to the private key file
        :param str chain_path: absolute path to the certificate chain file
        :param str fullchain_path: absolute path to the certificate fullchain
            file (cert plus chain)

        :raises .PluginError: when cert cannot be deployed
        """
    def enhance(domain, enhancement, options=None):
        """Perform a configuration enhancement.

        :param str domain: domain for which to provide enhancement
        :param str enhancement: An enhancement as defined in
            :const:`~letsencrypt.constants.ENHANCEMENTS`
        :param options: Flexible options parameter for enhancement.
            Check documentation of
            :const:`~letsencrypt.constants.ENHANCEMENTS`
            for expected options for each enhancement.

        :raises .PluginError: If Enhancement is not supported, or if
            an error occurs during the enhancement.
        """
    def supported_enhancements():
        """Returns a list of supported enhancements.

        :returns: supported enhancements which should be a subset of
            :const:`~letsencrypt.constants.ENHANCEMENTS`
        :rtype: :class:`list` of :class:`str`
        """
    def get_all_certs_keys():
        """Retrieve all certs and keys set in configuration.

        :returns: tuples with form `[(cert, key, path)]`, where:

            - `cert` - str path to certificate file
            - `key` - str path to associated key file
            - `path` - file path to configuration file

        :rtype: list
        """
    def save(title=None, temporary=False):
        """Saves all changes to the configuration files.

        Both title and temporary are needed because a save may be
        intended to be permanent, but the save is not ready to be a full
        checkpoint

        :param str title: The title of the save. If a title is given, the
            configuration will be saved as a new checkpoint and put in a
            timestamped directory. `title` has no effect if temporary is true.
        :param bool temporary: Indicates whether the changes made will
            be quickly reversed in the future (challenges)

        :raises .PluginError: when save is unsuccessful
        """
    def rollback_checkpoints(rollback=1):
        """Revert `rollback` number of configuration checkpoints.

        :raises .PluginError: when configuration cannot be fully reverted
        """
    def recovery_routine():
        """Revert configuration to most recent finalized checkpoint.

        Remove all changes (temporary and permanent) that have not been
        finalized. This is useful to protect against crashes and other
        execution interruptions.

        :raises .errors.PluginError: If unable to recover the configuration
        """
    def view_config_changes():
        """Display all of the LE config changes.

        :raises .PluginError: when config changes cannot be parsed
        """
    def config_test():
        """Make sure the configuration is valid.

        :raises .MisconfigurationError: when the config is not in a usable state
        """
    def restart():
        """Restart or refresh the server content.

        :raises .PluginError: when server cannot be restarted
        """
class IDisplay(zope.interface.Interface):
    """Generic display."""
    def notification(message, height, pause):
        """Displays a string message

        :param str message: Message to display
        :param int height: Height of dialog box if applicable
        :param bool pause: Whether or not the application should pause for
            confirmation (if available)
        """
    def menu(message, choices,
             ok_label="OK", cancel_label="Cancel", help_label=""):
        """Displays a generic menu.

        :param str message: message to display
        :param choices: choices
        :type choices: :class:`list` of :func:`tuple` or :class:`str`
        :param str ok_label: label for OK button
        :param str cancel_label: label for Cancel button
        :param str help_label: label for Help button

        :returns: tuple of (`code`, `index`) where
            `code` - str display exit code
            `index` - int index of the user's selection
        """
    def input(message):
        """Accept input from the user.

        :param str message: message to display to the user

        :returns: tuple of (`code`, `input`) where
            `code` - str display exit code
            `input` - str of the user's input
        :rtype: tuple
        """
    def yesno(message, yes_label="Yes", no_label="No"):
        """Query the user with a yes/no question.

        Yes and No label must begin with different letters.

        :param str message: question for the user

        :returns: True for "Yes", False for "No"
        :rtype: bool
        """
    def checklist(message, tags, default_state):
        """Allow for multiple selections from a menu.

        :param str message: message to display to the user
        :param list tags: where each is of type :class:`str` len(tags) > 0
        :param bool default_state: If True, items are in a selected state by
            default.
        """
class IValidator(zope.interface.Interface):
    """Configuration validator."""
    def certificate(cert, name, alt_host=None, port=443):
        """Verifies the certificate presented at name is cert

        :param OpenSSL.crypto.X509 cert: Expected certificate
        :param str name: Server's domain name
        :param bytes alt_host: Host to connect to instead of the IP
            address of host
        :param int port: Port to connect to

        :returns: True if the certificate was verified successfully
        :rtype: bool
        """
    def redirect(name, port=80, headers=None):
        """Verify redirect to HTTPS

        :param str name: Server's domain name
        :param int port: Port to connect to
        :param dict headers: HTTP headers to include in request

        :returns: True if redirect is successfully enabled
        :rtype: bool
        """
    def hsts(name):
        """Verify HSTS header is enabled

        :param str name: Server's domain name

        :returns: True if HSTS header is successfully enabled
        :rtype: bool
        """
    def ocsp_stapling(name):
        """Verify ocsp stapling for domain

        :param str name: Server's domain name

        :returns: True if ocsp stapling is successfully enabled
        :rtype: bool
        """
class IReporter(zope.interface.Interface):
    """Interface to collect and display information to the user."""
    HIGH_PRIORITY = zope.interface.Attribute(
        "Used to denote high priority messages")
    MEDIUM_PRIORITY = zope.interface.Attribute(
        "Used to denote medium priority messages")
    LOW_PRIORITY = zope.interface.Attribute(
        "Used to denote low priority messages")
    # NOTE(review): unlike the other interfaces in this module these
    # method declarations carry an explicit ``self``; zope interface
    # methods conventionally omit it -- confirm before relying on the
    # declared signatures.
    def add_message(self, msg, priority, on_crash=True):
        """Adds msg to the list of messages to be printed.

        :param str msg: Message to be displayed to the user.
        :param int priority: One of HIGH_PRIORITY, MEDIUM_PRIORITY, or
            LOW_PRIORITY.
        :param bool on_crash: Whether or not the message should be printed if
            the program exits abnormally.
        """
    def print_messages(self):
        """Prints messages to the user and clears the message queue."""
| |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
import os
import sys
import logging
import binascii
import argparse
import time
import copy
import Queue
import threading
from bson.binary import Binary
import rflib.ipc.IPC as IPC
import rflib.ipc.IPCService as IPCService
from rflib.ipc.RFProtocol import *
from rflib.ipc.RFProtocolFactory import RFProtocolFactory
from rflib.defs import *
from rflib.types.Match import *
from rflib.types.Action import *
from rflib.types.Option import *
from rftable import *
from rffastpath import *
# Configure root logging once at import time: INFO level with a
# timestamped "name level message" layout shared by all rfserver loggers.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s %(name)-15s %(levelname)-8s %(message)s',
    datefmt='%b %d %H:%M:%S'
)
# Register actions: outcome of a port registration attempt.
REGISTER_IDLE = 0        # recorded as idle, awaiting its peer/configuration
REGISTER_ASSOCIATED = 1  # matched an idle peer entry and was associated
REGISTER_ISL = 2         # port belongs to an inter-switch link
class RouteModTranslator(object):
    """Translate client RouteMods into flow rules for one datapath.

    Abstract base class: subclasses implement the ``handle_*`` hooks for
    a concrete pipeline layout (single table, NoviFlow multi-table,
    Corsa multi-table).  The fastpath helpers below are shared.
    """
    DROP_PRIORITY = Option.PRIORITY(PRIORITY_LOWEST + PRIORITY_BAND)
    ESTABLISH_PRIORITY = Option.PRIORITY(PRIORITY_LOWEST + PRIORITY_BAND + 1)
    CONTROLLER_PRIORITY = Option.PRIORITY(PRIORITY_HIGH)
    FASTPATH_PRIORITY = Option.PRIORITY(PRIORITY_HIGH + 1)
    DEFAULT_PRIORITY = Option.PRIORITY(PRIORITY_LOWEST + PRIORITY_BAND + 1000)
    # The table used to tag fastpath packets
    FP_TABLE = 1

    def __init__(self, dp_id, ct_id, rftable, isltable, conf, islconf,
                 fpconf, log, labeller):
        self.dp_id = dp_id
        self.ct_id = ct_id
        self.rftable = rftable
        self.isltable = isltable
        self.fpconf = fpconf
        self.islconf = islconf
        self.conf = conf
        self.labeller = labeller
        self.log = log

    def configure_datapath(self):
        """Return RouteMods that initialise a freshly joined datapath.

        Abstract hook.  Was ``raise Exception``; NotImplementedError (a
        RuntimeError subclass) states the intent and is still caught by
        any ``except Exception`` caller.
        """
        raise NotImplementedError

    def handle_controller_route_mod(self, entry, rm):
        """Return RouteMods steering *rm*'s traffic to the controller."""
        raise NotImplementedError

    def handle_route_mod(self, entry, rm):
        """Return RouteMods implementing *rm* on this datapath."""
        raise NotImplementedError

    def handle_isl_route_mod(self, entry, rm):
        """Return RouteMods carrying *rm* over an inter-switch link."""
        raise NotImplementedError

    def _get_fastpath_port(self):
        """Returns the fastpath port towards the controller.

        :returns: local port number of the single master fastpath link,
            or None when none exists.  An error is logged whenever the
            count is not exactly one.
        """
        master = []
        fpentries = self.fpconf.get_entries_for_dpid(self.ct_id, self.dp_id)
        fpentries += self.islconf.get_entries_by_dpid(self.ct_id, self.dp_id)
        for fp in fpentries:
            # Only links naming a master other than ourselves count.
            if hasattr(fp, "fp_master") and fp.fp_master:
                if fp.fp_master != self.dp_id:
                    master.append(fp)
        if len(master) != 1:
            self.log.error("We expect a single master fastpath link not %d"
                           % len(master))
        if len(master) == 0:
            return None
        master = master[0]
        # The link may be recorded from either end; pick our side's port.
        if master.dp_id == self.dp_id and master.ct_id == self.ct_id:
            master_port = master.dp_port
        else:
            master_port = master.rem_port
        return master_port

    def _register_fastpaths(self, usetables):
        """Adds rules for all fastpaths that traverse or are created by
        this forwarding element.

        :param bool usetables: True when a separate tagging table
            (FP_TABLE) is available on the datapath.
        """
        rms = []
        # Treat fastpaths and isls the same
        fpentries = self.fpconf.get_entries_for_dpid(self.ct_id, self.dp_id)
        fpentries += self.islconf.get_entries_by_dpid(self.ct_id, self.dp_id)
        master_port = self._get_fastpath_port()
        # Add entries for all directly attached ports
        ports = self.conf.get_config_for_dp(self.ct_id, self.dp_id)
        rms += self._register_fpports(master_port, ports, usetables)
        # Add entries for all fastpaths that we carry
        for fp in fpentries:
            if hasattr(fp, "fp_master") and fp.fp_master == self.dp_id:
                if fp.dp_id == self.dp_id and fp.ct_id == self.ct_id:
                    dp_port = fp.dp_port
                else:
                    dp_port = fp.rem_port
                rms += self._register_fpisl(master_port, fp, dp_port)
        return rms

    def _register_fpports(self, fp_port, ports, usetables):
        """Adds a rule for each local port to send controller traffic to
        the next fastpath or isl link."""
        rms = []
        if ports is None:  # was ``ports == None``
            ports = []
        for port in ports:
            if not hasattr(port, 'fp_label'):
                self.log.error("dp %d port %d has no fastpath label" %
                               (self.dp_id, port.dp_port))
                continue
            self.log.info("dp %d port %d registering fastpath %s" %
                          (self.dp_id, port.dp_port, port.fp_label))
            # Add rule to tag for every incoming packet based on inport
            # in our tagging table
            if usetables:
                rm = RouteMod(RMT_ADD, self.dp_id)
                rm.add_match(Match.IN_PORT(port.dp_port))
                self.labeller.rfaction_push_meta(port.fp_label, rm)
                rm.add_action(Action.OUTPUT(fp_port))
                rm.add_option(self.CONTROLLER_PRIORITY)
                rm.set_table(self.FP_TABLE)
                rms.append(rm)
            # For each incoming fp packet set the correct output port
            rm = RouteMod(RMT_ADD, self.dp_id)
            rm.add_match(Match.IN_PORT(fp_port))
            self.labeller.rfmatch_meta(port.fp_label, rm)
            self.labeller.rfaction_pop_meta(rm)
            rm.add_action(Action.OUTPUT(port.dp_port))
            rm.add_option(self.FASTPATH_PRIORITY)
            rms.append(rm)
        return rms

    def _register_fpisl(self, out_port, isl, dp_port):
        """Adds entries for all fastpaths carried over an isl by putting
        rules into this switch."""
        rms = []
        for label, _ in isl.fast_paths:
            # Add rule to forward match packets towards the controller
            rm = RouteMod(RMT_ADD, self.dp_id)
            rm.add_match(Match.IN_PORT(dp_port))
            self.labeller.rfmatch_meta(label, rm)
            rm.add_action(Action.OUTPUT(out_port))
            rm.add_option(self.FASTPATH_PRIORITY)
            rms.append(rm)
            # Add rule to forward towards ports
            rm = RouteMod(RMT_ADD, self.dp_id)
            rm.add_match(Match.IN_PORT(out_port))
            self.labeller.rfmatch_meta(label, rm)
            rm.add_action(Action.OUTPUT(dp_port))
            rm.add_option(self.FASTPATH_PRIORITY)
            rms.append(rm)
        return rms
class DefaultRouteModTranslator(RouteModTranslator):
    """RouteMod translator for a plain single-table OpenFlow pipeline."""

    def _send_rm_with_matches(self, rm, out_port, entries):
        """Fan *rm* out once per active entry, extra-matched on the
        entry's Ethernet address and ingress port (the egress port
        itself is skipped)."""
        rms = []
        for entry in entries:
            if out_port != entry.dp_port:
                if (entry.get_status() == RFENTRY_ACTIVE or
                        entry.get_status() == RFISL_ACTIVE):
                    local_rm = copy.deepcopy(rm)
                    local_rm.add_match(Match.ETHERNET(entry.eth_addr))
                    local_rm.add_match(Match.IN_PORT(entry.dp_port))
                    rms.append(local_rm)
        return rms

    def configure_datapath(self):
        """Build the initial rule set for a single-table datapath."""
        rms = []
        # delete all groups
        rm = RouteMod(RMT_DELETE_GROUP, self.dp_id)
        rms.append(rm)
        # delete all flows
        rm = RouteMod(RMT_DELETE, self.dp_id)
        rms.append(rm)
        # catch ipv4 and ipv6 and send to the controller so we can
        # do arp and install a rule for the flow
        rm = RouteMod(RMT_ADD, self.dp_id)
        rm.add_option(self.ESTABLISH_PRIORITY)
        rm.add_match(Match.ETHERTYPE(ETHERTYPE_IP))
        rms.extend(self.handle_controller_route_mod(self, rm))
        rm = RouteMod(RMT_ADD, self.dp_id)
        rm.add_option(self.ESTABLISH_PRIORITY)
        rm.add_match(Match.ETHERTYPE(ETHERTYPE_IPV6))
        rms.extend(self.handle_controller_route_mod(self, rm))
        # default drop
        rm = RouteMod(RMT_ADD, self.dp_id)
        rm.add_option(self.DROP_PRIORITY)
        rms.append(rm)
        # ARP
        rm = RouteMod(RMT_ADD, self.dp_id)
        rm.add_match(Match.ETHERTYPE(ETHERTYPE_ARP))
        rm.add_option(self.CONTROLLER_PRIORITY)
        rms.extend(self.handle_controller_route_mod(self, rm))
        # Register fastpath rules
        if self.fpconf.enabled:
            rms += self._register_fastpaths(False)
        return rms

    def handle_controller_route_mod(self, entry, rm):
        """Steer *rm*'s matched traffic to the controller, either via
        the fastpath link (tagging per ingress port) or directly with a
        CONTROLLER action."""
        if self.fpconf.enabled:
            rms = []
            master_port = self._get_fastpath_port()
            # If this only applies to a single port we only install for that
            for match_dict in rm.get_matches():
                match = Match.from_dict(match_dict)
                if match._type == RFMT_IN_PORT:
                    # BUG FIX: the raw match dict has no ``.value``
                    # attribute; deserialise it and use get_value(),
                    # matching the pattern used by the other translators.
                    port = self.conf.get_config_for_dp_port(
                        self.ct_id, self.dp_id, match.get_value())
                    self.labeller.rfaction_push_meta(port.fp_label, rm)
                    rm.add_action(Action.OUTPUT(master_port))
                    return [rm]
            # Install for all ports
            ports = self.conf.get_config_for_dp(self.ct_id, self.dp_id)
            if ports is None:  # was ``ports == None``
                ports = []
            for port in ports:
                new_rm = copy.deepcopy(rm)
                new_rm.add_match(Match.IN_PORT(port.dp_port))
                self.labeller.rfaction_push_meta(port.fp_label, new_rm)
                new_rm.add_action(Action.OUTPUT(master_port))
                rms.append(new_rm)
            return rms
        else:
            rm.add_action(Action.CONTROLLER())
            return [rm]

    def handle_route_mod(self, entry, rm):
        """Install *rm* for *entry*, rewriting its VM port to the
        datapath port and fanning out per active peer entry."""
        rms = []
        entries = self.rftable.get_entries(dp_id=entry.dp_id,
                                           ct_id=entry.ct_id)
        entries.extend(self.isltable.get_entries(dp_id=entry.dp_id,
                                                 ct_id=entry.ct_id))
        # Replace the VM port with the datapath port
        rm.add_action(Action.OUTPUT(entry.dp_port))
        rms.extend(self._send_rm_with_matches(rm, entry.dp_port, entries))
        return rms

    def handle_isl_route_mod(self, r, rm):
        """Install *rm* on the remote end of ISL *r*, rewriting MACs and
        forwarding over the link."""
        rms = []
        rm.set_id(self.dp_id)
        rm.set_table(0)
        rm.set_actions(None)
        rm.add_action(Action.SET_ETH_SRC(r.eth_addr))
        rm.add_action(Action.SET_ETH_DST(r.rem_eth_addr))
        rm.add_action(Action.OUTPUT(r.dp_port))
        entries = self.rftable.get_entries(dp_id=r.dp_id, ct_id=r.ct_id)
        rms.extend(self._send_rm_with_matches(rm, r.dp_port, entries))
        return rms
class SatelliteRouteModTranslator(DefaultRouteModTranslator):
    """Translator for 'satellite' datapaths reached over inter-switch
    links: ISL RouteMods become default IPv4/IPv6 routes over the link."""
    def __init__(self, dp_id, ct_id, rftable, isltable, conf, islconf, fpconf, log, labeller):
        super(SatelliteRouteModTranslator, self).__init__(
            dp_id, ct_id, rftable, isltable, conf, islconf, fpconf, log, labeller)
        # Remote MACs whose ISL rewrite actions were already attached;
        # prevents re-adding them on every RouteMod.
        self.sent_isl_dl = set()
    def handle_isl_route_mod(self, r, rm):
        """Install per-ethertype default routes pointing over ISL *r*.

        *rm* is mutated in place for each ethertype; the rewrite actions
        (set src/dst MAC, output to the ISL port) are attached only the
        first time a given remote MAC is seen.

        NOTE(review): if r.rem_eth_addr was recorded by an *earlier*
        call, rm is fanned out with whatever actions it arrived with --
        confirm that is intended.
        """
        rms = []
        for ethertype in (ETHERTYPE_IP, ETHERTYPE_IPV6):
            rm.set_matches(None)
            rm.add_match(Match.ETHERTYPE(ethertype))
            rm.set_options(None)
            rm.add_option(self.DEFAULT_PRIORITY)
            if r.rem_eth_addr not in self.sent_isl_dl:
                self.sent_isl_dl.add(r.rem_eth_addr)
                rm.set_id(self.dp_id)
                rm.set_table(0)
                rm.set_actions(None)
                rm.add_action(Action.SET_ETH_SRC(r.eth_addr))
                rm.add_action(Action.SET_ETH_DST(r.rem_eth_addr))
                rm.add_action(Action.OUTPUT(r.dp_port))
            entries = self.rftable.get_entries(dp_id=r.dp_id, ct_id=r.ct_id)
            rms.extend(self._send_rm_with_matches(rm, r.dp_port, entries))
        return rms
class NoviFlowMultitableRouteModTranslator(RouteModTranslator):
    """RouteMod translator for NoviFlow switches with a multi-table
    pipeline: table 0 -> ETHER_TABLE -> FIB_TABLE, plus FP_TABLE for
    fastpath tagging.

    The redundant ``__init__`` that only forwarded to super() was
    removed; construction is inherited unchanged from
    RouteModTranslator.
    """
    FP_TABLE = 3
    FIB_TABLE = 2
    ETHER_TABLE = 1

    def _send_rm_with_matches(self, rm, out_port, entries):
        """Emit *rm* once (not per entry) as long as at least one other
        active entry exists on this datapath."""
        rms = []
        for entry in entries:
            if out_port != entry.dp_port:
                if (entry.get_status() == RFENTRY_ACTIVE or
                        entry.get_status() == RFISL_ACTIVE):
                    rms.append(rm)
                    break
        return rms

    def configure_datapath(self):
        """Build the RouteMods that initialise the NoviFlow pipeline."""
        rms = []
        # delete all groups
        rm = RouteMod(RMT_DELETE_GROUP, self.dp_id)
        rms.append(rm)
        # default group - send to controller
        rm = RouteMod(RMT_ADD_GROUP, self.dp_id)
        rm.set_group(CONTROLLER_GROUP)
        rm.add_action(Action.CONTROLLER())
        rms.append(rm)
        # delete all flows
        rm = RouteMod(RMT_DELETE, self.dp_id)
        rms.append(rm)
        # catch ipv4 and ipv6 and send to the controller so we can
        # do arp and install a rule for the flow
        rm = RouteMod(RMT_ADD, self.dp_id)
        rm.add_option(self.ESTABLISH_PRIORITY)
        rm.add_match(Match.ETHERTYPE(ETHERTYPE_IP))
        # Noviflow sets all controller actions on the ether table for
        # performance reasons but this will not work there.
        # TODO test this on hardware and see if its a problem.
        for mod in self.handle_controller_route_mod(self, rm):
            mod.set_table(self.FIB_TABLE)
            rms.append(mod)
        rm = RouteMod(RMT_ADD, self.dp_id)
        rm.add_option(self.ESTABLISH_PRIORITY)
        rm.add_match(Match.ETHERTYPE(ETHERTYPE_IPV6))
        for mod in self.handle_controller_route_mod(self, rm):
            mod.set_table(self.FIB_TABLE)
            rms.append(mod)
        # default drop
        for table_id in (0, self.ETHER_TABLE, self.FIB_TABLE):
            rm = RouteMod(RMT_ADD, self.dp_id)
            rm.set_table(table_id)
            rm.add_option(self.DROP_PRIORITY)
            rms.append(rm)
        # Broadcast frames continue to the ether-type table.
        rm = RouteMod(RMT_ADD, self.dp_id)
        rm.add_match(Match.ETHERNET("ff:ff:ff:ff:ff:ff"))
        rm.add_action(Action.GOTO(self.ETHER_TABLE))
        rm.add_option(self.CONTROLLER_PRIORITY)
        rms.append(rm)
        # ARP
        rm = RouteMod(RMT_ADD, self.dp_id)
        rm.set_table(self.ETHER_TABLE)
        rm.add_match(Match.ETHERTYPE(ETHERTYPE_ARP))
        rm.add_option(self.CONTROLLER_PRIORITY)
        rms.extend(self.handle_controller_route_mod(self, rm))
        # IPv4
        rm = RouteMod(RMT_ADD, self.dp_id)
        rm.set_table(self.ETHER_TABLE)
        rm.add_match(Match.ETHERTYPE(ETHERTYPE_IP))
        rm.add_option(self.DEFAULT_PRIORITY)
        rm.add_action(Action.GOTO(self.FIB_TABLE))
        rms.append(rm)
        # Register fastpath rules
        if self.fpconf.enabled:
            rms += self._register_fastpaths(True)
        return rms

    def handle_controller_route_mod(self, entry, rm):
        """Steer matched traffic to the controller group (or the
        fastpath table), splitting off any Ethernet-destination match
        into a separate RMT_CONTROLLER rule on table 0.

        NOTE(review): configure_datapath calls this with the translator
        itself as *entry*; only entry.dp_id is read, which both provide.
        """
        rms = []
        if not self.fpconf.enabled:
            rm.add_action(Action.GROUP(CONTROLLER_GROUP))
        else:
            rm.add_action(Action.GOTO(self.FP_TABLE))
        # should be FIB_TABLE, but see NoviFlow note.
        rm.set_table(self.ETHER_TABLE)
        dl_dst = None
        orig_matches = rm.get_matches()
        rm.set_matches(None)
        for match_dict in orig_matches:
            match = Match.from_dict(match_dict)
            match_type = match.type_to_str(match._type)
            if match_type == "RFMT_ETHERNET":
                dl_dst = match
            else:
                rm.add_match(match)
        rms.append(rm)
        if dl_dst is not None:
            hw_rm = RouteMod(RMT_CONTROLLER, entry.dp_id)
            hw_rm.set_id(rm.get_id())
            hw_rm.set_vm_port(rm.get_vm_port())
            hw_rm.add_match(dl_dst)
            hw_rm.add_action(Action.GOTO(self.ETHER_TABLE))
            hw_rm.add_option(self.DEFAULT_PRIORITY)
            rms.append(hw_rm)
        return rms

    def handle_route_mod(self, entry, rm):
        """Install *rm* in the FIB table, output rewritten to the
        datapath port."""
        rms = []
        entries = self.rftable.get_entries(dp_id=entry.dp_id,
                                           ct_id=entry.ct_id)
        entries.extend(self.isltable.get_entries(dp_id=entry.dp_id,
                                                 ct_id=entry.ct_id))
        # Replace the VM port with the datapath port
        rm.add_action(Action.OUTPUT(entry.dp_port))
        rm.set_table(self.FIB_TABLE)
        rm.set_options(None)
        rm.add_option(self.CONTROLLER_PRIORITY)
        rms.extend(self._send_rm_with_matches(rm, entry.dp_port, entries))
        return rms

    def handle_isl_route_mod(self, r, rm):
        """Install *rm* over ISL *r*: a FIB rewrite/output rule plus a
        table-0 rule matching the ISL MAC that jumps to the FIB table."""
        rms = []
        rm.set_id(self.dp_id)
        rm.set_table(self.FIB_TABLE)
        rm.set_options(None)
        rm.add_option(self.CONTROLLER_PRIORITY)
        rm.set_actions(None)
        rm.add_action(Action.SET_ETH_SRC(r.eth_addr))
        rm.add_action(Action.SET_ETH_DST(r.rem_eth_addr))
        rm.add_action(Action.OUTPUT(r.dp_port))
        entries = self.rftable.get_entries(dp_id=r.dp_id, ct_id=r.ct_id)
        rms.extend(self._send_rm_with_matches(rm, r.dp_port, entries))
        # Add entry to table 0 to match the ISL MAC and passes the packet
        # to FIB table
        rm = copy.deepcopy(rm)
        rm.set_table(0)
        rm.set_actions(None)
        rm.add_action(Action.GOTO(self.FIB_TABLE))
        rm.set_matches(None)
        if self.dp_id == r.dp_id:
            rm.add_match(Match.ETHERNET(r.eth_addr))
        else:
            rm.add_match(Match.ETHERNET(r.rem_eth_addr))
        rms.append(rm)
        return rms
class CorsaMultitableRouteModTranslator(RouteModTranslator):
    """RouteMod translator for Corsa switches with a fixed multi-table
    pipeline (VLAN/MPLS -> VLAN -> ether-type -> CoS -> FIB -> local).

    Output actions are funnelled through groups, one per destination
    MAC, cached in ``actions_to_groupid``.
    """
    DROP_PRIORITY = Option.PRIORITY(0)
    CONTROLLER_PRIORITY = Option.PRIORITY(255)
    DEFAULT_PRIORITY = Option.PRIORITY(PRIORITY_LOWEST + PRIORITY_BAND + 1)
    VLAN_MPLS_TABLE = 1
    VLAN_TABLE = 2
    MPLS_TABLE = 3 # not currently implemented
    ETHER_TABLE = 4
    COS_MAP_TABLE = 5
    FIB_TABLE = 6
    LOCAL_TABLE = 9
    def __init__(self, dp_id, ct_id, rftable, isltable, conf, islconf, fpconf, log, labeller):
        super(CorsaMultitableRouteModTranslator, self).__init__(
            dp_id, ct_id, rftable, isltable, conf, islconf, fpconf, log, labeller)
        # Group ids are allocated sequentially per destination MAC,
        # starting after the reserved CONTROLLER_GROUP.
        self.last_groupid = CONTROLLER_GROUP
        self.actions_to_groupid = {}
    def configure_datapath(self):
        """Build the RouteMods that initialise the Corsa pipeline."""
        rms = []
        # delete all groups
        rm = RouteMod(RMT_DELETE_GROUP, self.dp_id)
        rms.append(rm)
        # delete all flows
        rm = RouteMod(RMT_DELETE, self.dp_id)
        rms.append(rm)
        # default drop
        for table_id in (0, self.VLAN_MPLS_TABLE, self.VLAN_TABLE,
                         self.ETHER_TABLE, self.FIB_TABLE):
            rm = RouteMod(RMT_ADD, self.dp_id)
            rm.set_table(table_id)
            rm.add_option(self.DROP_PRIORITY)
            rms.append(rm)
        ## Table 0
        rm = RouteMod(RMT_ADD, self.dp_id)
        rm.add_match(Match.ETHERNET("ff:ff:ff:ff:ff:ff"))
        rm.add_action(Action.GOTO(self.VLAN_MPLS_TABLE))
        rm.add_option(self.CONTROLLER_PRIORITY)
        rms.append(rm)
        ## VLAN/MPLS table 1
        rm = RouteMod(RMT_ADD, self.dp_id)
        rm.set_table(self.VLAN_MPLS_TABLE)
        rm.add_match(Match.ETHERTYPE(ETHERTYPE_IP))
        rm.add_action(Action.GOTO(self.VLAN_TABLE))
        rm.add_option(self.CONTROLLER_PRIORITY)
        rms.append(rm)
        rm = RouteMod(RMT_ADD, self.dp_id)
        rm.set_table(self.VLAN_MPLS_TABLE)
        rm.add_match(Match.ETHERTYPE(ETHERTYPE_ARP))
        rm.add_action(Action.GOTO(self.VLAN_TABLE))
        rm.add_option(self.CONTROLLER_PRIORITY)
        rms.append(rm)
        rm = RouteMod(RMT_ADD, self.dp_id)
        rm.set_table(self.VLAN_MPLS_TABLE)
        rm.add_match(Match.ETHERTYPE(0x8100))
        rm.add_action(Action.GOTO(self.VLAN_TABLE))
        rm.add_option(self.CONTROLLER_PRIORITY)
        rms.append(rm)
        ## VLAN table 2
        # no default flows other than drop.
        ## Ether type table 3
        # ARP
        rm = RouteMod(RMT_ADD, self.dp_id)
        rm.set_table(self.ETHER_TABLE)
        rm.add_match(Match.ETHERTYPE(ETHERTYPE_ARP))
        rm.add_action(Action.CONTROLLER())
        rm.add_option(self.CONTROLLER_PRIORITY)
        rms.append(rm)
        # IPv4
        rm = RouteMod(RMT_ADD, self.dp_id)
        rm.set_table(self.ETHER_TABLE)
        rm.add_match(Match.ETHERTYPE(ETHERTYPE_IP))
        rm.add_option(self.CONTROLLER_PRIORITY)
        rm.add_action(Action.GOTO(self.COS_MAP_TABLE))
        rms.append(rm)
        # COS table 5 (just map to FIB table)
        rm = RouteMod(RMT_ADD, self.dp_id)
        rm.set_table(self.COS_MAP_TABLE)
        rm.add_action(Action.GOTO(self.FIB_TABLE))
        rm.add_option(self.DROP_PRIORITY)
        rms.append(rm)
        ## Local table temporary catch-all entry (table 9)
        rm = RouteMod(RMT_ADD, self.dp_id)
        rm.set_table(self.LOCAL_TABLE)
        rm.add_action(Action.CONTROLLER())
        rm.add_option(self.CONTROLLER_PRIORITY)
        rms.append(rm)
        return rms
    def _send_rm_with_matches(self, rm, out_port, entries):
        """Emit *rm* once via a per-destination-MAC group.

        Rewrites rm's action list: SWAP_VLAN_ID becomes SET_VLAN_ID, the
        SET_ETH_DST value keys the group cache, and the final rm carries
        only a GROUP action.  A new group RouteMod is emitted the first
        time a destination MAC is seen.
        """
        rms = []
        for entry in entries:
            if out_port != entry.dp_port:
                if (entry.get_status() == RFENTRY_ACTIVE or
                    entry.get_status() == RFISL_ACTIVE):
                    dst_eth = None
                    actions = rm.actions
                    rm.set_actions(None)
                    for action_dict in actions:
                        action = Action.from_dict(action_dict)
                        action_type = action.type_to_str(action._type)
                        if action_type == 'RFAT_SET_ETH_DST':
                            dst_eth = action.get_value()
                        elif action_type == 'RFAT_SWAP_VLAN_ID':
                            vlan_id = action.get_value()
                            action = Action.SET_VLAN_ID(vlan_id)
                        rm.add_action(action)
                    if dst_eth not in self.actions_to_groupid:
                        self.last_groupid += 1
                        new_groupid = self.last_groupid
                        self.actions_to_groupid[dst_eth] = new_groupid
                        group_rm = RouteMod(RMT_ADD_GROUP, self.dp_id)
                        group_rm.set_group(new_groupid)
                        group_rm.set_actions(rm.actions)
                        rms.append(group_rm)
                        self.log.info("adding new group %u for Ethernet destination %s" % (
                            new_groupid, dst_eth))
                    rm.set_actions(None)
                    rm.add_action(Action.GROUP(self.actions_to_groupid[dst_eth]))
                    rms.append(rm)
                    break
        return rms
    def handle_controller_route_mod(self, entry, rm):
        """Steer matched traffic towards the local (controller) table.

        Splits the incoming match list: IPv4 matches go on the FIB rule,
        a VLAN match spawns a strip rule in the VLAN table, and an
        Ethernet match spawns an RMT_CONTROLLER rule on table 0.
        """
        rms = []
        rm.add_action(Action.GOTO(self.LOCAL_TABLE))
        rm.set_table(self.FIB_TABLE)
        dl_dst = None
        dst_vlan = None
        orig_matches = rm.get_matches()
        rm.set_matches(None)
        for match_dict in orig_matches:
            match = Match.from_dict(match_dict)
            match_type = match.type_to_str(match._type)
            if match_type == "RFMT_ETHERNET":
                dl_dst = match
            # TODO: support more than IP address matches
            # TODO: support more than IPv4
            elif match_type == "RFMT_IPV4":
                rm.add_match(Match.ETHERTYPE(ETHERTYPE_IP))
                rm.add_match(match)
            elif match_type == "RFMT_VLAN_ID":
                dst_vlan = match.get_value()
        if rm.matches:
            rms.append(rm)
        if dst_vlan is not None:
            vlan_rm = RouteMod(RMT_ADD, self.dp_id)
            vlan_rm.set_table(self.VLAN_TABLE)
            vlan_rm.add_match(Match.IN_PORT(entry.dp_port))
            vlan_rm.add_match(Match.VLAN_ID(dst_vlan))
            vlan_rm.add_action(Action.STRIP_VLAN_DEFERRED())
            vlan_rm.add_action(Action.GOTO(self.ETHER_TABLE))
            vlan_rm.add_option(self.CONTROLLER_PRIORITY)
            self.log.info("adding new VLAN strip rule for VLAN %s" % (dst_vlan))
            rms.append(vlan_rm)
        if dl_dst is not None:
            hw_rm = RouteMod(RMT_CONTROLLER, entry.dp_id)
            hw_rm.set_id(rm.get_id())
            hw_rm.set_vm_port(rm.get_vm_port())
            hw_rm.add_match(dl_dst)
            hw_rm.add_action(Action.GOTO(self.VLAN_MPLS_TABLE))
            hw_rm.add_option(self.DEFAULT_PRIORITY)
            rms.append(hw_rm)
        return rms
    def handle_route_mod(self, entry, rm):
        """Install *rm* in the FIB table, output rewritten to the
        datapath port and funnelled through the group cache."""
        rms = []
        entries = self.rftable.get_entries(dp_id=entry.dp_id,
                                           ct_id=entry.ct_id)
        entries.extend(self.isltable.get_entries(dp_id=entry.dp_id,
                                                 ct_id=entry.ct_id))
        # Replace the VM port with the datapath port
        rm.add_action(Action.OUTPUT(entry.dp_port))
        rm.set_table(self.FIB_TABLE)
        rms.extend(self._send_rm_with_matches(rm, entry.dp_port, entries))
        return rms
class RFServer(RFProtocolFactory, IPC.IPCMessageProcessor):
    def __init__(self, configfile, islconffile, multitabledps, satellitedps, fpconf):
        """Load configuration, build state tables and start IPC.

        :param str configfile: port-mapping configuration file
        :param str islconffile: inter-switch link configuration file
        :param str multitabledps: comma-separated hex dp_ids that
            support multiple tables
        :param str satellitedps: comma-separated hex dp_ids that are
            ISL satellites
        :param fpconf: fastpath configuration source
        """
        self.config = RFConfig(configfile)
        self.islconf = RFISLConf(islconffile)
        self.fpconf = RFFPConf(fpconf)
        self.labeller = MetaVLAN()
        # An unparsable (or empty) dp_id list silently becomes the empty
        # set.
        try:
            self.multitabledps = set([int(x, 16) for x in multitabledps.split(",")])
        except ValueError:
            self.multitabledps = set()
        try:
            self.satellitedps = set([int(x, 16) for x in satellitedps.split(",")])
        except ValueError:
            self.satellitedps = set()
        # Initialise state tables
        self.rftable = RFTable()
        self.isltable = RFISLTable()
        # dp_id -> RouteModTranslator, populated as datapaths register.
        self.route_mod_translator = {}
        # Logging
        self.log = logging.getLogger("rfserver")
        if self.satellitedps:
            self.log.info("Datapaths that are ISL satellites: %s",
                          list(self.satellitedps))
        if self.multitabledps:
            self.log.info("Datapaths that support multiple tables: %s",
                          list(self.multitabledps))
        if self.fpconf:
            self.log.info("List of fastpath attachments: %s", list(self.fpconf.get_entries_all()))
        # If we have at least one fastpath link we are using fastpath
        if len(self.fpconf.get_entries_all()) > 0:
            self.fpconf.enabled = True
        else:
            self.fpconf.enabled = False
        if self.fpconf.enabled:
            self.log.info("Fastpath is enabled")
        else:
            self.log.info("Fastpath is disabled")
        fp_allocate_labels(self.labeller, self.log, self.config, self.fpconf, self.islconf)
        # Queues decouple IPC receive callbacks from sends; ipc_lock
        # serialises access to the shared IPC service.
        self.ack_q = Queue.Queue()
        self.dp_q = Queue.Queue()
        self.ipc_lock = threading.Lock()
        self.routemod_outstanding = threading.Event()
        self.ipc = IPCService.for_server(RFSERVER_ID)
        # Daemon worker thread drains dp_q towards the proxies.
        self.worker = threading.Thread(target=self.dp_worker)
        self.worker.daemon = True
        self.worker.start()
        self.ipc.listen(RFCLIENT_RFSERVER_CHANNEL, self, self, False)
        self.ipc.listen(RFSERVER_RFPROXY_CHANNEL, self, self, True)
def ipc_send(self, channel, channel_id, msg):
self.ipc_lock.acquire()
self.ipc.send(channel, channel_id, msg)
self.ipc_lock.release()
    def dp_worker(self):
        """Daemon-thread body: forever forward queued (ct_id, RouteMod)
        pairs from dp_q to the proxy channel, one at a time."""
        while True:
            (ct_id, rm) = self.dp_q.get(block=True)
            self.ipc_send(RFSERVER_RFPROXY_CHANNEL, ct_id, rm)
            self.dp_q.task_done()
    def send_routemod_acks(self):
        """Drain ack_q, sending each queued RouteMod ACK back to its VM.

        NOTE(review): empty()/get() is not atomic; this appears safe
        only while a single thread drains the queue -- confirm.
        """
        while not self.ack_q.empty():
            (vm_id, ack) = self.ack_q.get()
            self.ipc_send(RFCLIENT_RFSERVER_CHANNEL, vm_id, ack)
            self.ack_q.task_done()
def process(self, from_, to, channel, msg):
type_ = msg.get_type()
if channel == RFCLIENT_RFSERVER_CHANNEL:
if type_ == ROUTE_MOD:
self.register_route_mod(msg)
elif type_ == PORT_REGISTER:
self.register_vm_port(msg.get_vm_id(), msg.get_vm_port(),
msg.get_hwaddress())
elif channel == RFSERVER_RFPROXY_CHANNEL:
if type_ == DATAPATH_PORT_REGISTER:
self.register_dp_port(msg.get_ct_id(),
msg.get_dp_id(),
msg.get_dp_port())
elif type_ == DATAPATH_DOWN:
self.set_dp_down(msg.get_ct_id(), msg.get_dp_id())
elif type_ == VIRTUAL_PLANE_MAP:
self.map_port(msg.get_vm_id(), msg.get_vm_port(),
msg.get_vs_id(), msg.get_vs_port())
elif type_ == ROUTE_MOD:
self.send_routemod_acks()
    # Port register methods
    def register_vm_port(self, vm_id, vm_port, eth_addr):
        """Record a client (VM) port, associating it with its configured
        datapath port if that port has already registered as idle.

        :param vm_id: client VM identifier
        :param int vm_port: client port number
        :param eth_addr: port's Ethernet address
        """
        action = None
        config_entry = self.config.get_config_for_vm_port(vm_id, vm_port)
        if config_entry is None:
            # Register idle VM awaiting for configuration
            action = REGISTER_IDLE
            self.log.warning('No config entry for client port (vm_id=%s, vm_port=%i)'
                             % (format_id(vm_id), vm_port))
        else:
            entry = self.rftable.get_entry_by_dp_port(config_entry.ct_id,
                                                      config_entry.dp_id,
                                                      config_entry.dp_port)
            # If there's no entry, we have no DP, register VM as idle
            if entry is None:
                action = REGISTER_IDLE
            # If there's an idle DP entry matching configuration, associate
            elif entry.get_status() == RFENTRY_IDLE_DP_PORT:
                action = REGISTER_ASSOCIATED
        # Apply action
        if action == REGISTER_IDLE:
            self.rftable.set_entry(RFEntry(vm_id=vm_id, vm_port=vm_port,
                                           eth_addr=eth_addr))
            self.log.info("Registering client port as idle (vm_id=%s, "
                          "vm_port=%i, eth_addr=%s)" % (format_id(vm_id),
                                                        vm_port, eth_addr))
        elif action == REGISTER_ASSOCIATED:
            entry.associate(vm_id, vm_port, eth_addr=eth_addr)
            self.rftable.set_entry(entry)
            self.log.info("Registering client port and associating to "
                          "datapath port (vm_id=%s, vm_port=%i, "
                          "eth_addr = %s, dp_id=%s, dp_port=%s)"
                          % (format_id(vm_id), vm_port, eth_addr,
                             format_id(entry.dp_id), entry.dp_port))
def queue_routemod_ack(self, ct_id, vm_id, vm_port):
self.ack_q.put((str(vm_id),
PortConfig(vm_id=vm_id, vm_port=vm_port, operation_id=PCT_ROUTEMOD_ACK)))
def send_route_mod(self, ct_id, rm):
rm.add_option(Option.CT_ID(ct_id))
self.dp_q.put((str(ct_id), rm))
# Handle RouteMod messages (type ROUTE_MOD)
#
# Takes a RouteMod, replaces its VM id,port with the associated DP id,port
# and sends to the corresponding controller
def register_route_mod(self, rm):
vm_id = rm.get_id()
vm_port = rm.get_vm_port()
# Find the (vmid, vm_port), (dpid, dpport) pair
entry = self.rftable.get_entry_by_vm_port(vm_id, vm_port)
translator = self.route_mod_translator[entry.dp_id]
# If we can't find an associated datapath for this RouteMod,
# drop it.
if entry is None or entry.get_status() == RFENTRY_IDLE_VM_PORT:
self.log.info("Received RouteMod destined for unknown "
"datapath - Dropping (vm_id=%s, vm_port=%d)" %
(format_id(vm_id), vm_port))
return
# Replace the VM id,port with the Datapath id.port
rm.set_id(int(entry.dp_id))
rms = []
if rm.get_mod() is RMT_CONTROLLER:
rms.extend(translator.handle_controller_route_mod(entry, rm))
elif rm.get_mod() in (RMT_ADD, RMT_DELETE):
rms.extend(translator.handle_route_mod(entry, rm))
remote_dps = self.isltable.get_entries(rem_ct=entry.ct_id,
rem_id=entry.dp_id)
for r in remote_dps:
if r.get_status() == RFISL_ACTIVE:
local_rm = copy.deepcopy(rm)
remote_translator = self.route_mod_translator[int(r.dp_id)]
rms.extend(remote_translator.handle_isl_route_mod(r, local_rm))
else:
self.log.info("Received RouteMod with unknown type: %s " % rm)
for rm in rms:
self.send_route_mod(entry.ct_id, rm)
self.queue_routemod_ack(entry.ct_id, vm_id, vm_port)
    # DatapathPortRegister methods
    def register_dp_port(self, ct_id, dp_id, dp_port):
        """Record a datapath port, associating it with its configured
        client port, registering it idle, or handing it to the ISL path.

        :param ct_id: controller identifier
        :param dp_id: datapath identifier
        :param int dp_port: datapath port number
        """
        stop = self.config_dp(ct_id, dp_id)
        if stop:
            return
        # The logic down here is pretty much the same as register_vm_port
        action = None
        config_entry = self.config.get_config_for_dp_port(ct_id, dp_id,
                                                          dp_port)
        if config_entry is None:
            islconfs = self.islconf.get_entries_by_port(ct_id, dp_id, dp_port)
            if islconfs:
                action = REGISTER_ISL
            else:
                # Register idle DP awaiting for configuration
                action = REGISTER_IDLE
        else:
            entry = self.rftable.get_entry_by_vm_port(config_entry.vm_id,
                                                      config_entry.vm_port)
            # If there's no entry, we have no VM, register DP as idle
            if entry is None:
                action = REGISTER_IDLE
            # If there's an idle VM entry matching configuration, associate
            elif entry.get_status() == RFENTRY_IDLE_VM_PORT:
                action = REGISTER_ASSOCIATED
        # Apply action
        if action == REGISTER_IDLE:
            self.rftable.set_entry(RFEntry(ct_id=ct_id, dp_id=dp_id,
                                           dp_port=dp_port))
            self.log.info("Registering datapath port as idle (dp_id=%s, "
                          "dp_port=%i)" % (format_id(dp_id), dp_port))
        elif action == REGISTER_ASSOCIATED:
            entry.associate(dp_id, dp_port, ct_id)
            self.rftable.set_entry(entry)
            self.log.info("Registering datapath port and associating to "
                          "client port (dp_id=%s, dp_port=%i, vm_id=%s, "
                          "vm_port=%s)" % (format_id(dp_id), dp_port,
                                           format_id(entry.vm_id),
                                           entry.vm_port))
        elif action == REGISTER_ISL:
            # ``islconfs`` is only bound on the REGISTER_ISL branch above,
            # which is the only path that reaches here.
            self._register_islconf(islconfs, ct_id, dp_id, dp_port)
    def _register_islconf(self, c_entries, ct_id, dp_id, dp_port):
        """Register this DP port against each matching ISL configuration.

        For every config entry: look for the peer's ISL table entry; if
        it is absent, register this side as idle, otherwise associate the
        two endpoints (creating/updating the mirror entry for the remote
        side).
        """
        for conf in c_entries:
            entry = None
            eth_addr = None
            # Decide which side of the configured link we are, and look
            # up the *other* side's table entry.
            if conf.rem_id != dp_id or conf.rem_ct != ct_id:
                entry = self.isltable.get_entry_by_addr(conf.rem_ct,
                                                        conf.rem_id,
                                                        conf.rem_port,
                                                        conf.rem_eth_addr)
                eth_addr = conf.eth_addr
            else:
                entry = self.isltable.get_entry_by_addr(conf.ct_id,
                                                        conf.dp_id,
                                                        conf.dp_port,
                                                        conf.eth_addr)
                eth_addr = conf.rem_eth_addr
            if entry is None:
                # Peer not seen yet: register our endpoint as idle.
                n_entry = RFISLEntry(vm_id=conf.vm_id, ct_id=ct_id,
                                     dp_id=dp_id, dp_port=dp_port,
                                     eth_addr=eth_addr)
                self.isltable.set_entry(n_entry)
                self.log.info("Registering ISL port as idle "
                              "(dp_id=%s, dp_port=%i, eth_addr=%s)" %
                              (format_id(dp_id), dp_port, eth_addr))
            elif entry.get_status() == RFISL_IDLE_DP_PORT:
                # Peer is idle: associate it with us, then create or
                # update the mirror entry describing our side.
                entry.associate(ct_id, dp_id, dp_port, eth_addr)
                self.isltable.set_entry(entry)
                n_entry = self.isltable.get_entry_by_remote(entry.ct_id,
                                                            entry.dp_id,
                                                            entry.dp_port,
                                                            entry.eth_addr)
                if n_entry is None:
                    n_entry = RFISLEntry(vm_id=entry.vm_id, ct_id=ct_id,
                                         dp_id=dp_id, dp_port=dp_port,
                                         eth_addr=entry.rem_eth_addr,
                                         rem_ct=entry.ct_id,
                                         rem_id=entry.dp_id,
                                         rem_port=entry.dp_port,
                                         rem_eth_addr=entry.eth_addr)
                    self.isltable.set_entry(n_entry)
                else:
                    n_entry.associate(ct_id, dp_id, dp_port, eth_addr)
                    self.isltable.set_entry(n_entry)
                self.log.info("Registering ISL port and associating to "
                              "remote ISL port (ct_id=%s, dp_id=%s, "
                              "dp_port=%s, rem_ct=%s, rem_id=%s, "
                              "rem_port=%s)" % (ct_id, format_id(dp_id),
                                                dp_port, entry.ct_id,
                                                format_id(entry.dp_id),
                                                entry.dp_port))
def send_datapath_config_messages(self, ct_id, dp_id):
rms = self.route_mod_translator[dp_id].configure_datapath()
for rm in rms:
self.send_route_mod(ct_id, rm)
def config_dp(self, ct_id, dp_id):
if is_rfvs(dp_id):
return True
else:
if (self.rftable.is_dp_registered(ct_id, dp_id) or
self.isltable.is_dp_registered(ct_id, dp_id)):
if dp_id not in self.route_mod_translator:
self.log.info("Configuring datapath (dp_id=%s)" % format_id(dp_id))
if dp_id in self.multitabledps:
self.route_mod_translator[dp_id] = NoviFlowMultitableRouteModTranslator(
dp_id, ct_id, self.rftable, self.isltable, self.config, self.islconf,
self.fpconf, self.log, self.labeller)
elif dp_id in self.satellitedps:
self.route_mod_translator[dp_id] = SatelliteRouteModTranslator(
dp_id, ct_id, self.rftable, self.isltable, self.config, self.islconf,
self.fpconf, self.log, self.labeller)
else:
self.route_mod_translator[dp_id] = DefaultRouteModTranslator(
dp_id, ct_id, self.rftable, self.isltable, self.config, self.islconf,
self.fpconf, self.log, self.labeller)
self.send_datapath_config_messages(ct_id, dp_id)
return False
# DatapathDown methods
def set_dp_down(self, ct_id, dp_id):
for entry in self.rftable.get_dp_entries(ct_id, dp_id):
# For every port registered in that datapath, put it down
self.set_dp_port_down(entry.ct_id, entry.dp_id, entry.dp_port)
for entry in self.isltable.get_dp_entries(ct_id, dp_id):
entry.make_idle(RFISL_IDLE_REMOTE)
self.isltable.set_entry(entry)
for entry in self.isltable.get_entries(rem_ct=ct_id, rem_id=dp_id):
entry.make_idle(RFISL_IDLE_DP_PORT)
self.isltable.set_entry(entry)
self.log.info("Datapath down (dp_id=%s)" % format_id(dp_id))
def set_dp_port_down(self, ct_id, dp_id, dp_port):
entry = self.rftable.get_entry_by_dp_port(ct_id, dp_id, dp_port)
if entry is not None:
# If the DP port is registered, delete it and leave only the
# associated VM port. Reset this VM port so it can be reused.
vm_id, vm_port = entry.vm_id, entry.vm_port
entry.make_idle(RFENTRY_IDLE_VM_PORT)
self.rftable.set_entry(entry)
if vm_id is not None:
self.reset_vm_port(vm_id, vm_port)
self.log.debug("Datapath port down (dp_id=%s, dp_port=%i)" %
(format_id(dp_id), dp_port))
def reset_vm_port(self, vm_id, vm_port):
if vm_id is None:
return
self.ipc_send(RFCLIENT_RFSERVER_CHANNEL, str(vm_id),
PortConfig(vm_id=vm_id, vm_port=vm_port,
operation_id=PCT_RESET))
self.log.info("Resetting client port (vm_id=%s, vm_port=%i)" %
(format_id(vm_id), vm_port))
# PortMap methods
def map_port(self, vm_id, vm_port, vs_id, vs_port):
entry = self.rftable.get_entry_by_vm_port(vm_id, vm_port)
if entry is not None and entry.get_status() == RFENTRY_ASSOCIATED:
# If the association is valid, activate it
entry.activate(vs_id, vs_port)
self.rftable.set_entry(entry)
msg = DataPlaneMap(ct_id=entry.ct_id,
dp_id=entry.dp_id, dp_port=entry.dp_port,
vs_id=vs_id, vs_port=vs_port)
self.ipc_send(RFSERVER_RFPROXY_CHANNEL, str(entry.ct_id), msg)
msg = PortConfig(vm_id=vm_id, vm_port=vm_port,
operation_id=PCT_MAP_SUCCESS)
self.ipc_send(RFCLIENT_RFSERVER_CHANNEL, str(entry.vm_id), msg)
self.log.info("Mapping client-datapath association "
"(vm_id=%s, vm_port=%i, dp_id=%s, "
"dp_port=%i, vs_id=%s, vs_port=%i)" %
(format_id(entry.vm_id), entry.vm_port,
format_id(entry.dp_id), entry.dp_port,
format_id(entry.vs_id), entry.vs_port))
if __name__ == "__main__":
    description = 'RFServer co-ordinates RFClient and RFProxy instances, ' \
                  'listens for route updates, and configures flow tables'
    epilog = 'Report bugs to: https://github.com/routeflow/RouteFlow/issues'
    # Default configuration files live next to this script.
    script_dir = os.path.dirname(os.path.realpath(__file__))
    config = script_dir + "/config.csv"
    islconf = script_dir + "/islconf.csv"
    parser = argparse.ArgumentParser(description=description, epilog=epilog)
    parser.add_argument('configfile', default=config,
                        help='VM-VS-DP mapping configuration file')
    parser.add_argument('-i', '--islconfig', default=islconf,
                        help='ISL mapping configuration file')
    parser.add_argument('-m', '--multitabledps', default='',
                        help='List of datapaths that support multiple tables')
    parser.add_argument('-s', '--satellitedps', default='',
                        help='List of datapaths that default forward to ISL peer')
    parser.add_argument('-f', '--fastpaths', default='',
                        help='List of "fastpath" link(s) to the controller')
    args = parser.parse_args()
    # RFServer starts serving from its constructor.
    server = RFServer(args.configfile, args.islconfig, args.multitabledps,
                      args.satellitedps, args.fastpaths)
| |
#! /usr/bin/env python3
#
# Copyright 2017-2020 Linaro Limited
# Copyright 2019-2020 Arm Limited
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import click
import getpass
import imgtool.keys as keys
import sys
from imgtool import image, imgtool_version
from imgtool.version import decode_version
from .keys import (
RSAUsageError, ECDSAUsageError, Ed25519UsageError, X25519UsageError)
# Refuse to run on interpreters older than the minimum supported version.
MIN_PYTHON_VERSION = (3, 6)
if sys.version_info < MIN_PYTHON_VERSION:
    sys.exit("Python %s.%s or newer is required by imgtool."
             % MIN_PYTHON_VERSION)
def gen_rsa2048(keyfile, passwd):
    """Generate an RSA-2048 keypair; write the private key to *keyfile*."""
    new_key = keys.RSA.generate()
    new_key.export_private(path=keyfile, passwd=passwd)
def gen_rsa3072(keyfile, passwd):
    """Generate an RSA-3072 keypair; write the private key to *keyfile*."""
    new_key = keys.RSA.generate(key_size=3072)
    new_key.export_private(path=keyfile, passwd=passwd)
def gen_ecdsa_p256(keyfile, passwd):
    """Generate an ECDSA P-256 keypair; write the private key to *keyfile*.

    Consistency fix: pass the destination as the ``path=`` keyword like
    every other generator in this module (the original passed it
    positionally).
    """
    keys.ECDSA256P1.generate().export_private(path=keyfile, passwd=passwd)
def gen_ecdsa_p224(keyfile, passwd):
    """Placeholder: ECDSA P-224 key generation is not implemented yet."""
    print("TODO: p-224 not yet implemented")
def gen_ed25519(keyfile, passwd):
    """Generate an Ed25519 keypair; write the private key to *keyfile*."""
    new_key = keys.Ed25519.generate()
    new_key.export_private(path=keyfile, passwd=passwd)
def gen_x25519(keyfile, passwd):
    """Generate an X25519 keypair; write the private key to *keyfile*."""
    new_key = keys.X25519.generate()
    new_key.export_private(path=keyfile, passwd=passwd)
# Output languages supported by `getpub`.
valid_langs = ['c', 'rust']
# Maps the -t/--type choice of `keygen` to the generator implementing it.
keygens = {
    'rsa-2048': gen_rsa2048,
    'rsa-3072': gen_rsa3072,
    'ecdsa-p256': gen_ecdsa_p256,
    'ecdsa-p224': gen_ecdsa_p224,
    'ed25519': gen_ed25519,
    'x25519': gen_x25519,
}
def load_key(keyfile):
    """Load a key from *keyfile*, prompting for a passphrase when needed.

    Returns None when the supplied passphrase does not unlock the key.
    """
    # TODO: better handling of invalid pass-phrase
    key = keys.load(keyfile)
    if key is None:
        # Protected key: ask once and retry with the passphrase.
        passwd = getpass.getpass("Enter key passphrase: ").encode('utf-8')
        key = keys.load(keyfile, passwd)
    return key
def get_password():
    """Prompt twice for a new passphrase until both entries match.

    Returns the passphrase as UTF-8 bytes, the form expected by the key
    export routines.
    """
    while True:
        first = getpass.getpass("Enter key passphrase: ")
        second = getpass.getpass("Reenter passphrase: ")
        if first == second:
            # Password must be bytes, always use UTF-8 for consistent
            # encoding.
            return first.encode('utf-8')
        print("Passwords do not match, try again")
@click.option('-p', '--password', is_flag=True,
              help='Prompt for password to protect key')
@click.option('-t', '--type', metavar='type', required=True,
              type=click.Choice(keygens.keys()), prompt=True,
              help='{}'.format('One of: {}'.format(', '.join(keygens.keys()))))
@click.option('-k', '--key', metavar='filename', required=True)
@click.command(help='Generate pub/private keypair')
def keygen(type, key, password):
    # NOTE: 'type' shadows the builtin, but the parameter name is fixed
    # by the click option and cannot be renamed without changing the CLI.
    password = get_password() if password else None
    keygens[type](key, password)
@click.option('-l', '--lang', metavar='lang', default=valid_langs[0],
              type=click.Choice(valid_langs))
@click.option('-k', '--key', metavar='filename', required=True)
@click.command(help='Dump public key from keypair')
def getpub(key, lang):
    """Print the public half of a keypair as C or Rust source."""
    key = load_key(key)
    if key is None:
        print("Invalid passphrase")
    elif lang == 'c':
        key.emit_c_public()
    elif lang == 'rust':
        key.emit_rust_public()
    else:
        # Unreachable: click.Choice restricts lang to valid_langs.
        raise ValueError("BUG: should never get here!")
@click.option('--minimal', default=False, is_flag=True,
              help='Reduce the size of the dumped private key to include only '
                   'the minimum amount of data required to decrypt. This '
                   'might require changes to the build config. Check the docs!'
              )
@click.option('-k', '--key', metavar='filename', required=True)
@click.command(help='Dump private key from keypair')
def getpriv(key, minimal):
    """Print the private key material of a keypair."""
    key = load_key(key)
    if key is None:
        print("Invalid passphrase")
        # BUGFIX: bail out here -- the original fell through and called
        # emit_private() on None, raising AttributeError.
        return
    try:
        key.emit_private(minimal)
    except (RSAUsageError, ECDSAUsageError, Ed25519UsageError,
            X25519UsageError) as e:
        raise click.UsageError(e)
@click.argument('imgfile')
@click.option('-k', '--key', metavar='filename')
@click.command(help="Check that signed image can be verified by given key")
def verify(key, imgfile):
    """Verify IMGFILE's digest/signature; exit(1) on any failure."""
    key = load_key(key) if key else None
    ret, version, digest = image.Image.verify(imgfile, key)
    if ret == image.VerifyResult.OK:
        print("Image was correctly validated")
        print("Image version: {}.{}.{}+{}".format(*version))
        print("Image digest: {}".format(digest.hex()))
        # Success: return before reaching the sys.exit(1) below.
        return
    elif ret == image.VerifyResult.INVALID_MAGIC:
        print("Invalid image magic; is this an MCUboot image?")
    elif ret == image.VerifyResult.INVALID_TLV_INFO_MAGIC:
        print("Invalid TLV info magic; is this an MCUboot image?")
    elif ret == image.VerifyResult.INVALID_HASH:
        print("Image has an invalid sha256 digest")
    elif ret == image.VerifyResult.INVALID_SIGNATURE:
        print("No signature found for the given key")
    else:
        print("Unknown return code: {}".format(ret))
    # Any failure path falls through to a non-zero exit status.
    sys.exit(1)
def validate_version(ctx, param, value):
    """click callback: ensure *value* parses as an image version string."""
    try:
        decode_version(value)
    except ValueError as e:
        raise click.BadParameter("{}".format(e))
    return value
def validate_security_counter(ctx, param, value):
    """click callback: parse the security counter ('auto' or an integer)."""
    if value is None:
        return None
    if value.lower() == 'auto':
        return 'auto'
    try:
        return int(value, 0)
    except ValueError:
        raise click.BadParameter(
            "{} is not a valid integer. Please use code literals "
            "prefixed with 0b/0B, 0o/0O, or 0x/0X as necessary."
            .format(value))
def validate_header_size(ctx, param, value):
    """click callback: reject header sizes below the fixed image header."""
    min_hdr_size = image.IMAGE_HEADER_SIZE
    if value >= min_hdr_size:
        return value
    raise click.BadParameter(
        "Minimum value for -H/--header-size is {}".format(min_hdr_size))
def get_dependencies(ctx, param, value):
    """click callback: parse "(<image_ID>,<image_version>), ..." pairs.

    Returns a dict keyed by image.DEP_IMAGES_KEY / image.DEP_VERSIONS_KEY,
    or None implicitly when the option was not supplied.
    """
    if value is not None:
        versions = []
        # Image IDs are the digits immediately after each '('.
        images = re.findall(r"\((\d+)", value)
        if len(images) == 0:
            raise click.BadParameter(
                "Image dependency format is invalid: {}".format(value))
        # Versions are the "x.y.z+b"-style tokens before each ')'.
        raw_versions = re.findall(r",\s*([0-9.+]+)\)", value)
        if len(images) != len(raw_versions):
            raise click.BadParameter(
                '''There's a mismatch between the number of dependency images
                and versions in: {}'''.format(value))
        for raw_version in raw_versions:
            try:
                versions.append(decode_version(raw_version))
            except ValueError as e:
                raise click.BadParameter("{}".format(e))
        dependencies = dict()
        dependencies[image.DEP_IMAGES_KEY] = images
        dependencies[image.DEP_VERSIONS_KEY] = versions
        return dependencies
class BasedIntParamType(click.ParamType):
    """click parameter type accepting integers in any Python base literal
    (plain decimal, 0b/0B, 0o/0O, or 0x/0X)."""

    name = 'integer'

    def convert(self, value, param, ctx):
        try:
            return int(value, 0)
        except ValueError:
            message = ('%s is not a valid integer. Please use code literals '
                       'prefixed with 0b/0B, 0o/0O, or 0x/0X as necessary.'
                       % value)
            self.fail(message, param, ctx)
@click.argument('outfile')
@click.argument('infile')
@click.option('--custom-tlv', required=False, nargs=2, default=[],
              multiple=True, metavar='[tag] [value]',
              help='Custom TLV that will be placed into protected area. '
                   'Add "0x" prefix if the value should be interpreted as an '
                   'integer, otherwise it will be interpreted as a string. '
                   'Specify the option multiple times to add multiple TLVs.')
@click.option('-R', '--erased-val', type=click.Choice(['0', '0xff']),
              required=False,
              help='The value that is read back from erased flash.')
@click.option('-x', '--hex-addr', type=BasedIntParamType(), required=False,
              help='Adjust address in hex output file.')
@click.option('-L', '--load-addr', type=BasedIntParamType(), required=False,
              help='Load address for image when it should run from RAM.')
@click.option('--save-enctlv', default=False, is_flag=True,
              help='When upgrading, save encrypted key TLVs instead of plain '
                   'keys. Enable when BOOT_SWAP_SAVE_ENCTLV config option '
                   'was set.')
@click.option('-E', '--encrypt', metavar='filename',
              help='Encrypt image using the provided public key. '
                   '(Not supported in direct-xip or ram-load mode.)')
@click.option('-e', '--endian', type=click.Choice(['little', 'big']),
              default='little', help="Select little or big endian")
@click.option('--overwrite-only', default=False, is_flag=True,
              help='Use overwrite-only instead of swap upgrades')
@click.option('--boot-record', metavar='sw_type', help='Create CBOR encoded '
              'boot record TLV. The sw_type represents the role of the '
              'software component (e.g. CoFM for coprocessor firmware). '
              '[max. 12 characters]')
@click.option('-M', '--max-sectors', type=int,
              help='When padding allow for this amount of sectors (defaults '
                   'to 128)')
@click.option('--confirm', default=False, is_flag=True,
              help='When padding the image, mark it as confirmed (implies '
                   '--pad)')
@click.option('--pad', default=False, is_flag=True,
              help='Pad image to --slot-size bytes, adding trailer magic')
@click.option('-S', '--slot-size', type=BasedIntParamType(), required=True,
              help='Size of the slot. If the slots have different sizes, use '
                   'the size of the secondary slot.')
@click.option('--pad-header', default=False, is_flag=True,
              help='Add --header-size zeroed bytes at the beginning of the '
                   'image')
@click.option('-H', '--header-size', callback=validate_header_size,
              type=BasedIntParamType(), required=True)
@click.option('--pad-sig', default=False, is_flag=True,
              help='Add 0-2 bytes of padding to ECDSA signature '
                   '(for mcuboot <1.5)')
@click.option('-d', '--dependencies', callback=get_dependencies,
              required=False, help='''Add dependence on another image, format:
              "(<image_ID>,<image_version>), ... "''')
@click.option('-s', '--security-counter', callback=validate_security_counter,
              help='Specify the value of security counter. Use the `auto` '
              'keyword to automatically generate it from the image version.')
@click.option('-v', '--version', callback=validate_version, required=True)
@click.option('--align', type=click.Choice(['1', '2', '4', '8']),
              required=True)
@click.option('--public-key-format', type=click.Choice(['hash', 'full']),
              default='hash', help='In what format to add the public key to '
              'the image manifest: full key or hash of the key.')
@click.option('-k', '--key', metavar='filename')
@click.command(help='''Create a signed or unsigned image\n
              INFILE and OUTFILE are parsed as Intel HEX if the params have
              .hex extension, otherwise binary format is used''')
def sign(key, public_key_format, align, version, pad_sig, header_size,
         pad_header, slot_size, pad, confirm, max_sectors, overwrite_only,
         endian, encrypt, infile, outfile, dependencies, load_addr, hex_addr,
         erased_val, save_enctlv, security_counter, boot_record, custom_tlv):
    """Build, optionally sign/encrypt, and write out an MCUboot image."""
    if confirm:
        # Confirmed but non-padded images don't make much sense, because
        # otherwise there's no trailer area for writing the confirmed status.
        pad = True
    img = image.Image(version=decode_version(version), header_size=header_size,
                      pad_header=pad_header, pad=pad, confirm=confirm,
                      align=int(align), slot_size=slot_size,
                      max_sectors=max_sectors, overwrite_only=overwrite_only,
                      endian=endian, load_addr=load_addr, erased_val=erased_val,
                      save_enctlv=save_enctlv,
                      security_counter=security_counter)
    img.load(infile)
    key = load_key(key) if key else None
    enckey = load_key(encrypt) if encrypt else None
    if enckey and key:
        # The signing key and encryption key must be of matching families.
        if ((isinstance(key, keys.ECDSA256P1) and
             not isinstance(enckey, keys.ECDSA256P1Public))
                or (isinstance(key, keys.RSA) and
                    not isinstance(enckey, keys.RSAPublic))):
            # FIXME
            raise click.UsageError("Signing and encryption must use the same "
                                   "type of key")
    if pad_sig and hasattr(key, 'pad_sig'):
        key.pad_sig = True
    # Get list of custom protected TLVs from the command-line
    custom_tlvs = {}
    for tlv in custom_tlv:
        tag = int(tlv[0], 0)
        if tag in custom_tlvs:
            raise click.UsageError('Custom TLV %s already exists.' % hex(tag))
        if tag in image.TLV_VALUES.values():
            raise click.UsageError(
                'Custom TLV %s conflicts with predefined TLV.' % hex(tag))
        value = tlv[1]
        if value.startswith('0x'):
            # Hex values must describe whole bytes.
            if len(value[2:]) % 2:
                raise click.UsageError('Custom TLV length is odd.')
            custom_tlvs[tag] = bytes.fromhex(value[2:])
        else:
            custom_tlvs[tag] = value.encode('utf-8')
    img.create(key, public_key_format, enckey, dependencies, boot_record,
               custom_tlvs)
    img.save(outfile, hex_addr)
class AliasesGroup(click.Group):
    """click Group that resolves alternate command names to real ones."""

    _aliases = {
        "create": "sign",
    }

    def list_commands(self, ctx):
        # Show real commands and aliases together, alphabetically.
        return sorted(list(self.commands) + list(self._aliases))

    def get_command(self, ctx, cmd_name):
        # Try the real name first, then fall back to the alias table.
        rv = click.Group.get_command(self, ctx, cmd_name)
        if rv is not None:
            return rv
        target = self._aliases.get(cmd_name)
        if target is None:
            return None
        return click.Group.get_command(self, ctx, target)
@click.command(help='Print imgtool version information')
def version():
    # Prints the package version string imported from the imgtool package.
    print(imgtool_version)
@click.command(cls=AliasesGroup,
               context_settings=dict(help_option_names=['-h', '--help']))
def imgtool():
    # Top-level command group; the subcommands are attached below.
    # No body: click dispatches directly to the registered subcommands.
    # (Deliberately no docstring -- click would use it as the help text.)
    pass
# Register every subcommand on the top-level group.
imgtool.add_command(keygen)
imgtool.add_command(getpub)
imgtool.add_command(getpriv)
imgtool.add_command(verify)
imgtool.add_command(sign)
imgtool.add_command(version)
if __name__ == '__main__':
    imgtool()
| |
"""Checks use of "too-complex" check"""
def f1():  # complexity fixture; trailing comments only so line numbers stay put
    """McCabe rating: 1"""
    pass
def f2(n):  # fixture: straight-line code, no decision points
    """McCabe rating: 1"""
    k = n + 4
    s = k + n
    return s
def f3(n):  # fixture: if/elif/else contributes two decision points
    """McCabe rating: 3"""
    if n > 3:
        return "bigger than three"
    elif n > 4:  # unreachable at runtime; only the branch count matters here
        return "is never executed"
    else:
        return "smaller than or equal to three"
def f4():  # fixture: a single loop
    """McCabe rating: 2"""
    for i in range(10):
        print(i)
def f5(mylist):  # fixture: for/else still counts as one decision
    """McCabe rating: 2"""
    for i in mylist:
        print(i)
    else:
        print(None)
def f6(n):  # fixture: `f` is intentionally undefined -- the branch never runs
    """McCabe rating: 2"""
    if n > 4:
        return f(n - 1)
    else:
        return n
def f7():  # fixture: nested function definitions
    """McCabe rating: 3"""
    def b():
        """McCabe rating: 2"""
        def c():
            """McCabe rating: 1"""
            pass
        c()
    b()
def f8():  # fixture: try with two except handlers and else
    """McCabe rating: 4"""
    try:
        print(1)
    except TypeA:  # TypeA/TypeB are intentionally undefined; never executed
        print(2)
    except TypeB:
        print(3)
    else:
        print(4)
def f9():  # fixture: deeply nested conditionals
    """McCabe rating: 9"""
    myint = 2
    if myint > 5:
        pass
    else:
        if myint <= 5:
            pass
        else:
            myint = 3
            if myint > 2:
                if myint > 3:
                    pass
                elif myint == 3:
                    pass
                elif myint < 3:
                    pass
                else:
                    if myint:
                        pass
            else:
                if myint:
                    pass
    myint = 4
def f10():  # fixture: long elif chain plus a nested while/with
    """McCabe rating: 11"""
    myint = 2
    if myint == 5:
        return myint
    elif myint == 6:
        return myint
    elif myint == 7:
        return myint
    elif myint == 8:
        return myint
    elif myint == 9:
        return myint
    elif myint == 10:
        if myint == 8:
            while True:
                return True
        elif myint == 8:
            with myint:
                return 8
        else:
            if myint == 2:
                return myint
        return myint
    return myint
class MyClass1(object):  # fixture class: each method's rating is checked individually
    """Class of example to test mccabe"""
    _name = 'MyClass'  # To force a tail.node=None
    def method1():  # deliberately missing `self`; only complexity is measured
        """McCabe rating: 1"""
        pass
    def method2(self, param1):
        """McCabe rating: 18"""
        if not param1:
            pass
        pass
        if param1:
            pass
        else:
            pass
        pass
        if param1:
            pass
        if param1:
            pass
        if param1:
            pass
        if param1:
            pass
        if param1:
            pass
        if param1:
            pass
        if param1:
            for value in range(5):
                pass
        pass
        for count in range(6):
            with open('myfile') as fp:
                count += 1
                pass
            pass
        try:
            pass
            if not param1:
                pass
            else:
                pass
            if param1:
                raise BaseException('Error')
            with open('myfile2') as fp2:
                pass
            pass
        finally:
            if param1 is not None:
                pass
            for count2 in range(8):
                try:
                    pass
                except BaseException('Error2'):
                    pass
        return param1
        for count in range(10):  # unreachable on purpose (after return)
            if count == 1:
                exit(0)
            elif count == 2:
                exit(1)
            else:
                exit(2)
    def method3(self):
        # no docstring in the original: try/finally with a nested if/else
        try:
            if True:
                pass
            else:
                pass
        finally:
            pass
        return True
| |
import functools
import inspect
import itertools
import sys
from collections import defaultdict
from collections import deque
from collections import OrderedDict
from typing import Dict
from typing import Tuple
import attr
import py
import _pytest
from _pytest import nodes
from _pytest._code.code import FormattedExcinfo
from _pytest._code.code import TerminalRepr
from _pytest.compat import _format_args
from _pytest.compat import _PytestWrapper
from _pytest.compat import FuncargnamesCompatAttr
from _pytest.compat import get_real_func
from _pytest.compat import get_real_method
from _pytest.compat import getfslineno
from _pytest.compat import getfuncargnames
from _pytest.compat import getimfunc
from _pytest.compat import getlocation
from _pytest.compat import is_generator
from _pytest.compat import NOTSET
from _pytest.compat import safe_getattr
from _pytest.outcomes import fail
from _pytest.outcomes import TEST_OUTCOME
if False: # TYPE_CHECKING
from typing import Type
@attr.s(frozen=True)
class PseudoFixtureDef:
    # Minimal stand-in for a FixtureDef: only the two attributes the
    # fixture-resolution code reads from a real FixtureDef.
    cached_result = attr.ib()
    scope = attr.ib()
def pytest_sessionstart(session):
    """Populate scopename2class and attach the session's FixtureManager.

    Done at session start (with local imports) rather than at module
    import time, presumably to avoid import cycles with _pytest.python
    and _pytest.main -- TODO confirm.
    """
    import _pytest.python
    import _pytest.nodes
    scopename2class.update(
        {
            "package": _pytest.python.Package,
            "class": _pytest.python.Class,
            "module": _pytest.python.Module,
            "function": _pytest.nodes.Item,
            "session": _pytest.main.Session,
        }
    )
    session._fixturemanager = FixtureManager(session)
# Maps scope name -> collector node class; filled in pytest_sessionstart.
scopename2class = {}  # type: Dict[str, Type[nodes.Node]]
# Which FixtureRequest properties are legal at each scope; each scope
# extends the previous one (session < package < module < class <
# instance < function).
scope2props = dict(session=())  # type: Dict[str, Tuple[str, ...]]
scope2props["package"] = ("fspath",)
scope2props["module"] = ("fspath", "module")
scope2props["class"] = scope2props["module"] + ("cls",)
scope2props["instance"] = scope2props["class"] + ("instance",)
scope2props["function"] = scope2props["instance"] + ("function", "keywords")
def scopeproperty(name=None, doc=None):
    """Decorator factory: expose a property only in scopes that allow it."""
    def decoratescope(func):
        scopename = name or func.__name__

        def provide(self):
            # Only expose the attribute when the current scope lists it
            # in scope2props; otherwise behave like a missing attribute.
            if func.__name__ in scope2props[self.scope]:
                return func(self)
            raise AttributeError(
                "{} not available in {}-scoped context".format(scopename, self.scope)
            )

        return property(fget=provide, doc=func.__doc__)

    return decoratescope
def get_scope_package(node, fixturedef):
    """Walk up from *node* to the Package owning *fixturedef*.

    Falls back to the whole session when no matching package exists.
    """
    import pytest

    # The package a fixturedef belongs to is identified by the nodeid of
    # its __init__.py.
    target_id = "{}/{}".format(fixturedef.baseid, "__init__.py")
    current = node
    while current:
        if type(current) is pytest.Package and current.nodeid == target_id:
            break
        current = current.parent
    if current is None:
        return node.session
    return current
def get_scope_node(node, scope):
    """Return *node*'s ancestor collector matching the given scope name."""
    node_cls = scopename2class.get(scope)
    if node_cls is None:
        raise ValueError("unknown scope")
    return node.getparent(node_cls)
def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
    """Turn direct funcarg parametrization into artificial FixtureDefs.

    Rewrites every callspec's funcargs into params/indices and registers
    a FixtureDef per argname so later test execution can rely on a
    uniform FixtureDef structure for all arguments.
    """
    # this function will transform all collected calls to a functions
    # if they use direct funcargs (i.e. direct parametrization)
    # because we want later test execution to be able to rely on
    # an existing FixtureDef structure for all arguments.
    # XXX we can probably avoid this algorithm if we modify CallSpec2
    # to directly care for creating the fixturedefs within its methods.
    if not metafunc._calls[0].funcargs:
        return  # this function call does not have direct parametrization
    # collect funcargs of all callspecs into a list of values
    arg2params = {}
    arg2scope = {}
    for callspec in metafunc._calls:
        for argname, argvalue in callspec.funcargs.items():
            assert argname not in callspec.params
            callspec.params[argname] = argvalue
            # The param index of this callspec is its position in the
            # accumulated per-argname value list.
            arg2params_list = arg2params.setdefault(argname, [])
            callspec.indices[argname] = len(arg2params_list)
            arg2params_list.append(argvalue)
            if argname not in arg2scope:
                scopenum = callspec._arg2scopenum.get(argname, scopenum_function)
                arg2scope[argname] = scopes[scopenum]
        callspec.funcargs.clear()
    # register artificial FixtureDef's so that later at test execution
    # time we can rely on a proper FixtureDef to exist for fixture setup.
    arg2fixturedefs = metafunc._arg2fixturedefs
    for argname, valuelist in arg2params.items():
        # if we have a scope that is higher than function we need
        # to make sure we only ever create an according fixturedef on
        # a per-scope basis. We thus store and cache the fixturedef on the
        # node related to the scope.
        scope = arg2scope[argname]
        node = None
        if scope != "function":
            node = get_scope_node(collector, scope)
            if node is None:
                assert scope == "class" and isinstance(collector, _pytest.python.Module)
                # use module-level collector for class-scope (for now)
                node = collector
        if node and argname in node._name2pseudofixturedef:
            # Reuse the fixturedef cached on the scope node.
            arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
        else:
            fixturedef = FixtureDef(
                fixturemanager,
                "",
                argname,
                get_direct_param_fixture_func,
                arg2scope[argname],
                valuelist,
                False,
                False,
            )
            arg2fixturedefs[argname] = [fixturedef]
            if node is not None:
                node._name2pseudofixturedef[argname] = fixturedef
def getfixturemarker(obj):
    """Return obj's fixture marker, or None when absent or unreadable."""
    try:
        marker = getattr(obj, "_pytestfixturefunction", None)
    except TEST_OUTCOME:
        # Some objects raise on any attribute access (e.g. flask's
        # `request` proxy); such objects are not fixture functions.
        return None
    return marker
def get_parametrized_fixture_keys(item, scopenum):
    """ return list of keys for all parametrized arguments which match
    the specified scope. """
    assert scopenum < scopenum_function  # function
    try:
        cs = item.callspec
    except AttributeError:
        # Non-parametrized item: contributes no keys.
        pass
    else:
        # cs.indices.items() is random order of argnames. Need to
        # sort this so that different calls to
        # get_parametrized_fixture_keys will be deterministic.
        for argname, param_index in sorted(cs.indices.items()):
            if cs._arg2scopenum[argname] != scopenum:
                continue
            # Wider scopes need extra discriminators so that equal
            # (argname, index) pairs in different files/classes do not
            # collide.
            if scopenum == 0:  # session
                key = (argname, param_index)
            elif scopenum == 1:  # package
                key = (argname, param_index, item.fspath.dirpath())
            elif scopenum == 2:  # module
                key = (argname, param_index, item.fspath)
            elif scopenum == 3:  # class
                key = (argname, param_index, item.fspath, item.cls)
            yield key
# algorithm for sorting on a per-parametrized resource setup basis
# it is called for scopenum==0 (session) first and performs sorting
# down to the lower scopes such as to minimize number of "high scope"
# setups and teardowns
def reorder_items(items):
    """Reorder items to minimize setups/teardowns of high-scoped
    parametrized fixtures."""
    argkeys_cache = {}
    items_by_argkey = {}
    for scopenum in range(0, scopenum_function):
        # Per scope: which argkeys each item carries (ordered), and
        # which items carry each argkey (in collection order).
        argkeys_cache[scopenum] = d = {}
        items_by_argkey[scopenum] = item_d = defaultdict(deque)
        for item in items:
            keys = OrderedDict.fromkeys(get_parametrized_fixture_keys(item, scopenum))
            if keys:
                d[item] = keys
                for key in keys:
                    item_d[key].append(item)
    # OrderedDict.fromkeys is used as an ordered set of items.
    items = OrderedDict.fromkeys(items)
    return list(reorder_items_atscope(items, argkeys_cache, items_by_argkey, 0))
def fix_cache_order(item, argkeys_cache, items_by_argkey):
    """Move *item* to the front of every per-argkey queue it belongs to."""
    for scope_index in range(scopenum_function):
        item_keys = argkeys_cache[scope_index].get(item, [])
        for key in item_keys:
            items_by_argkey[scope_index][key].appendleft(item)
def reorder_items_atscope(items, argkeys_cache, items_by_argkey, scopenum):
    """Recursively group items sharing the same argkey at this scope,
    then reorder each group at the next (narrower) scope.

    *items* is an ordered set (OrderedDict with None values); the return
    value has the same shape.
    """
    if scopenum >= scopenum_function or len(items) < 3:
        # Nothing to gain from reordering 1-2 items or function scope.
        return items
    ignore = set()
    items_deque = deque(items)
    items_done = OrderedDict()
    scoped_items_by_argkey = items_by_argkey[scopenum]
    scoped_argkeys_cache = argkeys_cache[scopenum]
    while items_deque:
        no_argkey_group = OrderedDict()
        slicing_argkey = None
        while items_deque:
            item = items_deque.popleft()
            if item in items_done or item in no_argkey_group:
                continue
            # Argkeys of this item at this scope, minus already-handled ones.
            argkeys = OrderedDict.fromkeys(
                k for k in scoped_argkeys_cache.get(item, []) if k not in ignore
            )
            if not argkeys:
                no_argkey_group[item] = None
            else:
                # Pull every item sharing this argkey to the front so the
                # fixture is set up once for the whole run of items.
                slicing_argkey, _ = argkeys.popitem()
                # we don't have to remove relevant items from later in the deque because they'll just be ignored
                matching_items = [
                    i for i in scoped_items_by_argkey[slicing_argkey] if i in items
                ]
                for i in reversed(matching_items):
                    fix_cache_order(i, argkeys_cache, items_by_argkey)
                    items_deque.appendleft(i)
                break
        if no_argkey_group:
            # Items without an argkey at this scope are ordered by the
            # next narrower scope before being committed.
            no_argkey_group = reorder_items_atscope(
                no_argkey_group, argkeys_cache, items_by_argkey, scopenum + 1
            )
            for item in no_argkey_group:
                items_done[item] = None
        ignore.add(slicing_argkey)
    return items_done
def fillfixtures(function):
    """ fill missing funcargs for a test function. """
    try:
        request = function._request
    except AttributeError:
        # XXX this special code path is only expected to execute
        # with the oejskit plugin. It uses classes with funcargs
        # and we thus have to work a bit to allow this.
        fm = function.session._fixturemanager
        fi = fm.getfixtureinfo(function.parent, function.obj, None)
        function._fixtureinfo = fi
        request = function._request = FixtureRequest(function)
        request._fillfixtures()
        # prune out funcargs for jstests
        newfuncargs = {}
        for name in fi.argnames:
            newfuncargs[name] = function.funcargs[name]
        function.funcargs = newfuncargs
    else:
        # Normal path: the item already carries a FixtureRequest.
        request._fillfixtures()
def get_direct_param_fixture_func(request):
    """Pseudo-fixture implementation: hand back the parametrized value as-is."""
    return request.param
@attr.s(slots=True)
class FuncFixtureInfo:
    """Fixture wiring computed for one test function."""
    # original function argument names
    argnames = attr.ib(type=tuple)
    # argnames that function immediately requires. These include argnames +
    # fixture names specified via usefixtures and via autouse=True in fixture
    # definitions.
    initialnames = attr.ib(type=tuple)
    # Transitive closure of fixture names needed by this function.
    names_closure = attr.ib()  # List[str]
    name2fixturedefs = attr.ib()  # List[str, List[FixtureDef]]
    def prune_dependency_tree(self):
        """Recompute names_closure from initialnames and name2fixturedefs
        Can only reduce names_closure, which means that the new closure will
        always be a subset of the old one. The order is preserved.
        This method is needed because direct parametrization may shadow some
        of the fixtures that were included in the originally built dependency
        tree. In this way the dependency tree can get pruned, and the closure
        of argnames may get reduced.
        """
        closure = set()
        working_set = set(self.initialnames)
        while working_set:
            argname = working_set.pop()
            # argname may be smth not included in the original names_closure,
            # in which case we ignore it. This currently happens with pseudo
            # FixtureDefs which wrap 'get_direct_param_fixture_func(request)'.
            # So they introduce the new dependency 'request' which might have
            # been missing in the original tree (closure).
            if argname not in closure and argname in self.names_closure:
                closure.add(argname)
                if argname in self.name2fixturedefs:
                    # Only the last (most specific) FixtureDef for a name
                    # contributes its argnames to the closure.
                    working_set.update(self.name2fixturedefs[argname][-1].argnames)
        # Keep the original closure order while dropping pruned names.
        self.names_closure[:] = sorted(closure, key=self.names_closure.index)
class FixtureRequest(FuncargnamesCompatAttr):
    """ A request for a fixture from a test or fixture function.
    A request object gives access to the requesting test context
    and has an optional ``param`` attribute in case
    the fixture is parametrized indirectly.
    """
    def __init__(self, pyfuncitem):
        self._pyfuncitem = pyfuncitem
        #: fixture for which this request is being performed
        self.fixturename = None
        #: Scope string, one of "function", "class", "module", "session"
        self.scope = "function"
        self._fixture_defs = {}  # argname -> FixtureDef
        fixtureinfo = pyfuncitem._fixtureinfo
        self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()
        # per-argname index into its fixturedefs list; decremented on each
        # lookup so overridden fixtures can reach their "super" definition
        # (see _getnextfixturedef)
        self._arg2index = {}
        self._fixturemanager = pyfuncitem.session._fixturemanager
    @property
    def fixturenames(self):
        """names of all active fixtures in this request"""
        result = list(self._pyfuncitem._fixtureinfo.names_closure)
        # also include fixtures pulled in dynamically via getfixturevalue()
        result.extend(set(self._fixture_defs).difference(result))
        return result
    @property
    def node(self):
        """ underlying collection node (depends on current request scope)"""
        return self._getscopeitem(self.scope)
    def _getnextfixturedef(self, argname):
        """Return the next visible FixtureDef for *argname*, walking the
        override chain from the innermost definition outwards."""
        fixturedefs = self._arg2fixturedefs.get(argname, None)
        if fixturedefs is None:
            # we arrive here because of a dynamic call to
            # getfixturevalue(argname) usage which was naturally
            # not known at parsing/collection time
            parentid = self._pyfuncitem.parent.nodeid
            fixturedefs = self._fixturemanager.getfixturedefs(argname, parentid)
            self._arg2fixturedefs[argname] = fixturedefs
        # fixturedefs list is immutable so we maintain a decreasing index
        index = self._arg2index.get(argname, 0) - 1
        if fixturedefs is None or (-index > len(fixturedefs)):
            raise FixtureLookupError(argname, self)
        self._arg2index[argname] = index
        return fixturedefs[index]
    @property
    def config(self):
        """ the pytest config object associated with this request. """
        return self._pyfuncitem.config
    @scopeproperty()
    def function(self):
        """ test function object if the request has a per-function scope. """
        return self._pyfuncitem.obj
    @scopeproperty("class")
    def cls(self):
        """ class (can be None) where the test function was collected. """
        clscol = self._pyfuncitem.getparent(_pytest.python.Class)
        if clscol:
            return clscol.obj
    @property
    def instance(self):
        """ instance (can be None) on which test function was collected. """
        # unittest support hack, see _pytest.unittest.TestCaseFunction
        try:
            return self._pyfuncitem._testcase
        except AttributeError:
            function = getattr(self, "function", None)
            return getattr(function, "__self__", None)
    @scopeproperty()
    def module(self):
        """ python module object where the test function was collected. """
        return self._pyfuncitem.getparent(_pytest.python.Module).obj
    @scopeproperty()
    def fspath(self):
        """ the file system path of the test module which collected this test. """
        return self._pyfuncitem.fspath
    @property
    def keywords(self):
        """ keywords/markers dictionary for the underlying node. """
        return self.node.keywords
    @property
    def session(self):
        """ pytest session object. """
        return self._pyfuncitem.session
    def addfinalizer(self, finalizer):
        """ add finalizer/teardown function to be called after the
        last test within the requesting test context finished
        execution. """
        # XXX usually this method is shadowed by fixturedef specific ones
        self._addfinalizer(finalizer, scope=self.scope)
    def _addfinalizer(self, finalizer, scope):
        # register with the setup state of the collection node matching *scope*
        colitem = self._getscopeitem(scope)
        self._pyfuncitem.session._setupstate.addfinalizer(
            finalizer=finalizer, colitem=colitem
        )
    def applymarker(self, marker):
        """ Apply a marker to a single test function invocation.
        This method is useful if you don't want to have a keyword/marker
        on all function invocations.
        :arg marker: a :py:class:`_pytest.mark.MarkDecorator` object
        created by a call to ``pytest.mark.NAME(...)``.
        """
        self.node.add_marker(marker)
    def raiseerror(self, msg):
        """ raise a FixtureLookupError with the given message. """
        raise self._fixturemanager.FixtureLookupError(None, self, msg)
    def _fillfixtures(self):
        # instantiate every fixture the item needs and store the values
        # into item.funcargs (skipping those already filled in)
        item = self._pyfuncitem
        fixturenames = getattr(item, "fixturenames", self.fixturenames)
        for argname in fixturenames:
            if argname not in item.funcargs:
                item.funcargs[argname] = self.getfixturevalue(argname)
    def getfixturevalue(self, argname):
        """ Dynamically run a named fixture function.
        Declaring fixtures via function argument is recommended where possible.
        But if you can only decide whether to use another fixture at test
        setup time, you may use this function to retrieve it inside a fixture
        or test function body.
        """
        # cached_result is a (result, cache_key, exc_info) triple
        return self._get_active_fixturedef(argname).cached_result[0]
    def _get_active_fixturedef(self, argname):
        """Return the FixtureDef for *argname*, computing and caching its
        value first if it has not been set up for this request yet."""
        try:
            return self._fixture_defs[argname]
        except KeyError:
            try:
                fixturedef = self._getnextfixturedef(argname)
            except FixtureLookupError:
                if argname == "request":
                    # "request" is always available, served via a pseudo def
                    cached_result = (self, [0], None)
                    scope = "function"
                    return PseudoFixtureDef(cached_result, scope)
                raise
        # remove indent to prevent the python3 exception
        # from leaking into the call
        self._compute_fixture_value(fixturedef)
        self._fixture_defs[argname] = fixturedef
        return fixturedef
    def _get_fixturestack(self):
        """Return the chain of FixtureDefs leading to this request,
        outermost first, by walking the _parent_request links."""
        current = self
        values = []
        while 1:
            fixturedef = getattr(current, "_fixturedef", None)
            if fixturedef is None:
                values.reverse()
                return values
            values.append(fixturedef)
            current = current._parent_request
    def _compute_fixture_value(self, fixturedef):
        """
        Creates a SubRequest based on "self" and calls the execute method of the given fixturedef object. This will
        force the FixtureDef object to throw away any previous results and compute a new fixture value, which
        will be stored into the FixtureDef object itself.
        :param FixtureDef fixturedef:
        """
        # prepare a subrequest object before calling fixture function
        # (latter managed by fixturedef)
        argname = fixturedef.argname
        funcitem = self._pyfuncitem
        scope = fixturedef.scope
        try:
            param = funcitem.callspec.getparam(argname)
        except (AttributeError, ValueError):
            # no parametrization for this argname (or no callspec at all)
            param = NOTSET
            param_index = 0
            has_params = fixturedef.params is not None
            fixtures_not_supported = getattr(funcitem, "nofuncargs", False)
            if has_params and fixtures_not_supported:
                msg = (
                    "{name} does not support fixtures, maybe unittest.TestCase subclass?\n"
                    "Node id: {nodeid}\n"
                    "Function type: {typename}"
                ).format(
                    name=funcitem.name,
                    nodeid=funcitem.nodeid,
                    typename=type(funcitem).__name__,
                )
                fail(msg, pytrace=False)
            if has_params:
                # a parametrized fixture was requested without a parameter:
                # report where the request came from to help the user
                frame = inspect.stack()[3]
                frameinfo = inspect.getframeinfo(frame[0])
                source_path = frameinfo.filename
                source_lineno = frameinfo.lineno
                source_path = py.path.local(source_path)
                if source_path.relto(funcitem.config.rootdir):
                    source_path = source_path.relto(funcitem.config.rootdir)
                msg = (
                    "The requested fixture has no parameter defined for test:\n"
                    "    {}\n\n"
                    "Requested fixture '{}' defined in:\n{}"
                    "\n\nRequested here:\n{}:{}".format(
                        funcitem.nodeid,
                        fixturedef.argname,
                        getlocation(fixturedef.func, funcitem.config.rootdir),
                        source_path,
                        source_lineno,
                    )
                )
                fail(msg, pytrace=False)
        else:
            param_index = funcitem.callspec.indices[argname]
            # if a parametrize invocation set a scope it will override
            # the static scope defined with the fixture function
            paramscopenum = funcitem.callspec._arg2scopenum.get(argname)
            if paramscopenum is not None:
                scope = scopes[paramscopenum]
        subrequest = SubRequest(self, scope, param, param_index, fixturedef)
        # check if a higher-level scoped fixture accesses a lower level one
        subrequest._check_scope(argname, self.scope, scope)
        try:
            # call the fixture function
            fixturedef.execute(request=subrequest)
        finally:
            self._schedule_finalizers(fixturedef, subrequest)
    def _schedule_finalizers(self, fixturedef, subrequest):
        # if fixture function failed it might have registered finalizers
        self.session._setupstate.addfinalizer(
            functools.partial(fixturedef.finish, request=subrequest), subrequest.node
        )
    def _check_scope(self, argname, invoking_scope, requested_scope):
        """Fail the test when a broader-scoped request accesses a
        narrower-scoped fixture (e.g. session fixture using a function one)."""
        if argname == "request":
            return
        if scopemismatch(invoking_scope, requested_scope):
            # try to report something helpful
            lines = self._factorytraceback()
            fail(
                "ScopeMismatch: You tried to access the %r scoped "
                "fixture %r with a %r scoped request object, "
                "involved factories\n%s"
                % ((requested_scope, argname, invoking_scope, "\n".join(lines))),
                pytrace=False,
            )
    def _factorytraceback(self):
        """Return "path:line: def name(args)" lines for every fixture
        factory on the current request stack (for error reporting)."""
        lines = []
        for fixturedef in self._get_fixturestack():
            factory = fixturedef.func
            fs, lineno = getfslineno(factory)
            p = self._pyfuncitem.session.fspath.bestrelpath(fs)
            args = _format_args(factory)
            lines.append("%s:%d: def %s%s" % (p, lineno + 1, factory.__name__, args))
        return lines
    def _getscopeitem(self, scope):
        """Return the collection node that corresponds to *scope*."""
        if scope == "function":
            # this might also be a non-function Item despite its attribute name
            return self._pyfuncitem
        if scope == "package":
            # NOTE(review): self._fixturedef only exists on SubRequest;
            # package-scope lookups appear to always go through one — confirm
            node = get_scope_package(self._pyfuncitem, self._fixturedef)
        else:
            node = get_scope_node(self._pyfuncitem, scope)
        if node is None and scope == "class":
            # fallback to function item itself
            node = self._pyfuncitem
        assert node, 'Could not obtain a node for scope "{}" for function {!r}'.format(
            scope, self._pyfuncitem
        )
        return node
    def __repr__(self):
        return "<FixtureRequest for %r>" % (self.node)
class SubRequest(FixtureRequest):
    """ a sub request for handling getting a fixture from a
    test function/fixture. """
    def __init__(self, request, scope, param, param_index, fixturedef):
        # NOTE: deliberately does not call FixtureRequest.__init__; it shares
        # the parent request's mutable lookup mappings instead of copying them
        self._parent_request = request
        self.fixturename = fixturedef.argname
        if param is not NOTSET:
            self.param = param
        self.param_index = param_index
        self.scope = scope
        self._fixturedef = fixturedef
        self._pyfuncitem = request._pyfuncitem
        self._fixture_defs = request._fixture_defs
        self._arg2fixturedefs = request._arg2fixturedefs
        self._arg2index = request._arg2index
        self._fixturemanager = request._fixturemanager
    def __repr__(self):
        return "<SubRequest {!r} for {!r}>".format(self.fixturename, self._pyfuncitem)
    def addfinalizer(self, finalizer):
        # finalizers attach to this fixture's own FixtureDef, so they run
        # when the fixture value itself is torn down (shadows the scope-based
        # registration done by FixtureRequest.addfinalizer)
        self._fixturedef.addfinalizer(finalizer)
    def _schedule_finalizers(self, fixturedef, subrequest):
        # if the executing fixturedef was not explicitly requested in the argument list (via
        # getfixturevalue inside the fixture call) then ensure this fixture def will be finished
        # first
        if fixturedef.argname not in self.fixturenames:
            fixturedef.addfinalizer(
                functools.partial(self._fixturedef.finish, request=self)
            )
        super()._schedule_finalizers(fixturedef, subrequest)
# All fixture scopes, ordered from broadest to narrowest; the position of a
# scope in this list is its "scope number" used throughout this module.
scopes = ["session", "package", "module", "class", "function"]
scopenum_function = scopes.index("function")
def scopemismatch(currentscope, newscope):
    """Return True when *newscope* is narrower than *currentscope*."""
    return scopes.index(newscope) > scopes.index(currentscope)
def scope2index(scope, descr, where=None):
    """Look up the index of ``scope`` and raise a descriptive value error
    if not defined.
    """
    try:
        return scopes.index(scope)
    except ValueError:
        origin = "from {} ".format(where) if where else ""
        fail(
            "{} {}got an unexpected scope value '{}'".format(descr, origin, scope),
            pytrace=False,
        )
class FixtureLookupError(LookupError):
    """ could not return a requested Fixture (missing or invalid). """
    def __init__(self, argname, request, msg=None):
        self.argname = argname
        self.request = request
        self.fixturestack = request._get_fixturestack()
        self.msg = msg
    def formatrepr(self):
        """Build a FixtureLookupErrorRepr showing the fixture stack and a
        (hopefully) helpful message about the failed lookup."""
        tblines = []
        addline = tblines.append
        stack = [self.request._pyfuncitem.obj]
        stack.extend(map(lambda x: x.func, self.fixturestack))
        msg = self.msg
        if msg is not None:
            # the last fixture raise an error, let's present
            # it at the requesting side
            stack = stack[:-1]
        for function in stack:
            fspath, lineno = getfslineno(function)
            try:
                lines, _ = inspect.getsourcelines(get_real_func(function))
            except (IOError, IndexError, TypeError):
                error_msg = "file %s, line %s: source code not available"
                addline(error_msg % (fspath, lineno + 1))
            else:
                addline("file {}, line {}".format(fspath, lineno + 1))
                # show the source up to and including the def line only
                for i, line in enumerate(lines):
                    line = line.rstrip()
                    addline("  " + line)
                    if line.lstrip().startswith("def"):
                        break
        if msg is None:
            # no explicit message: compute the set of fixtures visible to
            # this test to distinguish "not found" from "recursive dependency"
            fm = self.request._fixturemanager
            available = set()
            parentid = self.request._pyfuncitem.parent.nodeid
            for name, fixturedefs in fm._arg2fixturedefs.items():
                faclist = list(fm._matchfactories(fixturedefs, parentid))
                if faclist:
                    available.add(name)
            if self.argname in available:
                msg = " recursive dependency involving fixture '{}' detected".format(
                    self.argname
                )
            else:
                msg = "fixture '{}' not found".format(self.argname)
            msg += "\n available fixtures: {}".format(", ".join(sorted(available)))
            msg += "\n use 'pytest --fixtures [testpath]' for help on them."
        # NOTE(review): fspath/lineno come from the last loop iteration above;
        # this assumes the stack is never empty — confirm
        return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
class FixtureLookupErrorRepr(TerminalRepr):
    """Terminal representation of a FixtureLookupError, writing the fixture
    traceback followed by the highlighted error message."""
    def __init__(self, filename, firstlineno, tblines, errorstring, argname):
        self.tblines = tblines
        self.errorstring = errorstring
        self.filename = filename
        self.firstlineno = firstlineno
        self.argname = argname
    def toterminal(self, tw):
        """Render this error onto terminal writer *tw*."""
        # tw.line("FixtureLookupError: %s" %(self.argname), red=True)
        for tbline in self.tblines:
            tw.line(tbline.rstrip())
        lines = self.errorstring.split("\n")
        if lines:
            # first line gets the fail marker, continuation lines the flow
            # marker, all in red
            tw.line(
                "{}       {}".format(FormattedExcinfo.fail_marker, lines[0].strip()),
                red=True,
            )
            for line in lines[1:]:
                tw.line(
                    "{}       {}".format(FormattedExcinfo.flow_marker, line.strip()),
                    red=True,
                )
        tw.line()
        tw.line("%s:%d" % (self.filename, self.firstlineno + 1))
def fail_fixturefunc(fixturefunc, msg):
    """Abort the current test with *msg*, annotated with the fixture
    function's source code and its file:line location."""
    fs, lineno = getfslineno(fixturefunc)
    where = "{}:{}".format(fs, lineno + 1)
    body = str(_pytest._code.Source(fixturefunc).indent())
    fail("{}:\n\n{}\n{}".format(msg, body, where), pytrace=False)
def call_fixture_func(fixturefunc, request, kwargs):
    """Invoke *fixturefunc* with *kwargs* and return the fixture value.
    For generator (yield) fixtures, advance to the first ``yield`` and
    register the remainder of the generator as teardown via a finalizer."""
    if not is_generator(fixturefunc):
        return fixturefunc(**kwargs)
    it = fixturefunc(**kwargs)
    value = next(it)
    request.addfinalizer(
        functools.partial(_teardown_yield_fixture, fixturefunc, it)
    )
    return value
def _teardown_yield_fixture(fixturefunc, it):
"""Executes the teardown of a fixture function by advancing the iterator after the
yield and ensure the iteration ends (if not it means there is more than one yield in the function)"""
try:
next(it)
except StopIteration:
pass
else:
fail_fixturefunc(
fixturefunc, "yield_fixture function has more than one 'yield'"
)
class FixtureDef:
    """ A container for a factory definition. """
    def __init__(
        self,
        fixturemanager,
        baseid,
        argname,
        func,
        scope,
        params,
        unittest=False,
        ids=None,
    ):
        self._fixturemanager = fixturemanager
        # base node id below which this fixture is visible; "" means visible
        # everywhere (e.g. plugin-provided fixtures)
        self.baseid = baseid or ""
        self.has_location = baseid is not None
        self.func = func
        self.argname = argname
        self.scope = scope
        self.scopenum = scope2index(
            scope or "function",
            descr="Fixture '{}'".format(func.__name__),
            where=baseid,
        )
        self.params = params
        # names of the fixtures the fixture function itself requires
        self.argnames = getfuncargnames(func, name=argname, is_method=unittest)
        self.unittest = unittest
        self.ids = ids
        self._finalizers = []
    def addfinalizer(self, finalizer):
        """Register *finalizer* to run when this fixture is torn down."""
        self._finalizers.append(finalizer)
    def finish(self, request):
        """Run all registered finalizers (LIFO), invalidate the cached result
        and re-raise the first finalizer exception, if any."""
        exceptions = []
        try:
            while self._finalizers:
                try:
                    func = self._finalizers.pop()
                    func()
                except:  # noqa
                    # deliberately broad: every finalizer must run even when
                    # earlier ones fail; exceptions are collected instead
                    exceptions.append(sys.exc_info())
            if exceptions:
                _, val, tb = exceptions[0]
                # Ensure to not keep frame references through traceback.
                del exceptions
                raise val.with_traceback(tb)
        finally:
            hook = self._fixturemanager.session.gethookproxy(request.node.fspath)
            hook.pytest_fixture_post_finalizer(fixturedef=self, request=request)
            # even if finalization fails, we invalidate
            # the cached fixture value and remove
            # all finalizers because they may be bound methods which will
            # keep instances alive
            if hasattr(self, "cached_result"):
                del self.cached_result
            self._finalizers = []
    def execute(self, request):
        """Return the fixture value for *request*, reusing a compatible
        cached result or (re)computing it via the pytest_fixture_setup hook."""
        # get required arguments and register our own finish()
        # with their finalization
        for argname in self.argnames:
            fixturedef = request._get_active_fixturedef(argname)
            if argname != "request":
                fixturedef.addfinalizer(functools.partial(self.finish, request=request))
        my_cache_key = self.cache_key(request)
        cached_result = getattr(self, "cached_result", None)
        if cached_result is not None:
            # cached_result is a (result, cache_key, exc_info) triple; a
            # cached exception is re-raised rather than re-running the fixture
            result, cache_key, err = cached_result
            if my_cache_key == cache_key:
                if err is not None:
                    _, val, tb = err
                    raise val.with_traceback(tb)
                else:
                    return result
            # we have a previous but differently parametrized fixture instance
            # so we need to tear it down before creating a new one
            self.finish(request)
            assert not hasattr(self, "cached_result")
        hook = self._fixturemanager.session.gethookproxy(request.node.fspath)
        return hook.pytest_fixture_setup(fixturedef=self, request=request)
    def cache_key(self, request):
        """Key deciding whether a cached result may be reused: the request's
        param when parametrized, otherwise its param index."""
        return request.param_index if not hasattr(request, "param") else request.param
    def __repr__(self):
        return "<FixtureDef argname={!r} scope={!r} baseid={!r}>".format(
            self.argname, self.scope, self.baseid
        )
def resolve_fixture_function(fixturedef, request):
    """Gets the actual callable that can be called to obtain the fixture value, dealing with unittest-specific
    instances and bound methods.
    """
    fixturefunc = fixturedef.func
    if fixturedef.unittest:
        if request.instance is not None:
            # bind the unbound method to the TestCase instance
            fixturefunc = fixturedef.func.__get__(request.instance)
    else:
        # the fixture function needs to be bound to the actual
        # request.instance so that code working with "fixturedef" behaves
        # as expected.
        if request.instance is not None:
            # handle the case where fixture is defined not in a test class, but some other class
            # (for example a plugin class with a fixture), see #2270
            if hasattr(fixturefunc, "__self__") and not isinstance(
                request.instance, fixturefunc.__self__.__class__
            ):
                return fixturefunc
            # unwrap to the plain function, then rebind it onto the
            # requesting instance if unwrapping changed anything
            fixturefunc = getimfunc(fixturedef.func)
            if fixturefunc != fixturedef.func:
                fixturefunc = fixturefunc.__get__(request.instance)
    return fixturefunc
def pytest_fixture_setup(fixturedef, request):
    """ Execution of fixture setup. """
    kwargs = {}
    for argname in fixturedef.argnames:
        # dependencies were instantiated by FixtureDef.execute beforehand, so
        # each has a cached_result (result, cache_key, exc_info) triple by now
        fixdef = request._get_active_fixturedef(argname)
        result, arg_cache_key, exc = fixdef.cached_result
        request._check_scope(argname, request.scope, fixdef.scope)
        kwargs[argname] = result
    fixturefunc = resolve_fixture_function(fixturedef, request)
    my_cache_key = fixturedef.cache_key(request)
    try:
        result = call_fixture_func(fixturefunc, request, kwargs)
    except TEST_OUTCOME:
        # cache the failure so later lookups re-raise instead of re-running
        fixturedef.cached_result = (None, my_cache_key, sys.exc_info())
        raise
    fixturedef.cached_result = (result, my_cache_key, None)
    return result
def _ensure_immutable_ids(ids):
if ids is None:
return
if callable(ids):
return ids
return tuple(ids)
def wrap_function_to_error_out_if_called_directly(function, fixture_marker):
    """Wrap the given fixture function so we can raise an error about it being called directly,
    instead of used as an argument in a test function.
    """
    name = fixture_marker.name or function.__name__
    message = (
        'Fixture "{name}" called directly. Fixtures are not meant to be called directly,\n'
        "but are created automatically when test functions request them as parameters.\n"
        "See https://docs.pytest.org/en/latest/fixture.html for more information about fixtures, and\n"
        "https://docs.pytest.org/en/latest/deprecations.html#calling-fixtures-directly about how to update your code."
    ).format(name=name)
    @functools.wraps(function)
    def result(*args, **kwargs):
        fail(message, pytrace=False)
    # keep reference to the original function in our own custom attribute so we don't unwrap
    # further than this point and lose useful wrappings like @mock.patch (#3774)
    result.__pytest_wrapped__ = _PytestWrapper(function)
    return result
@attr.s(frozen=True)
class FixtureFunctionMarker:
    """Immutable record of @fixture decoration arguments; calling the marker
    performs the actual decoration of the fixture function."""
    scope = attr.ib()
    params = attr.ib(converter=attr.converters.optional(tuple))
    autouse = attr.ib(default=False)
    # Ignore type because of https://github.com/python/mypy/issues/6172.
    ids = attr.ib(default=None, converter=_ensure_immutable_ids)  # type: ignore
    name = attr.ib(default=None)
    def __call__(self, function):
        """Validate *function*, wrap it against direct calls and attach this
        marker to it as ``_pytestfixturefunction``."""
        if inspect.isclass(function):
            raise ValueError("class fixtures not supported (maybe in the future)")
        if getattr(function, "_pytestfixturefunction", False):
            raise ValueError(
                "fixture is being applied more than once to the same function"
            )
        function = wrap_function_to_error_out_if_called_directly(function, self)
        name = self.name or function.__name__
        if name == "request":
            location = getlocation(function)
            fail(
                "'request' is a reserved word for fixtures, use another name:\n  {}".format(
                    location
                ),
                pytrace=False,
            )
        function._pytestfixturefunction = self
        return function
def fixture(scope="function", params=None, autouse=False, ids=None, name=None):
    """Decorator to mark a fixture factory function.
    This decorator can be used, with or without parameters, to define a
    fixture function.
    The name of the fixture function can later be referenced to cause its
    invocation ahead of running tests: test
    modules or classes can use the ``pytest.mark.usefixtures(fixturename)``
    marker.
    Test functions can directly use fixture names as input
    arguments in which case the fixture instance returned from the fixture
    function will be injected.
    Fixtures can provide their values to test functions using ``return`` or ``yield``
    statements. When using ``yield`` the code block after the ``yield`` statement is executed
    as teardown code regardless of the test outcome, and must yield exactly once.
    :arg scope: the scope for which this fixture is shared, one of
                ``"function"`` (default), ``"class"``, ``"module"``,
                ``"package"`` or ``"session"``.
                ``"package"`` is considered **experimental** at this time.
    :arg params: an optional list of parameters which will cause multiple
                invocations of the fixture function and all of the tests
                using it.
                The current parameter is available in ``request.param``.
    :arg autouse: if True, the fixture func is activated for all tests that
                can see it.  If False (the default) then an explicit
                reference is needed to activate the fixture.
    :arg ids: list of string ids each corresponding to the params
                so that they are part of the test id. If no ids are provided
                they will be generated automatically from the params.
    :arg name: the name of the fixture. This defaults to the name of the
                decorated function. If a fixture is used in the same module in
                which it is defined, the function name of the fixture will be
                shadowed by the function arg that requests the fixture; one way
                to resolve this is to name the decorated function
                ``fixture_<fixturename>`` and then use
                ``@pytest.fixture(name='<fixturename>')``.
    """
    # normalize params to a list unless it already is a list/tuple (or None)
    if params is not None and not isinstance(params, (list, tuple)):
        params = list(params)
    if callable(scope) and params is None and autouse is False:
        # direct decoration: ``@fixture`` used without parentheses, so
        # *scope* is actually the function being decorated
        return FixtureFunctionMarker("function", params, autouse, name=name)(scope)
    return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name)
def yield_fixture(scope="function", params=None, autouse=False, ids=None, name=None):
    """ (return a) decorator to mark a yield-fixture factory function.
    .. deprecated:: 3.0
        Use :py:func:`pytest.fixture` directly instead.
    """
    # plain alias kept for backward compatibility; forwards everything
    return fixture(scope, params, autouse, ids, name)
# Default marker applied when registering fixture functions without explicit
# @fixture arguments (plain function-scoped fixture).
defaultfuncargprefixmarker = fixture()
@fixture(scope="session")
def pytestconfig(request):
    """Session-scoped fixture that returns the :class:`_pytest.config.Config` object.
    Example::
        def test_foo(pytestconfig):
            if pytestconfig.getoption("verbose") > 0:
                ...
    """
    # simply exposes the Config object already attached to the request
    return request.config
def pytest_addoption(parser):
    """Register the ``usefixtures`` ini option for project-wide default fixtures."""
    help_text = "list of default fixtures to be used with this project"
    parser.addini("usefixtures", type="args", default=[], help=help_text)
class FixtureManager:
    """
    pytest fixtures definitions and information is stored and managed
    from this class.
    During collection fm.parsefactories() is called multiple times to parse
    fixture function definitions into FixtureDef objects and internal
    data structures.
    During collection of test functions, metafunc-mechanics instantiate
    a FuncFixtureInfo object which is cached per node/func-name.
    This FuncFixtureInfo object is later retrieved by Function nodes
    which themselves offer a fixturenames attribute.
    The FuncFixtureInfo object holds information about fixtures and FixtureDefs
    relevant for a particular function. An initial list of fixtures is
    assembled like this:
    - ini-defined usefixtures
    - autouse-marked fixtures along the collection chain up from the function
    - usefixtures markers at module/class/function level
    - test function funcargs
    Subsequently the funcfixtureinfo.fixturenames attribute is computed
    as the closure of the fixtures needed to setup the initial fixtures,
    i. e. fixtures needed by fixture functions themselves are appended
    to the fixturenames list.
    Upon the test-setup phases all fixturenames are instantiated, retrieved
    by a lookup of their FuncFixtureInfo.
    """
    FixtureLookupError = FixtureLookupError
    FixtureLookupErrorRepr = FixtureLookupErrorRepr
    def __init__(self, session):
        self.session = session
        self.config = session.config
        # fixture name -> list of FixtureDefs, ordered plugins -> conftests
        self._arg2fixturedefs = {}
        self._holderobjseen = set()
        self._arg2finish = {}
        # (base node id, autouse fixture names) pairs; "" applies everywhere
        self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))]
        session.config.pluginmanager.register(self, "funcmanage")
    def _get_direct_parametrize_args(self, node):
        """This function returns all the direct parametrization
        arguments of a node, so we don't mistake them for fixtures
        Check https://github.com/pytest-dev/pytest/issues/5036
        This things are done later as well when dealing with parametrization
        so this could be improved
        """
        from _pytest.mark import ParameterSet
        parametrize_argnames = []
        for marker in node.iter_markers(name="parametrize"):
            # indirect parametrization goes through fixtures, so only
            # non-indirect argnames are collected here
            if not marker.kwargs.get("indirect", False):
                p_argnames, _ = ParameterSet._parse_parametrize_args(
                    *marker.args, **marker.kwargs
                )
                parametrize_argnames.extend(p_argnames)
        return parametrize_argnames
    def getfixtureinfo(self, node, func, cls, funcargs=True):
        """Build and return the FuncFixtureInfo for *func* collected at *node*."""
        if funcargs and not getattr(node, "nofuncargs", False):
            argnames = getfuncargnames(func, name=node.name, cls=cls)
        else:
            argnames = ()
        usefixtures = itertools.chain.from_iterable(
            mark.args for mark in node.iter_markers(name="usefixtures")
        )
        initialnames = tuple(usefixtures) + argnames
        fm = node.session._fixturemanager
        initialnames, names_closure, arg2fixturedefs = fm.getfixtureclosure(
            initialnames, node, ignore_args=self._get_direct_parametrize_args(node)
        )
        return FuncFixtureInfo(argnames, initialnames, names_closure, arg2fixturedefs)
    def pytest_plugin_registered(self, plugin):
        """Hook: parse fixture definitions from a newly registered plugin."""
        nodeid = None
        try:
            p = py.path.local(plugin.__file__).realpath()
        except AttributeError:
            pass
        else:
            # construct the base nodeid which is later used to check
            # what fixtures are visible for particular tests (as denoted
            # by their test id)
            if p.basename.startswith("conftest.py"):
                nodeid = p.dirpath().relto(self.config.rootdir)
                if p.sep != nodes.SEP:
                    nodeid = nodeid.replace(p.sep, nodes.SEP)
        self.parsefactories(plugin, nodeid)
    def _getautousenames(self, nodeid):
        """ return a tuple of fixture names to be used. """
        autousenames = []
        for baseid, basenames in self._nodeid_and_autousenames:
            if nodeid.startswith(baseid):
                if baseid:
                    # only match whole path components: the character after
                    # the base id must be a node-id separator, not more name
                    i = len(baseid)
                    nextchar = nodeid[i : i + 1]
                    if nextchar and nextchar not in ":/":
                        continue
                autousenames.extend(basenames)
        return autousenames
    def getfixtureclosure(self, fixturenames, parentnode, ignore_args=()):
        # collect the closure of all fixtures , starting with the given
        # fixturenames as the initial set.  As we have to visit all
        # factory definitions anyway, we also return an arg2fixturedefs
        # mapping so that the caller can reuse it and does not have
        # to re-discover fixturedefs again for each fixturename
        # (discovering matching fixtures for a given name/node is expensive)
        parentid = parentnode.nodeid
        fixturenames_closure = self._getautousenames(parentid)
        def merge(otherlist):
            # append names not seen before, keeping first-seen order
            for arg in otherlist:
                if arg not in fixturenames_closure:
                    fixturenames_closure.append(arg)
        merge(fixturenames)
        # at this point, fixturenames_closure contains what we call "initialnames",
        # which is a set of fixturenames the function immediately requests. We
        # need to return it as well, so save this.
        initialnames = tuple(fixturenames_closure)
        arg2fixturedefs = {}
        lastlen = -1
        # iterate until a fixed point: merging may append new names to visit
        while lastlen != len(fixturenames_closure):
            lastlen = len(fixturenames_closure)
            for argname in fixturenames_closure:
                if argname in ignore_args:
                    continue
                if argname in arg2fixturedefs:
                    continue
                fixturedefs = self.getfixturedefs(argname, parentid)
                if fixturedefs:
                    arg2fixturedefs[argname] = fixturedefs
                    merge(fixturedefs[-1].argnames)
        def sort_by_scope(arg_name):
            # unknown names sort as function scope (the narrowest)
            try:
                fixturedefs = arg2fixturedefs[arg_name]
            except KeyError:
                return scopes.index("function")
            else:
                return fixturedefs[-1].scopenum
        fixturenames_closure.sort(key=sort_by_scope)
        return initialnames, fixturenames_closure, arg2fixturedefs
    def pytest_generate_tests(self, metafunc):
        """Hook: parametrize tests for fixtures defining ``params``, unless a
        parametrize marker already covers the same argument name."""
        for argname in metafunc.fixturenames:
            faclist = metafunc._arg2fixturedefs.get(argname)
            if faclist:
                fixturedef = faclist[-1]
                if fixturedef.params is not None:
                    markers = list(metafunc.definition.iter_markers("parametrize"))
                    for parametrize_mark in markers:
                        if "argnames" in parametrize_mark.kwargs:
                            argnames = parametrize_mark.kwargs["argnames"]
                        else:
                            argnames = parametrize_mark.args[0]
                        if not isinstance(argnames, (tuple, list)):
                            argnames = [
                                x.strip() for x in argnames.split(",") if x.strip()
                            ]
                        if argname in argnames:
                            break
                    else:
                        # no parametrize marker claimed this argname: apply
                        # the fixture's own params indirectly
                        metafunc.parametrize(
                            argname,
                            fixturedef.params,
                            indirect=True,
                            scope=fixturedef.scope,
                            ids=fixturedef.ids,
                        )
            else:
                continue  # will raise FixtureLookupError at setup time
    def pytest_collection_modifyitems(self, items):
        # separate parametrized setups
        items[:] = reorder_items(items)
    def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False):
        """Scan *node_or_obj* for fixture-marked callables and register a
        FixtureDef for each one found (idempotent per holder object)."""
        if nodeid is not NOTSET:
            holderobj = node_or_obj
        else:
            holderobj = node_or_obj.obj
            nodeid = node_or_obj.nodeid
        if holderobj in self._holderobjseen:
            return
        self._holderobjseen.add(holderobj)
        autousenames = []
        for name in dir(holderobj):
            # The attribute can be an arbitrary descriptor, so the attribute
            # access below can raise. safe_getatt() ignores such exceptions.
            obj = safe_getattr(holderobj, name, None)
            marker = getfixturemarker(obj)
            if not isinstance(marker, FixtureFunctionMarker):
                # magic globals  with __getattr__ might have got us a wrong
                # fixture attribute
                continue
            if marker.name:
                name = marker.name
            # during fixture definition we wrap the original fixture function
            # to issue a warning if called directly, so here we unwrap it in order to not emit the warning
            # when pytest itself calls the fixture function
            obj = get_real_method(obj, holderobj)
            fixture_def = FixtureDef(
                self,
                nodeid,
                name,
                obj,
                marker.scope,
                marker.params,
                unittest=unittest,
                ids=marker.ids,
            )
            faclist = self._arg2fixturedefs.setdefault(name, [])
            if fixture_def.has_location:
                faclist.append(fixture_def)
            else:
                # fixturedefs with no location are at the front
                # so this inserts the current fixturedef after the
                # existing fixturedefs from external plugins but
                # before the fixturedefs provided in conftests.
                i = len([f for f in faclist if not f.has_location])
                faclist.insert(i, fixture_def)
            if marker.autouse:
                autousenames.append(name)
        if autousenames:
            self._nodeid_and_autousenames.append((nodeid or "", autousenames))
    def getfixturedefs(self, argname, nodeid):
        """
        Gets a list of fixtures which are applicable to the given node id.
        :param str argname: name of the fixture to search for
        :param str nodeid: full node id of the requesting test.
        :return: list[FixtureDef]
        """
        try:
            fixturedefs = self._arg2fixturedefs[argname]
        except KeyError:
            return None
        return tuple(self._matchfactories(fixturedefs, nodeid))
    def _matchfactories(self, fixturedefs, nodeid):
        """Yield only the fixturedefs whose base id is an ancestor of *nodeid*."""
        for fixturedef in fixturedefs:
            if nodes.ischildnode(fixturedef.baseid, nodeid):
                yield fixturedef
| |
#! /usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Instructs Chrome to load series of web pages and reports results.
When running Chrome is sandwiched between preprocessed disk caches and
WebPageReplay serving all connections.
TODO(pasko): implement cache preparation and WPR.
"""
import argparse
import csv
import logging
import os
import shutil
import sys
_SRC_DIR = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..', '..'))
sys.path.append(os.path.join(_SRC_DIR, 'third_party', 'catapult', 'devil'))
from devil.android import device_utils
sys.path.append(os.path.join(_SRC_DIR, 'build', 'android'))
from pylib import constants
import devil_chromium
import chrome_cache
import common_util
import emulation
import options
import sandwich_metrics
import sandwich_misc
from sandwich_runner import SandwichRunner
from trace_test.webserver_test import WebServer
# Use options layer to access constants.
OPTIONS = options.OPTIONS
def _ArgumentParser():
  """Build a command line argument's parser.

  Returns:
    The fully configured argparse.ArgumentParser, with one sub-command per
    sandwich operation (record-wpr, patch-wpr, create-cache, run,
    extract-metrics, filter-cache, record-test-trace).
  """
  # Command parser when dealing with jobs.
  common_job_parser = argparse.ArgumentParser(add_help=False)
  common_job_parser.add_argument('--job', required=True,
                                 help='JSON file with job description.')
  # Plumbing parser to configure OPTIONS.
  plumbing_parser = OPTIONS.GetParentParser('plumbing options')
  # Main parser
  parser = argparse.ArgumentParser(parents=[plumbing_parser])
  subparsers = parser.add_subparsers(dest='subcommand', help='subcommand line')
  # Record WPR subcommand.
  record_wpr = subparsers.add_parser('record-wpr', parents=[common_job_parser],
                                     help='Record WPR from sandwich job.')
  record_wpr.add_argument('--wpr-archive', required=True, type=str,
                          dest='wpr_archive_path',
                          help='Web page replay archive to generate.')
  # Patch WPR subcommand.
  patch_wpr = subparsers.add_parser('patch-wpr',
                                    help='Patch WPR response headers.')
  patch_wpr.add_argument('--wpr-archive', required=True, type=str,
                         dest='wpr_archive_path',
                         help='Web page replay archive to patch.')
  # Create cache subcommand.
  create_cache_parser = subparsers.add_parser('create-cache',
      parents=[common_job_parser],
      help='Create cache from sandwich job.')
  create_cache_parser.add_argument('--cache-archive', required=True, type=str,
                                   dest='cache_archive_path',
                                   help='Cache archive destination path.')
  create_cache_parser.add_argument('--wpr-archive', default=None, type=str,
                                   dest='wpr_archive_path',
                                   help='Web page replay archive to create ' +
                                       'the cache from.')
  # Run subcommand.
  run_parser = subparsers.add_parser('run', parents=[common_job_parser],
                                     help='Run sandwich benchmark.')
  run_parser.add_argument('--output', required=True, type=str,
                          dest='trace_output_directory',
                          help='Path of output directory to create.')
  run_parser.add_argument('--cache-archive', type=str,
                          dest='cache_archive_path',
                          help='Cache archive destination path.')
  run_parser.add_argument('--cache-op',
                          choices=['clear', 'push', 'reload'],
                          dest='cache_operation',
                          default='clear',
                          help='Configures cache operation to do before '
                              'launching Chrome. (Default is clear). The push'
                              ' cache operation requires --cache-archive to '
                              'set.')
  run_parser.add_argument('--disable-wpr-script-injection',
                          action='store_true',
                          help='Disable WPR default script injection such as ' +
                              'overriding javascript\'s Math.random() and ' +
                              'Date() with deterministic implementations.')
  run_parser.add_argument('--network-condition', default=None,
      choices=sorted(emulation.NETWORK_CONDITIONS.keys()),
      help='Set a network profile.')
  run_parser.add_argument('--network-emulator', default='browser',
      choices=['browser', 'wpr'],
      help='Set which component is emulating the network condition.' +
          ' (Default to browser). Wpr network emulator requires --wpr-archive' +
          ' to be set.')
  run_parser.add_argument('--job-repeat', default=1, type=int,
                          help='How many times to run the job.')
  run_parser.add_argument('--record-video', action='store_true',
                          help='Configures either to record or not a video of '
                              'chrome loading the web pages.')
  run_parser.add_argument('--wpr-archive', default=None, type=str,
                          dest='wpr_archive_path',
                          help='Web page replay archive to load job\'s urls ' +
                              'from.')
  # Pull metrics subcommand.
  # NOTE: this parser used to reuse the `create_cache_parser` local, which
  # made the code above look related when it is not; renamed for clarity.
  extract_metrics_parser = subparsers.add_parser('extract-metrics',
      help='Extracts metrics from a loading trace and saves as CSV.')
  extract_metrics_parser.add_argument('--trace-directory', required=True,
      dest='trace_output_directory', type=str,
      help='Path of loading traces directory.')
  extract_metrics_parser.add_argument('--out-metrics', default=None, type=str,
      dest='metrics_csv_path',
      help='Path where to save the metrics\'s '
          'CSV.')
  # Filter cache subcommand.
  filter_cache_parser = subparsers.add_parser('filter-cache',
      help='Cache filtering that keeps only resources discoverable by the HTML'
          ' document parser.')
  filter_cache_parser.add_argument('--cache-archive', type=str, required=True,
      dest='cache_archive_path',
      help='Path of the cache archive to filter.')
  filter_cache_parser.add_argument('--subresource-discoverer', required=True,
      help='Strategy for populating the cache with a subset of resources, '
          'according to the way they can be discovered',
      choices=sandwich_misc.SUBRESOURCE_DISCOVERERS)
  filter_cache_parser.add_argument('--output', type=str, required=True,
      dest='output_cache_archive_path',
      help='Path of filtered cache archive.')
  filter_cache_parser.add_argument('loading_trace_paths', type=str, nargs='+',
      metavar='LOADING_TRACE',
      help='A list of loading traces generated by a sandwich run for a given' +
          ' url. This is used to have a resource dependency graph to white-' +
          'list the ones discoverable by the HTML pre-scanner for that given ' +
          'url.')
  # Record test trace subcommand.
  record_trace_parser = subparsers.add_parser('record-test-trace',
      help='Record a test trace using the trace_test.webserver_test.')
  record_trace_parser.add_argument('--source-dir', type=str, required=True,
      help='Base path where the files are opened'
          'by the web server.')
  record_trace_parser.add_argument('--page', type=str, required=True,
      help='Source page in source-dir to navigate '
          'to.')
  record_trace_parser.add_argument('-o', '--output', type=str, required=True,
      help='Output path of the generated trace.')
  return parser
def _RecordWprMain(args):
  """Records a WPR archive for the job's URLs.

  Args:
    args: Parsed 'record-wpr' command line arguments.

  Returns:
    Process exit code (0 on success).
  """
  sandwich_runner = SandwichRunner()
  sandwich_runner.LoadJob(args.job)
  sandwich_runner.PullConfigFromArgs(args)
  sandwich_runner.wpr_record = True
  sandwich_runner.PrintConfig()
  # BUG FIX: when --wpr-archive is a bare file name, os.path.dirname()
  # returns '' and os.makedirs('') raises OSError; only create the
  # directory when there is one.
  archive_dir = os.path.dirname(args.wpr_archive_path)
  if archive_dir and not os.path.isdir(archive_dir):
    os.makedirs(archive_dir)
  sandwich_runner.Run()
  return 0
def _CreateCacheMain(args):
  """Creates a cache archive by running the job with cache saving enabled.

  Args:
    args: Parsed 'create-cache' command line arguments.

  Returns:
    Process exit code (0 on success).
  """
  sandwich_runner = SandwichRunner()
  sandwich_runner.LoadJob(args.job)
  sandwich_runner.PullConfigFromArgs(args)
  sandwich_runner.cache_operation = 'save'
  sandwich_runner.PrintConfig()
  # BUG FIX: when --cache-archive is a bare file name, os.path.dirname()
  # returns '' and os.makedirs('') raises OSError; only create the
  # directory when there is one.
  archive_dir = os.path.dirname(args.cache_archive_path)
  if archive_dir and not os.path.isdir(archive_dir):
    os.makedirs(archive_dir)
  sandwich_runner.Run()
  return 0
def _RunJobMain(args):
  """Runs the sandwich benchmark described by the job file.

  Args:
    args: Parsed 'run' command line arguments.

  Returns:
    Process exit code (0 on success).
  """
  runner = SandwichRunner()
  runner.LoadJob(args.job)
  runner.PullConfigFromArgs(args)
  runner.PrintConfig()
  runner.Run()
  return 0
def _ExtractMetricsMain(args):
  """Aggregates per-run metrics from a trace directory into a CSV file.

  Args:
    args: Parsed 'extract-metrics' command line arguments.

  Returns:
    Process exit code (0 on success).
  """
  # Rows are written sorted by their 'id' field so the CSV is deterministic.
  metric_rows = sorted(
      sandwich_metrics.PullMetricsFromOutputDirectory(
          args.trace_output_directory),
      key=lambda row: row['id'])
  with open(args.metrics_csv_path, 'w') as csv_output:
    writer = csv.DictWriter(csv_output,
                            fieldnames=sandwich_metrics.CSV_FIELD_NAMES)
    writer.writeheader()
    for row in metric_rows:
      writer.writerow(row)
  return 0
def _FilterCacheMain(args):
  """Filters a cache archive down to the whitelisted (discoverable) URLs.

  Args:
    args: Parsed 'filter-cache' command line arguments.

  Returns:
    Process exit code (0 on success).
  """
  whitelisted_urls = set()
  for loading_trace_path in args.loading_trace_paths:
    whitelisted_urls.update(sandwich_misc.ExtractDiscoverableUrls(
        loading_trace_path, args.subresource_discoverer))
  # BUG FIX: when --output is a bare file name, os.path.dirname() returns ''
  # and os.makedirs('') raises OSError; only create the directory when there
  # is one.
  output_dir = os.path.dirname(args.output_cache_archive_path)
  if output_dir and not os.path.isdir(output_dir):
    os.makedirs(output_dir)
  chrome_cache.ApplyUrlWhitelistToCacheArchive(args.cache_archive_path,
                                               whitelisted_urls,
                                               args.output_cache_archive_path)
  return 0
def _RecordWebServerTestTrace(args):
  """Records a test loading trace against a local web server.

  Args:
    args: Parsed 'record-test-trace' command line arguments.

  Returns:
    Process exit code (0 on success).
  """
  with common_util.TemporaryDirectory() as workdir:
    runner = SandwichRunner()
    # Reuse the WPR's forwarding to access the webpage from Android.
    runner.wpr_record = True
    runner.wpr_archive_path = os.path.join(workdir, 'wpr')
    runner.trace_output_directory = os.path.join(workdir, 'run')
    with WebServer.Context(
        source_dir=args.source_dir, communication_dir=workdir) as server:
      runner.urls = ['http://%s/%s' % (server.Address(), args.page)]
      runner.Run()
    # The first (and only) repeat of the run lands in subdirectory '0'.
    shutil.copy(
        os.path.join(workdir, 'run', '0', 'trace.json'), args.output)
  return 0
def main(command_line_args):
  """Sandwich tool entry point: dispatches to the selected subcommand.

  Args:
    command_line_args: Command line argument list (without argv[0]).

  Returns:
    Process exit code.
  """
  logging.basicConfig(level=logging.INFO)
  devil_chromium.Initialize()
  args = _ArgumentParser().parse_args(command_line_args)
  OPTIONS.SetParsedArgs(args)
  if args.subcommand == 'record-wpr':
    return _RecordWprMain(args)
  if args.subcommand == 'patch-wpr':
    sandwich_misc.PatchWpr(args.wpr_archive_path)
    return 0
  if args.subcommand == 'create-cache':
    return _CreateCacheMain(args)
  if args.subcommand == 'run':
    return _RunJobMain(args)
  if args.subcommand == 'extract-metrics':
    return _ExtractMetricsMain(args)
  if args.subcommand == 'filter-cache':
    return _FilterCacheMain(args)
  if args.subcommand == 'record-test-trace':
    return _RecordWebServerTestTrace(args)
  # BUG FIX: this used to be `assert False`, which is silently stripped when
  # Python runs with -O; fail loudly for an unhandled subcommand instead.
  raise NotImplementedError('Unhandled subcommand: %s' % args.subcommand)


if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
| |
#!/usr/bin/env python -W ignore::DeprecationWarning
'''
KPCA based feature engineering for 20-newsgroup document classification with
combination of kernels in each layers
Author : Akhil P M
Kernel used : Arc-cosine Kernel, Gaussian Kernel, Polynomial kernel
'''
import kernel
from settings import *
from umkl_new import *
from getdata_20NG import *
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
# Number of rows used for the KPCA basis / UMKL subproblem.
n=3000
# Number of base kernels mixed by UMKL at each layer.
n_kernels = 4
# Scratch matrices for the UMKL optimization.
# NOTE(review): these module-level buffers appear to be consumed by the
# star-imported umkl_new helpers — confirm before removing.
D = np.zeros((n,n))
M = np.zeros((n,n))
P = np.zeros((n,n))
matP = np.zeros((n_kernels, n_kernels))
vecQ = np.zeros((n_kernels,1))
# Gaussian kernel bandwidth parameter.
gamma = 0.01
# Classifier display names, kept aligned index-by-index with `classifiers`
# (zipped together in main()).
names = ["Linear SVM", "Decision Tree", "Random Forest",
         "AdaBoost Classifier", "Logistic Regression"]
# The classifiers evaluated on the engineered features.
classifiers = [
    SVC(kernel="linear", C=3.4,gamma=0.1),
    DecisionTreeClassifier(),
    RandomForestClassifier(n_estimators=300, n_jobs=-1),
    AdaBoostClassifier(n_estimators=70),
    LogisticRegression(random_state=1, C=0.4)]
def compute_J(N, theta):
    """Closed-form J_N(theta) term of the arc-cosine kernel.

    Implements the angular dependency for degrees N = 0..3 (Cho & Saul,
    "Kernel Methods for Deep Learning"); any other degree yields zeros
    of the same shape as theta, matching the original behavior.
    """
    sin_t = np.sin(theta)
    cos_t = np.cos(theta)
    gap = np.pi - theta
    if N == 0:
        return gap
    if N == 1:
        return sin_t + gap * cos_t
    if N == 2:
        return 3 * sin_t * cos_t + gap * (1 + 2 * cos_t ** 2)
    if N == 3:
        return (4 * sin_t ** 3 + 15 * sin_t * cos_t ** 2 +
                gap * (9 * sin_t ** 2 * cos_t + 15 * cos_t ** 3))
    return np.zeros(theta.shape)
def arc_cosine_vector(X, Y):
    """Multi-layer arc-cosine kernel matrix between the rows of X and Y.

    param = a vector of n (degree) values at each layer; the degrees are
    hard-coded to [0, 3, 3] as in the original experiments.

    Args:
        X: 2-D array, one sample per row.
        Y: 2-D array, one sample per row (same feature dimension as X).

    Returns:
        Kernel matrix of shape (X.shape[0], Y.shape[0]).
    """
    param = np.array([0, 3, 3])
    no_of_layers = len(param)
    M = np.dot(X, Y.T)
    temp1 = np.diag(np.dot(X, X.T))  # k(x, x) self-similarities
    temp2 = np.diag(np.dot(Y, Y.T))  # k(y, y) self-similarities
    # BUG FIX (portability): range() instead of py2-only xrange(); iteration
    # behavior is identical under Python 2.
    for i in range(no_of_layers):
        norm_matrix = np.outer(temp1, temp2)  # the matrix of k_xx and k_yy's
        # Clamp the cosine into [-1, 1] before arccos to dodge float noise.
        theta = np.arccos(np.maximum(np.minimum(M / np.sqrt(norm_matrix), 1.0), -1.0))
        n_l = param[i]
        M = np.multiply(np.power(norm_matrix, n_l / 2.0), compute_J(n_l, theta)) / np.pi
        if i < no_of_layers - 1:
            # Propagate the self-similarities through the layer (theta = 0).
            zero1 = np.zeros(len(temp1))
            zero2 = np.zeros(len(temp2))
            temp1 = np.multiply(np.power(temp1, n_l), compute_J(n_l, zero1)) / np.pi
            temp2 = np.multiply(np.power(temp2, n_l), compute_J(n_l, zero2)) / np.pi
    return M
def arc_cosine(X, Y):
    """Arc-cosine kernel matrix computed in row blocks to bound memory use."""
    n_rows = X.shape[0]
    block = 1000
    gram = np.zeros((n_rows, Y.shape[0]))
    for start in range(0, n_rows, block):
        stop = start + block  # slicing past the end is safe in numpy
        gram[start:stop] = arc_cosine_vector(X[start:stop], Y)
    return gram
def stratified_sampling(trainX, trainY, sample_size=3000):
    """Class-stratified subset of trainX used as the KPCA basis.

    Picks the first `sample_size // n_classes` rows of each class, in the
    order they appear in trainX.

    Args:
        trainX: 2-D feature array, one sample per row.
        trainY: 1-D integer labels (0..n_classes-1) aligned with trainX.
        sample_size: total number of rows to select. Defaults to the
            previously hard-coded 3000, so existing callers are unaffected.

    Returns:
        Array of shape (sample_size, trainX.shape[1]). As in the original
        code, if some class has fewer examples than its quota the trailing
        rows remain zero.
    """
    no_of_classes = len(np.unique(trainY))
    representers = np.zeros(no_of_classes)
    no_of_reps = int(sample_size / no_of_classes)
    kpcaX = np.zeros((sample_size, trainX.shape[1]))
    count = 0
    index = 0
    # BUG FIX (portability): range() instead of py2-only xrange().
    for i in range(trainX.shape[0]):
        label = trainY[i]
        if representers[label] < no_of_reps:
            kpcaX[index] = trainX[i]
            index += 1
            representers[label] += 1
            if representers[label] == no_of_reps:
                count += 1
        # Stop early once every class has filled its quota.
        if count == no_of_classes:
            break
    return kpcaX
def uncertainity_sampling(trainX, trainY):
    """Sample the most uncertain points using active learning techniques,
    specifically using the label propagation algorithm.

    The first 100 points keep their labels; the rest are marked unlabelled,
    a LabelSpreading model transduces label distributions, and the 3000
    highest-entropy (most uncertain) points are returned.

    Args:
        trainX: 2-D feature array, one sample per row.
        trainY: 1-D label array aligned with trainX.

    Returns:
        The 3000 rows of trainX the model is most uncertain about.
    """
    n_total_samples = len(trainY)
    n_labeled_points = 100
    unlabelled_indices = np.arange(n_total_samples)[n_labeled_points:]
    y_train = np.copy(trainY)
    y_train[unlabelled_indices] = -1  # -1 marks "unlabelled" for LabelSpreading
    lp_model = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
    lp_model.fit(trainX, y_train)
    # compute the entropies of transduced label distributions
    pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
    # select 3000 digit examples that the classifier is most uncertain about
    uncertainty_index = np.argsort(pred_entropies)[-3000:]
    print(uncertainty_index)
    # BUG FIX: this read `trainx` (lowercase), an undefined name that raised
    # NameError whenever this function was called.
    kpcaX = trainX[uncertainty_index]
    print(kpcaX.shape)
    return kpcaX
def multi_KPCA(trainX, trainY, testX, testY, param, k_type, layer):
    """One feature-engineering layer: KPCA on a UMKL combination of kernels.

    Fits KernelPCA on the first 3000 training rows using a precomputed UMKL
    gram matrix, then projects both the full training set and the test set.

    Args:
        trainX, trainY: training features and labels.
        testX, testY: test features and labels (labels only used by the
            per-kernel performance report).
        param: kernel parameter vector for this layer.
        k_type: kernel type selector vector for this layer.
        layer: 1-based layer index.

    Returns:
        (trainX_kpca, testX_kpca): projected feature matrices.
    """
    kpca = KernelPCA(kernel='precomputed')
    kpcaX = trainX[0:3000]
    # Get the UMKL mixing coefficients from the first n training rows.
    # (Removed the np.zeros pre-allocations that were dead stores: every
    # buffer was immediately overwritten by getUMKL_gram_matrix below.)
    mu = getUMKL_coefficients(trainX[:n], k_type, param, layer)
    print(mu)
    kpca_train = getUMKL_gram_matrix(kpcaX, kpcaX, k_type, param, mu, layer)
    kernel_train = getUMKL_gram_matrix(trainX, kpcaX, k_type, param, mu, layer)
    kernel_test = getUMKL_gram_matrix(testX, kpcaX, k_type, param, mu, layer)
    kpca.fit(kpca_train)
    trainX_kpca = kpca.transform(kernel_train)
    testX_kpca = kpca.transform(kernel_test)
    gc.collect()
    get_individual_kernel_performance(kpcaX, trainX, trainY, testX, testY,
                                      k_type, param, mu, layer)
    return trainX_kpca, testX_kpca
def read_cmd_arguments(no_of_layers, no_of_kernels):
    """Load per-layer kernel parameters and kernel types from CSV files.

    The first positional command line argument names the parameter CSV;
    the kernel type matrix is always read from 'kernels.csv'.
    (The two function parameters are accepted for interface compatibility
    but are not used here.)
    """
    config_path = sys.argv[1]
    param = genfromtxt(config_path, delimiter=',')
    print(param)
    k_type = genfromtxt('kernels.csv', delimiter=',')
    return param, k_type
def main():
    """Run layered KPCA feature engineering on 20-newsgroups, then evaluate
    the engineered features with several classifiers.
    """
    #ignore all warnings
    warnings.filterwarnings("ignore")
    #set the parameters
    no_of_layers = 4
    no_of_kernels = 5
    kparam = np.array([0,3,3])
    """ param = a vector of kernel parameter values at each layer """
    param, k_type = read_cmd_arguments(no_of_layers, no_of_kernels)
    # parse commandline arguments
    # NOTE(review): these optparse options are consumed by
    # get20newsgroup_data(op); confirm against getdata_20NG.
    op = OptionParser()
    op.add_option("--report",action="store_true", dest="print_report",
                  help="Print a detailed classification report.")
    op.add_option("--chi2_select", action="store", type="int", dest="select_chi2",
                  help="Select some number of features using a chi-squared test")
    op.add_option("--top10",action="store_true", dest="print_top10",
                  help="Print ten most discriminative terms per class"" for every classifier.")
    op.add_option("--all_categories",action="store_true", dest="all_categories",
                  help="Whether to use all categories or not.")
    op.add_option("--use_hashing", action="store_true",
                  help="Use a hashing vectorizer.")
    # NOTE(review): optparse expects type="int" (a string); passing the
    # builtin `int` here raises OptionError at runtime — verify.
    op.add_option("--n_features", action="store", type=int, default=2 ** 16,
                  help="n_features when using the hashing vectorizer.")
    op.add_option("--filtered", action="store_true",
                  help="Remove newsgroup information that easily overfits: ""headers, signatures, and quoting.")
    #set the timer
    start = time()
    #get the data after preprocessing
    trainX, testX, trainY, testY = get20newsgroup_data(op)
    print('\n!!! Data Loading Completed !!!\n')
    #shuffle the training data
    shuffle = np.random.permutation(trainX.shape[0])
    trainX = trainX[shuffle]
    trainY = trainY[shuffle]
    # Keep only the top 5% most discriminative features after each layer.
    selector = SelectPercentile(f_classif, percentile=5)
    #extract the features using KPCA
    for i in xrange(no_of_layers):
        trainX_kpca, testX_kpca = multi_KPCA(trainX, trainY, testX, testY, param[i], k_type[i], i+1)
        selector.fit(trainX_kpca, trainY)
        trainX = selector.transform(trainX_kpca)
        testX = selector.transform(testX_kpca)
        # Quick per-layer accuracy probe with a linear SVM.
        clf = SVC(kernel="linear", C=3.4)
        clf.fit(trainX, trainY)
        pred = clf.predict(testX)
        print(accuracy_score(testY, pred))
        print(trainX_kpca.shape)
        print(trainX.shape)
        print('============================ Layer %d Completed ============================' %(i+1))
    print(testX.shape)
    #save the new featurset for further exploration
    np.save('trainX_feat', trainX)
    np.save('testX_feat', testX)
    np.save('trainY_feat', trainY)
    np.save('testY_feat', testY)
    #fit the svm model and compute accuaracy measure
    parameters = {'n_neighbors' : list(np.arange(20)+1)}
    #clf = GridSearchCV(KNeighborsClassifier(weights='distance', n_jobs=-1), parameters)
    #clf = svm.SVC(kernel=arc_cosine, cache_size=2048)
    #clf.fit(trainX, trainY)
    # Evaluate every configured classifier on the final feature set.
    for name, clf in zip(names, classifiers):
        clf.fit(trainX, trainY)
        pred = clf.predict(testX)
        print('classifier : %s, Accuracy :%f%% ' %(name, accuracy_score(testY, pred)*100))
        print('total : %d, correct : %d, incorrect : %d\n' %(len(pred), np.sum(pred == testY), np.sum(pred != testY)))
    #pred = clf.predict(testX)
    #print(accuracy_score(testY, pred))
    #print(confusion_matrix(testY, pred))
    #print(clf.best_params_)
    #print('total : %d, correct : %d, incorrect : %d\n' %(len(pred), np.sum(pred == testY), np.sum(pred != testY)))
    print('Test Time : %f Minutes\n' %((time()-start)/60))
    print('completed time ' + str(datetime.now().hour) + ':' + str(datetime.now().minute))


if __name__ == '__main__':
    main()
| |
from __future__ import absolute_import, division, print_function
import os
from ...external.qt.QtGui import (
QMainWindow, QMessageBox, QWidget)
from ...external.qt.QtCore import Qt
from ...core.application_base import ViewerBase
from ..decorators import set_cursor
from ..layer_artist_model import QtLayerArtistContainer, LayerArtistView
from .. import get_qapp
from ..mime import LAYERS_MIME_TYPE, LAYER_MIME_TYPE
from .glue_mdi_area import GlueMdiSubWindow
__all__ = ['DataViewer']
class DataViewer(QMainWindow, ViewerBase):

    """Base class for all Qt DataViewer widgets.

    This defines a minimal interface, and implements the following::

        * An automatic call to unregister on window close
        * Drag and drop support for adding data
    """

    _container_cls = QtLayerArtistContainer
    LABEL = 'Override this'

    def __init__(self, session, parent=None):
        """
        :type session: :class:`~glue.core.Session`
        """
        QMainWindow.__init__(self, parent)
        ViewerBase.__init__(self, session)
        self.setWindowIcon(get_qapp().windowIcon())
        self._view = LayerArtistView()
        self._view.setModel(self._container.model)
        self._tb_vis = {}  # store whether toolbars are enabled
        self.setAttribute(Qt.WA_DeleteOnClose)
        self.setAcceptDrops(True)
        self.setAnimated(False)
        self._toolbars = []
        self._warn_close = True
        self.setContentsMargins(2, 2, 2, 2)
        self._mdi_wrapper = None  # GlueMdiSubWindow that self is embedded in
        self.statusBar().setStyleSheet("QStatusBar{font-size:10px}")
        # close window when last plot layer deleted
        self._container.on_empty(lambda: self.close(warn=False))
        self._container.on_changed(self.update_window_title)

    def remove_layer(self, layer):
        """Remove *layer* from this viewer's layer artist container."""
        self._container.pop(layer)

    def dragEnterEvent(self, event):
        """ Accept the event if it has data layers"""
        if event.mimeData().hasFormat(LAYER_MIME_TYPE):
            event.accept()
        elif event.mimeData().hasFormat(LAYERS_MIME_TYPE):
            event.accept()
        else:
            event.ignore()

    def dropEvent(self, event):
        """ Add layers to the viewer if contained in mime data """
        if event.mimeData().hasFormat(LAYER_MIME_TYPE):
            # Single-layer drop.
            self.request_add_layer(event.mimeData().data(LAYER_MIME_TYPE))
        else:
            # BUG FIX: this assert previously executed unconditionally, so a
            # drop carrying only LAYER_MIME_TYPE (already handled above)
            # raised an AssertionError. The two branches now mirror the
            # formats accepted by dragEnterEvent.
            assert event.mimeData().hasFormat(LAYERS_MIME_TYPE)
            for layer in event.mimeData().data(LAYERS_MIME_TYPE):
                self.request_add_layer(layer)
        event.accept()

    def mousePressEvent(self, event):
        """ Consume mouse press events, and prevent them from propagating
        down to the MDI area """
        event.accept()

    # Show a wait cursor for the duration of any ROI application.
    apply_roi = set_cursor(Qt.WaitCursor)(ViewerBase.apply_roi)

    def close(self, warn=True):
        """Close the window.

        :param warn: if True, pop up a confirmation dialog first.
        """
        self._warn_close = warn
        super(DataViewer, self).close()
        self._warn_close = True

    def mdi_wrap(self):
        """Wrap this object in a GlueMdiSubWindow"""
        sub = GlueMdiSubWindow()
        sub.setWidget(self)
        self.destroyed.connect(sub.close)
        sub.resize(self.size())
        self._mdi_wrapper = sub
        return sub

    @property
    def position(self):
        # When embedded in an MDI sub window, report the wrapper's position.
        target = self._mdi_wrapper or self
        pos = target.pos()
        return pos.x(), pos.y()

    @position.setter
    def position(self, xy):
        x, y = xy
        self.move(x, y)

    def move(self, x=None, y=None):
        """
        Move the viewer to a new XY pixel location

        You can also set the position attribute to a new tuple directly.

        Parameters
        ----------
        x : int (optional)
           New x position
        y : int (optional)
           New y position
        """
        x0, y0 = self.position
        if x is None:
            x = x0
        if y is None:
            y = y0
        if self._mdi_wrapper is not None:
            self._mdi_wrapper.move(x, y)
        else:
            QMainWindow.move(self, x, y)

    @property
    def viewer_size(self):
        # (width, height) of the viewer window, in pixels.
        sz = QMainWindow.size(self)
        return sz.width(), sz.height()

    @viewer_size.setter
    def viewer_size(self, value):
        width, height = value
        self.resize(width, height)
        if self._mdi_wrapper is not None:
            self._mdi_wrapper.resize(width, height)

    def closeEvent(self, event):
        """ Call unregister on window close """
        if not self._confirm_close():
            event.ignore()
            return
        if self._hub is not None:
            self.unregister(self._hub)
        super(DataViewer, self).closeEvent(event)
        event.accept()

    def _confirm_close(self):
        """Ask for close confirmation

        :rtype: bool. True if user wishes to close. False otherwise
        """
        if self._warn_close and (not os.environ.get('GLUE_TESTING')) and self.isVisible():
            buttons = QMessageBox.Ok | QMessageBox.Cancel
            dialog = QMessageBox.warning(self, "Confirm Close",
                                         "Do you want to close this window?",
                                         buttons=buttons,
                                         defaultButton=QMessageBox.Cancel)
            return dialog == QMessageBox.Ok
        return True

    def _confirm_large_data(self, data):
        """Ask the user to confirm adding a large data set; return bool."""
        warn_msg = ("WARNING: Data set has %i points, and may render slowly."
                    " Continue?" % data.size)
        title = "Add large data set?"
        ok = QMessageBox.Ok
        cancel = QMessageBox.Cancel
        buttons = ok | cancel
        result = QMessageBox.question(self, title, warn_msg,
                                      buttons=buttons,
                                      defaultButton=cancel)
        return result == ok

    def layer_view(self):
        """Return the layer artist list widget."""
        return self._view

    def options_widget(self):
        """Return the viewer-options widget (empty placeholder by default)."""
        return QWidget()

    def addToolBar(self, tb):
        """Add *tb* and track it so hide/show_toolbars can manage it."""
        super(DataViewer, self).addToolBar(tb)
        self._toolbars.append(tb)
        self._tb_vis[tb] = True

    def show_toolbars(self):
        """Re-enable any toolbars that were hidden with `hide_toolbars()`

        Does not re-enable toolbars that were hidden by other means
        """
        for tb in self._toolbars:
            if self._tb_vis.get(tb, False):
                tb.setEnabled(True)

    def hide_toolbars(self):
        """ Disable all the toolbars in the viewer.

        This action can be reversed by calling `show_toolbars()`
        """
        for tb in self._toolbars:
            # Remember that this toolbar was visible so show_toolbars()
            # knows to restore it.
            self._tb_vis[tb] = self._tb_vis.get(tb, False) or tb.isVisible()
            tb.setEnabled(False)

    def set_focus(self, state):
        """Draw a highlight border (and enable toolbars) when focused."""
        if state:
            css = """
            DataViewer
            {
            border: 2px solid;
            border-color: rgb(56, 117, 215);
            }
            """
            self.setStyleSheet(css)
            self.show_toolbars()
        else:
            css = """
            DataViewer
            {
            border: none;
            }
            """
            self.setStyleSheet(css)
            self.hide_toolbars()

    def __str__(self):
        return self.LABEL

    def unregister(self, hub):
        """
        Override to perform cleanup operations when disconnecting from hub
        """
        pass

    @property
    def window_title(self):
        # Subclasses may override; defaults to str(self), i.e. self.LABEL.
        return str(self)

    def update_window_title(self):
        """Sync the Qt window title with the window_title property."""
        self.setWindowTitle(self.window_title)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import getpass
import argparse
import readline
import json
import rlcompleter
import atexit
import glob
# tab completion
def complete(text, state):
    """Readline completer that expands *text* as a filesystem glob.

    readline calls this repeatedly with increasing *state*; the trailing
    None sentinel tells it there are no more matches.
    """
    matches = glob.glob(text + '*') + [None]
    return matches[state]
# Wire the glob completer into readline and bind it to the Tab key.
readline.set_completer_delims(' \t\n;')
readline.parse_and_bind("tab: complete")
readline.set_completer(complete)
# history file
histfile = os.path.join(os.environ['HOME'], '.vxcage_history')
try:
    readline.read_history_file(histfile)
except IOError:
    # First run: no history file yet.
    pass
# Persist the session history when the interpreter exits.
atexit.register(readline.write_history_file, histfile)
# Drop names no longer needed after setup; the atexit callback keeps its
# own reference to readline.write_history_file.
del histfile, readline, rlcompleter
try:
    import requests
    from progressbar import *
    from prettytable import PrettyTable
except ImportError as e:
    # Fail fast with a clear message if a third-party dependency is missing.
    sys.exit("ERROR: Missing dependency: %s" % e)
def color(text, color_code):
    """Wrap *text* in the ANSI SGR escape sequence for *color_code*."""
    prefix = '\x1b[%dm' % color_code
    suffix = '\x1b[0m'  # reset attributes
    return prefix + text + suffix
def cyan(text):
    # SGR code 36 = cyan foreground.
    return color(text, 36)
def bold(text):
    # SGR code 1 = bold/bright.
    return color(text, 1)
def logo():
    """Print the VxCage ASCII-art banner."""
    banner = [
        " `o O o O .oOo .oOoO' .oOoO .oOo. ",
        " O o OoO O O o o O OooO' ",
        " o O o o o o O O o O ",
        " `o' O O `OoO' `OoO'o `OoOo `OoO' ",
        " O ",
    ]
    print("")
    for line in banner:
        print(cyan(line))
    print(cyan(" OoO' ") + " by nex")
    print("")
def help():
    """Print the interactive console command reference.

    NOTE(review): shadows the `help` builtin at module scope; kept as-is
    because run() dispatches to it by this name.
    """
    print("Available commands:")
    print(" " + bold("tags") + " Retrieve list of tags")
    print(" " + bold("find") + " Query a file by md5, sha256, ssdeep, imphash, tag or date")
    print(" " + bold("get") + " Download a file by sha256")
    print(" " + bold("dump") + " Dump a list of md5, sha256, ssdeep hashes")
    print(" " + bold("add") + " Upload a file to the server")
    print(" " + bold("last") + " Retrieve a list of the last x files uploaded")
    print(" " + bold("total") + " Total number of samples")
    print(" " + bold("stats") + " File type stats")
    print(" " )
    print(" " + bold("version") + " Version of remote vxcage server")
    print(" " + bold("license") + " Print the software license")
    print(" " )
    print(" " + bold("help | ?") + " Show this help")
    print(" " + bold("exit | quit") + " Exit cli application")
class VxCage(object):
def __init__(self, host, port, xmock, ssl=False, auth=False):
self.host = host
self.port = port
self.ssl = ssl
self.auth = auth
self.xmock = xmock
self.username = None
self.password = None
def authenticate(self):
if self.auth:
self.username = raw_input("Username: ")
self.password = getpass.getpass("Password: ")
def build_url(self, route):
if self.ssl:
url = "https://"
if self.port is None:
self.port = 443
else:
if self.port is None:
self.port = 8080
url = "http://"
url += "%s:%s%s%s" % (self.host, self.port, self.xmock, route)
return url
def check_errors(self, code):
if code == 400:
print("ERROR: Invalid request format")
return True
elif code == 500:
print("ERROR: Unexpected error, check your server logs")
return True
else:
return False
def tags_list(self):
req = requests.get(self.build_url("/tags/list"),
auth=(self.username, self.password),
verify=False)
try:
res = req.json()
except:
try:
res = req.json
except Exception as e:
print("ERROR: Unable to parse results: {0}".format(e))
return
if self.check_errors(req.status_code):
return
table = PrettyTable(["tag"])
table.align = "l"
table.padding_width = 1
for tag in res:
table.add_row([tag])
print(table)
print("Total: %s" % len(res))
def dump_list(self, hType):
req = requests.get(self.build_url("/malware/dump/"+hType),
auth=(self.username, self.password),
verify=False)
try:
res = req.json()
except:
try:
res = req.json
except Exception as e:
print("ERROR: Unable to parse results: {0}".format(e))
return
if self.check_errors(req.status_code):
return
table = PrettyTable([hType])
table.align = "l"
table.padding_width = 1
for hType in res:
table.add_row(hType)
print(table)
print("Total: %s" % len(res))
def malware_total(self):
req = requests.get(self.build_url("/malware/total"),
auth=(self.username, self.password),
verify=False)
try:
res = req.json()
except:
try:
res = req.json
except Exception as e:
print("ERROR: Unable to parse results: {0}".format(e))
return
if self.check_errors(req.status_code):
return
print("Total: %s" % res)
def malware_stats_total(self):
req = requests.get(self.build_url("/malware/total/stats"),
auth=(self.username, self.password),
verify=False)
try:
res = req.json()
except:
try:
res = req.json
except Exception as e:
print("ERROR: Unable to parse results: {0}".format(e))
return
if self.check_errors(req.status_code):
return
self._print_list(res, ["File_type", "Count"])
def server_version(self):
req = requests.get(self.build_url("/about"),
auth=(self.username, self.password),
verify=False)
try:
res = req.json()
except:
try:
res = req.json
except Exception as e:
print("ERROR: Unable to parse results: {0}".format(e))
return
if self.check_errors(req.status_code):
return
self._print_kv(res)
def license(self):
req = requests.get(self.build_url("/about/license"),
auth=(self.username, self.password),
verify=False)
if self.check_errors(req.status_code):
return
print req.text
def find_malware(self, term, value):
term = term.lower()
terms = ["md5", "sha256", "ssdeep", "imphash", "tag", "date"]
if not term in terms:
print("ERROR: Invalid search term [%s]" % (", ".join(terms)))
return
payload = {term : value}
req = requests.post(self.build_url("/malware/find"),
data=payload,
auth=(self.username, self.password),
verify=False)
try:
res = req.json()
except:
try:
res = req.json
except Exception as e:
print("ERROR: Unable to parse results: {0}".format(e))
return
if req.status_code == 404:
print("No file found matching your search")
return
if self.check_errors(req.status_code):
return
self._print_malware_info(res)
def last_x(self, x):
req = requests.get(self.build_url("/malware/last/"+x),
auth=(self.username, self.password),
verify=False)
try:
res = req.json()
except:
try:
res = req.json
except Exception as e:
print("ERROR: Unable to parse results: {0}".format(e))
return
if req.status_code == 404:
print("No data found matching your search")
return
if self.check_errors(req.status_code):
return
self._print_malware_info(res)
def get_malware(self, sha256, path):
if not os.path.exists(path):
print("ERROR: Folder does not exist at path %s" % path)
return
if not os.path.isdir(path):
print("ERROR: The path specified is not a directory.")
return
req = requests.get(self.build_url("/malware/get/%s" % sha256),
auth=(self.username, self.password),
verify=False)
if req.status_code == 404:
print("File not found")
return
if self.check_errors(req.status_code):
return
size = int(req.headers["Content-Length"].strip())
bytes = 0
widgets = [
"Download: ",
Percentage(),
" ",
Bar(marker=":"),
" ",
ETA(),
" ",
FileTransferSpeed()
]
progress = ProgressBar(widgets=widgets, maxval=size).start()
destination = os.path.join(path, sha256)
binary = open(destination, "wb")
for buf in req.iter_content(1024):
if buf:
binary.write(buf)
bytes += len(buf)
progress.update(bytes)
progress.finish()
binary.close()
print("File downloaded at path: %s" % destination)
def add_malware(self, path, tags=None):
if not os.path.exists(path):
print("ERROR: File does not exist at path %s" % path)
return
files = {"file": (os.path.basename(path), open(path, "rb"))}
payload = {"tags" : tags}
req = requests.post(self.build_url("/malware/add"),
auth=(self.username, self.password),
verify=False,
files=files,
data=payload)
if not self.check_errors(req.status_code):
print("File uploaded successfully")
def _is_number(self, s):
try:
float(s)
return True
except ValueError:
return False
def _print_kv(self, res):
table = PrettyTable(["Key","Value"])
table.align = "l"
table.padding_width = 1
for k,v in res.items():
table.add_row([k, v])
print(table)
def _print_list(self, res, title = ["Key", "Value"]):
table = PrettyTable(title)
table.align = "l"
table.padding_width = 1
for v in res:
table.add_row([v[0],v[1]])
print(table)
def _print_malware_info(self, res):
if isinstance(res, dict):
for key, value in res.items():
if key == "tags":
print("%s: %s" % (bold(key), ",".join(value)))
elif key == "virustotal":
vt = res["virustotal"]
try:
print('\033[1m' + "virustotal" + '\033[0m' + ": " + str(vt["positives"]) + "/" + str(vt["total"]) + " matches")
except:
print('\033[1m' + "virustotal" + '\033[0m' + ": -/- matches")
elif key == "exif":
exif = res["exif"]
#print('\033[1m' + "timestamp" + '\033[0m' + ": " + exif["EXE:TimeStamp"])
#print('\033[1m' + "character set" + '\033[0m' + ": " + exif["EXE:CharacterSet"])
else:
print("%s: %s" % (bold(key), value))
else:
table = PrettyTable(["md5",
"sha256",
"file_name",
"file_type",
"file_size",
"virustotal",
"created_at",
"tags"])
table.align = "l"
table.padding_width = 1
for entry in res:
table.add_row([entry["md5"],
entry["sha256"],
entry["file_name"],
entry["file_type"],
entry["file_size"],
entry["virustotal"]["virustotal"],
entry["created_at"],
", ".join(entry["tags"])])
print(table)
print("Total: %d" % len(res))
    def run(self):
        """Interactive console loop: authenticate, then dispatch commands.

        NOTE(review): uses ``raw_input``, so this module targets Python 2.
        """
        self.authenticate()
        while True:
            try:
                raw = raw_input(cyan("vxcage> "))
            except KeyboardInterrupt:
                # Ctrl-C clears the current line but keeps the shell alive.
                print("")
                continue
            except EOFError:
                # Ctrl-D exits the shell.
                print("")
                break
            # First token selects the command; the rest are its arguments.
            command = raw.strip().split(" ")
            if (command[0] == "help" or command[0] == "?"):
                help()
            elif (command[0] == "version" or command[0] == "about"):
                self.server_version()
            elif (command[0] == "license"):
                self.license()
            elif command[0] == "total":
                self.malware_total()
            elif command[0] == "stats":
                self.malware_stats_total()
            elif command[0] == "tags":
                self.tags_list()
            elif command[0] == "last":
                if len(command) == 2 and self._is_number(command[1]):
                    self.last_x(command[1])
                else:
                    print("ERROR: Missing arguments (e.g. \"last <x>\")")
            elif command[0] == "dump":
                if len(command) == 2 and command[1] in ['md5', 'sha256', 'ssdeep']:
                    self.dump_list(command[1])
                else:
                    print("ERROR: Missing arguments (e.g. \"dump <type>\")")
                    print("    Available types: md5, sha256, ssdeep")
            elif command[0] == "find":
                if len(command) == 3 and command[1] in ['md5', 'sha256', 'ssdeep', 'imphash', 'tag', 'date']:
                    self.find_malware(command[1], command[2])
                else:
                    print("ERROR: Missing arguments (e.g. \"find <key> <value>\")")
                    print("    Available keys: md5, sha256, ssdeep, imphash, tag or date")
            elif command[0] == "get":
                if len(command) == 3:
                    self.get_malware(command[1], command[2])
                else:
                    print("ERROR: Missing arguments (e.g. \"get <sha256> <path>\")")
            elif command[0] == "add":
                if len(command) == 2:
                    self.add_malware(command[1])
                elif len(command) == 3:
                    self.add_malware(command[1], command[2])
                else:
                    print("ERROR: Missing arguments (e.g. \"add <path> <comma separated tags>\")")
            elif (command[0] == "quit" or command[0] == "exit"):
                break
if __name__ == "__main__":
    logo()
    # Command-line flags describe how to reach the VxCage server.
    parser = argparse.ArgumentParser()
    parser.add_argument("-H", "--host", help="Host of VxCage server", default="localhost", action="store", required=False)
    parser.add_argument("-p", "--port", help="Port of VxCage server", action="store", required=False)
    parser.add_argument("-s", "--ssl", help="Enable if the server is running over SSL", default=False, action="store_true", required=False)
    parser.add_argument("-a", "--auth", help="Enable if the server is prompting an HTTP authentication", default=False, action="store_true", required=False)
    parser.add_argument("-x", "--xmock", help="(api testing) URL of VxCage server mock service", default="", action="store", required=False)
    args = parser.parse_args()
    # Run the interactive console until the user quits (EOF or "exit").
    vx = VxCage(host=args.host, port=args.port, ssl=args.ssl, auth=args.auth, xmock=args.xmock)
    vx.run()
| |
# This file is part of the django-environ.
#
# Copyright (c) 2021, Serghei Iakovlev <egrep@protonmail.ch>
# Copyright (c) 2013-2021, Daniele Faraglia <daniele.faraglia@gmail.com>
#
# For the full copyright and license information, please view
# the LICENSE.txt file that was distributed with this source code.
"""
Django-environ allows you to utilize 12factor inspired environment
variables to configure your Django application.
"""
import ast
import logging
import os
import re
import sys
import urllib.parse as urlparselib
import warnings
from urllib.parse import (
parse_qs,
ParseResult,
unquote_plus,
urlparse,
urlunparse,
)
from .compat import (
DJANGO_POSTGRES,
ImproperlyConfigured,
json,
PYMEMCACHE_DRIVER,
REDIS_DRIVER,
)
from .fileaware_mapping import FileAwareMapping
try:
from os import PathLike
except ImportError: # Python 3.5 support
from pathlib import PurePath as PathLike
Openable = (str, PathLike)
logger = logging.getLogger(__name__)
def _cast(value):
    """Best-effort conversion of a string to a Python literal.

    Uses :func:`ast.literal_eval`; when the value is not a valid Python
    literal/container display it is returned untouched.
    https://docs.python.org/3/library/ast.html#ast.literal_eval
    """
    try:
        result = ast.literal_eval(value)
    except (ValueError, SyntaxError):
        result = value
    return result
def _cast_int(v):
    """Return ``int(v)`` when ``v`` is a digit string, otherwise ``v``."""
    if hasattr(v, 'isdigit') and v.isdigit():
        return int(v)
    return v
def _cast_urlstr(v):
    """URL-unquote ``v`` (``+`` becomes space) when it is a string."""
    if isinstance(v, str):
        return unquote_plus(v)
    return v
class NoValue:
    """Sentinel type marking "no default supplied" (see ``Env.NOTSET``)."""

    def __repr__(self):
        return '<{}>'.format(type(self).__name__)
class Env:
    """Provide scheme-based lookups of environment variables so that each
    caller doesn't have to pass in `cast` and `default` parameters.

    Usage::

        env = Env(MAIL_ENABLED=bool, SMTP_LOGIN=(str, 'DEFAULT'))
        if env('MAIL_ENABLED'):
            ...
    """
    # Mapping consulted for lookups; FileAwareEnv overrides this.
    ENVIRON = os.environ
    # Sentinel meaning "no default was supplied".
    NOTSET = NoValue()
    # Lower-cased strings accepted as boolean true by parse_value().
    BOOLEAN_TRUE_STRINGS = ('true', 'on', 'ok', 'y', 'yes', '1')
    URL_CLASS = ParseResult
    POSTGRES_FAMILY = ['postgres', 'postgresql', 'psql', 'pgsql', 'postgis']
    ELASTICSEARCH_FAMILY = ['elasticsearch' + x for x in ['', '2', '5', '7']]
    DEFAULT_DATABASE_ENV = 'DATABASE_URL'
    # URL scheme -> Django database ENGINE dotted path.
    DB_SCHEMES = {
        'postgres': DJANGO_POSTGRES,
        'postgresql': DJANGO_POSTGRES,
        'psql': DJANGO_POSTGRES,
        'pgsql': DJANGO_POSTGRES,
        'postgis': 'django.contrib.gis.db.backends.postgis',
        'mysql': 'django.db.backends.mysql',
        'mysql2': 'django.db.backends.mysql',
        'mysql-connector': 'mysql.connector.django',
        'mysqlgis': 'django.contrib.gis.db.backends.mysql',
        'mssql': 'sql_server.pyodbc',
        'oracle': 'django.db.backends.oracle',
        'pyodbc': 'sql_server.pyodbc',
        'redshift': 'django_redshift_backend',
        'spatialite': 'django.contrib.gis.db.backends.spatialite',
        'sqlite': 'django.db.backends.sqlite3',
        'ldap': 'ldapdb.backends.ldap',
    }
    # Query-string keys promoted to top-level DATABASES settings
    # (everything else lands in OPTIONS).
    _DB_BASE_OPTIONS = [
        'CONN_MAX_AGE',
        'ATOMIC_REQUESTS',
        'AUTOCOMMIT',
        'DISABLE_SERVER_SIDE_CURSORS',
    ]
    DEFAULT_CACHE_ENV = 'CACHE_URL'
    # URL scheme -> Django cache BACKEND dotted path.
    CACHE_SCHEMES = {
        'dbcache': 'django.core.cache.backends.db.DatabaseCache',
        'dummycache': 'django.core.cache.backends.dummy.DummyCache',
        'filecache': 'django.core.cache.backends.filebased.FileBasedCache',
        'locmemcache': 'django.core.cache.backends.locmem.LocMemCache',
        'memcache': 'django.core.cache.backends.memcached.MemcachedCache',
        'pymemcache': PYMEMCACHE_DRIVER,
        'pylibmc': 'django.core.cache.backends.memcached.PyLibMCCache',
        'rediscache': REDIS_DRIVER,
        'redis': REDIS_DRIVER,
        'rediss': REDIS_DRIVER,
    }
    # Query-string keys promoted to top-level CACHES settings.
    _CACHE_BASE_OPTIONS = [
        'TIMEOUT',
        'KEY_PREFIX',
        'VERSION',
        'KEY_FUNCTION',
        'BINARY',
    ]
    DEFAULT_EMAIL_ENV = 'EMAIL_URL'
    # URL scheme -> Django EMAIL_BACKEND dotted path.
    EMAIL_SCHEMES = {
        'smtp': 'django.core.mail.backends.smtp.EmailBackend',
        'smtps': 'django.core.mail.backends.smtp.EmailBackend',
        'smtp+tls': 'django.core.mail.backends.smtp.EmailBackend',
        'smtp+ssl': 'django.core.mail.backends.smtp.EmailBackend',
        'consolemail': 'django.core.mail.backends.console.EmailBackend',
        'filemail': 'django.core.mail.backends.filebased.EmailBackend',
        'memorymail': 'django.core.mail.backends.locmem.EmailBackend',
        'dummymail': 'django.core.mail.backends.dummy.EmailBackend'
    }
    _EMAIL_BASE_OPTIONS = ['EMAIL_USE_TLS', 'EMAIL_USE_SSL']
    DEFAULT_SEARCH_ENV = 'SEARCH_URL'
    # URL scheme -> Haystack search ENGINE dotted path.
    SEARCH_SCHEMES = {
        "elasticsearch": "haystack.backends.elasticsearch_backend."
                         "ElasticsearchSearchEngine",
        "elasticsearch2": "haystack.backends.elasticsearch2_backend."
                          "Elasticsearch2SearchEngine",
        "elasticsearch5": "haystack.backends.elasticsearch5_backend."
                          "Elasticsearch5SearchEngine",
        "elasticsearch7": "haystack.backends.elasticsearch7_backend."
                          "Elasticsearch7SearchEngine",
        "solr": "haystack.backends.solr_backend.SolrEngine",
        "whoosh": "haystack.backends.whoosh_backend.WhooshEngine",
        "xapian": "haystack.backends.xapian_backend.XapianEngine",
        "simple": "haystack.backends.simple_backend.SimpleEngine",
    }
    # Path marker used by Google Cloud SQL unix-socket database URLs.
    CLOUDSQL = 'cloudsql'
def __init__(self, **scheme):
self.smart_cast = True
self.escape_proxy = False
self.scheme = scheme
def __call__(self, var, cast=None, default=NOTSET, parse_default=False):
return self.get_value(
var,
cast=cast,
default=default,
parse_default=parse_default
)
def __contains__(self, var):
return var in self.ENVIRON
# Shortcuts
def str(self, var, default=NOTSET, multiline=False):
"""
:rtype: str
"""
value = self.get_value(var, cast=str, default=default)
if multiline:
return re.sub(r'(\\r)?\\n', r'\n', value)
return value
def unicode(self, var, default=NOTSET):
"""Helper for python2
:rtype: unicode
"""
return self.get_value(var, cast=str, default=default)
def bytes(self, var, default=NOTSET, encoding='utf8'):
"""
:rtype: bytes
"""
value = self.get_value(var, cast=str, default=default)
if hasattr(value, 'encode'):
return value.encode(encoding)
return value
def bool(self, var, default=NOTSET):
"""
:rtype: bool
"""
return self.get_value(var, cast=bool, default=default)
def int(self, var, default=NOTSET):
"""
:rtype: int
"""
return self.get_value(var, cast=int, default=default)
def float(self, var, default=NOTSET):
"""
:rtype: float
"""
return self.get_value(var, cast=float, default=default)
def json(self, var, default=NOTSET):
"""
:returns: Json parsed
"""
return self.get_value(var, cast=json.loads, default=default)
def list(self, var, cast=None, default=NOTSET):
"""
:rtype: list
"""
return self.get_value(
var,
cast=list if not cast else [cast],
default=default
)
def tuple(self, var, cast=None, default=NOTSET):
"""
:rtype: tuple
"""
return self.get_value(
var,
cast=tuple if not cast else (cast,),
default=default
)
def dict(self, var, cast=dict, default=NOTSET):
"""
:rtype: dict
"""
return self.get_value(var, cast=cast, default=default)
def url(self, var, default=NOTSET):
"""
:rtype: urlparse.ParseResult
"""
return self.get_value(
var,
cast=urlparse,
default=default,
parse_default=True
)
def db_url(self, var=DEFAULT_DATABASE_ENV, default=NOTSET, engine=None):
"""Returns a config dictionary, defaulting to DATABASE_URL.
The db method is an alias for db_url.
:rtype: dict
"""
return self.db_url_config(
self.get_value(var, default=default),
engine=engine
)
db = db_url
def cache_url(self, var=DEFAULT_CACHE_ENV, default=NOTSET, backend=None):
"""Returns a config dictionary, defaulting to CACHE_URL.
The cache method is an alias for cache_url.
:rtype: dict
"""
return self.cache_url_config(
self.url(var, default=default),
backend=backend
)
cache = cache_url
def email_url(self, var=DEFAULT_EMAIL_ENV, default=NOTSET, backend=None):
"""Returns a config dictionary, defaulting to EMAIL_URL.
The email method is an alias for email_url.
:rtype: dict
"""
return self.email_url_config(
self.url(var, default=default),
backend=backend
)
email = email_url
def search_url(self, var=DEFAULT_SEARCH_ENV, default=NOTSET, engine=None):
"""Returns a config dictionary, defaulting to SEARCH_URL.
:rtype: dict
"""
return self.search_url_config(
self.url(var, default=default),
engine=engine
)
def path(self, var, default=NOTSET, **kwargs):
"""
:rtype: Path
"""
return Path(self.get_value(var, default=default), **kwargs)
    def get_value(self, var, cast=None, default=NOTSET, parse_default=False):
        """Return value for given environment variable.

        :param var: Name of variable.
        :param cast: Type to cast return value as.
        :param default: If var not present in environ, return this instead.
        :param parse_default: Force the default value to be parsed/cast too.
        :returns: Value from environment or default (if set)
        :raises ImproperlyConfigured: if var is unset and no default given.
        """
        logger.debug("get '{}' casted as '{}' with default '{}'".format(
            var, cast, default
        ))
        # A scheme entry supplied at construction time can provide the cast
        # and/or default: either Env(VAR=cast) or Env(VAR=(cast, default)).
        if var in self.scheme:
            var_info = self.scheme[var]
            try:
                has_default = len(var_info) == 2
            except TypeError:
                # Bare cast (not a sequence), e.g. Env(DEBUG=bool).
                has_default = False
            if has_default:
                if not cast:
                    cast = var_info[0]
                if default is self.NOTSET:
                    try:
                        default = var_info[1]
                    except IndexError:
                        pass
            else:
                if not cast:
                    cast = var_info
        try:
            value = self.ENVIRON[var]
        except KeyError:
            if default is self.NOTSET:
                error_msg = "Set the {} environment variable".format(var)
                raise ImproperlyConfigured(error_msg)
            value = default
        # Resolve any proxied values: "$OTHER" delegates to OTHER's value.
        prefix = b'$' if isinstance(value, bytes) else '$'
        escape = rb'\$' if isinstance(value, bytes) else r'\$'
        if hasattr(value, 'startswith') and value.startswith(prefix):
            value = value.lstrip(prefix)
            value = self.get_value(value, cast=cast, default=default)
        if self.escape_proxy and hasattr(value, 'replace'):
            # "\$literal" unescapes back to "$literal".
            value = value.replace(escape, prefix)
        # Smart casting: infer the cast from the default's type when none
        # was supplied explicitly.
        if self.smart_cast:
            if cast is None and default is not None and \
                    not isinstance(default, NoValue):
                cast = type(default)
        value = None if default is None and value == '' else value
        # The untouched default is returned as-is unless parse_default asks
        # for it to go through parse_value as well.
        if value != default or (parse_default and value):
            value = self.parse_value(value, cast)
        return value
# Class and static methods
    @classmethod
    def parse_value(cls, value, cast):
        """Parse and cast provided value

        :param value: Stringed value.
        :param cast: Type to cast return value as; may be a type, or a
            list/tuple/dict *instance* describing element-wise casting.
        :returns: Casted value
        """
        if cast is None:
            return value
        elif cast is bool:
            try:
                # Numeric strings: anything non-zero is True.
                value = int(value) != 0
            except ValueError:
                # Otherwise compare against the accepted "true" words.
                value = value.lower() in cls.BOOLEAN_TRUE_STRINGS
        elif isinstance(cast, list):
            # [item_cast]: comma-separated list, every item casted.
            value = list(map(cast[0], [x for x in value.split(',') if x]))
        elif isinstance(cast, tuple):
            # (item_cast,): "(a,b,c)" tuple, every item casted.
            val = value.strip('(').strip(')').split(',')
            value = tuple(map(cast[0], [x for x in val if x]))
        elif isinstance(cast, dict):
            # {'key': kc, 'value': vc, 'cast': {key: vc}}: "k=v;k2=v2"
            # mapping with optional per-key value casts.
            key_cast = cast.get('key', str)
            value_cast = cast.get('value', str)
            value_cast_by_key = cast.get('cast', dict())
            value = dict(map(
                lambda kv: (
                    key_cast(kv[0]),
                    cls.parse_value(
                        kv[1],
                        value_cast_by_key.get(kv[0], value_cast)
                    )
                ),
                [val.split('=') for val in value.split(';') if val]
            ))
        elif cast is dict:
            # Plain dict cast: "k=v,k2=v2" with string values.
            value = dict([val.split('=') for val in value.split(',') if val])
        elif cast is list:
            value = [x for x in value.split(',') if x]
        elif cast is tuple:
            val = value.strip('(').strip(')').split(',')
            value = tuple([x for x in val if x])
        elif cast is float:
            # clean string
            float_str = re.sub(r'[^\d,.-]', '', value)
            # split for avoid thousand separator and different
            # locale comma/dot symbol
            parts = re.split(r'[,.]', float_str)
            if len(parts) == 1:
                float_str = parts[0]
            else:
                float_str = "{}.{}".format(''.join(parts[0:-1]), parts[-1])
            value = float(float_str)
        else:
            value = cast(value)
        return value
    @classmethod
    def db_url_config(cls, url, engine=None):
        """Pulled from DJ-Database-URL, parse an arbitrary Database URL.

        Support currently exists for PostgreSQL, PostGIS, MySQL, Oracle and
        SQLite.

        SQLite connects to file based databases. The same URL format is used,
        omitting the hostname, and using the "file" portion as the filename of
        the database. This has the effect of four slashes being present for an
        absolute file path.

        :param url: database URL string or already-parsed ParseResult.
        :param engine: optional dotted path overriding the scheme's ENGINE.
        """
        if not isinstance(url, cls.URL_CLASS):
            if url == 'sqlite://:memory:':
                # this is a special case, because if we pass this URL into
                # urlparse, urlparse will choke trying to interpret "memory"
                # as a port number
                return {
                    'ENGINE': cls.DB_SCHEMES['sqlite'],
                    'NAME': ':memory:'
                }
            # note: no other settings are required for sqlite
            url = urlparse(url)
        config = {}
        # Remove query strings.
        path = url.path[1:]
        path = unquote_plus(path.split('?', 2)[0])
        if url.scheme == 'sqlite':
            if path == '':
                # if we are using sqlite and we have no path, then assume we
                # want an in-memory database (this is the behaviour of
                # sqlalchemy)
                path = ':memory:'
            if url.netloc:
                warnings.warn('SQLite URL contains host component %r, '
                              'it will be ignored' % url.netloc, stacklevel=3)
        if url.scheme == 'ldap':
            # ldapdb expects NAME to carry the full scheme://host[:port].
            path = '{scheme}://{hostname}'.format(
                scheme=url.scheme,
                hostname=url.hostname,
            )
            if url.port:
                path += ':{port}'.format(port=url.port)
        # Update with environment configuration.
        config.update({
            'NAME': path or '',
            'USER': _cast_urlstr(url.username) or '',
            'PASSWORD': _cast_urlstr(url.password) or '',
            'HOST': url.hostname or '',
            'PORT': _cast_int(url.port) or '',
        })
        # Cloud SQL / socket-style postgres URLs keep the host inside the
        # path ("/cloudsql/.../dbname"): split it back into HOST + NAME.
        if (
            url.scheme in cls.POSTGRES_FAMILY and path.startswith('/')
            or cls.CLOUDSQL in path and path.startswith('/')
        ):
            config['HOST'], config['NAME'] = path.rsplit('/', 1)
        if url.scheme == 'oracle' and path == '':
            config['NAME'] = config['HOST']
            config['HOST'] = ''
        if url.scheme == 'oracle':
            # Django oracle/base.py strips port and fails on non-string value
            if not config['PORT']:
                del (config['PORT'])
            else:
                config['PORT'] = str(config['PORT'])
        if url.query:
            config_options = {}
            for k, v in parse_qs(url.query).items():
                if k.upper() in cls._DB_BASE_OPTIONS:
                    config.update({k.upper(): _cast(v[0])})
                else:
                    config_options.update({k: _cast_int(v[0])})
            config['OPTIONS'] = config_options
        if engine:
            config['ENGINE'] = engine
        else:
            config['ENGINE'] = url.scheme
        if config['ENGINE'] in Env.DB_SCHEMES:
            config['ENGINE'] = Env.DB_SCHEMES[config['ENGINE']]
        if not config.get('ENGINE', False):
            warnings.warn("Engine not recognized from url: {}".format(config))
            return {}
        return config
    @classmethod
    def cache_url_config(cls, url, backend=None):
        """Pulled from DJ-Cache-URL, parse an arbitrary Cache URL.

        :param url: cache URL string or already-parsed ParseResult.
        :param backend: optional dotted path overriding the scheme's BACKEND.
        :return: Django CACHES-style config dict.
        """
        if not isinstance(url, cls.URL_CLASS):
            if not url:
                return {}
            else:
                url = urlparse(url)
        if url.scheme not in cls.CACHE_SCHEMES:
            raise ImproperlyConfigured(
                'Invalid cache schema {}'.format(url.scheme)
            )
        # A comma-separated netloc means multiple cache servers.
        location = url.netloc.split(',')
        if len(location) == 1:
            location = location[0]
        config = {
            'BACKEND': cls.CACHE_SCHEMES[url.scheme],
            'LOCATION': location,
        }
        # Add the drive to LOCATION
        if url.scheme == 'filecache':
            config.update({
                'LOCATION': url.netloc + url.path,
            })
        # urlparse('pymemcache://127.0.0.1:11211')
        # => netloc='127.0.0.1:11211', path=''
        #
        # urlparse('pymemcache://memcached:11211/?key_prefix=ci')
        # => netloc='memcached:11211', path='/'
        #
        # urlparse('memcache:///tmp/memcached.sock')
        # => netloc='', path='/tmp/memcached.sock'
        if not url.netloc and url.scheme in ['memcache', 'pymemcache']:
            # No host: the path is a unix domain socket.
            config.update({
                'LOCATION': 'unix:' + url.path,
            })
        elif url.scheme.startswith('redis'):
            if url.hostname:
                # rediscache -> redis, rediss stays rediss.
                scheme = url.scheme.replace('cache', '')
            else:
                scheme = 'unix'
            locations = [scheme + '://' + loc + url.path
                         for loc in url.netloc.split(',')]
            if len(locations) == 1:
                config['LOCATION'] = locations[0]
            else:
                config['LOCATION'] = locations
        if url.query:
            config_options = {}
            for k, v in parse_qs(url.query).items():
                opt = {k.upper(): _cast(v[0])}
                if k.upper() in cls._CACHE_BASE_OPTIONS:
                    config.update(opt)
                else:
                    config_options.update(opt)
            config['OPTIONS'] = config_options
        if backend:
            config['BACKEND'] = backend
        return config
    @classmethod
    def email_url_config(cls, url, backend=None):
        """Parses an email URL into a Django email settings dict.

        :param url: email URL string or already-parsed ParseResult.
        :param backend: optional dotted path overriding the scheme's backend.
        :raises ImproperlyConfigured: for an unknown scheme with no backend.
        """
        config = {}
        url = urlparse(url) if not isinstance(url, cls.URL_CLASS) else url
        # Remove query strings
        path = url.path[1:]
        path = unquote_plus(path.split('?', 2)[0])
        # Update with environment configuration
        config.update({
            'EMAIL_FILE_PATH': path,
            'EMAIL_HOST_USER': _cast_urlstr(url.username),
            'EMAIL_HOST_PASSWORD': _cast_urlstr(url.password),
            'EMAIL_HOST': url.hostname,
            'EMAIL_PORT': _cast_int(url.port),
        })
        if backend:
            config['EMAIL_BACKEND'] = backend
        elif url.scheme not in cls.EMAIL_SCHEMES:
            raise ImproperlyConfigured('Invalid email schema %s' % url.scheme)
        elif url.scheme in cls.EMAIL_SCHEMES:
            config['EMAIL_BACKEND'] = cls.EMAIL_SCHEMES[url.scheme]
        # The scheme also implies the transport security settings.
        if url.scheme in ('smtps', 'smtp+tls'):
            config['EMAIL_USE_TLS'] = True
        elif url.scheme == 'smtp+ssl':
            config['EMAIL_USE_SSL'] = True
        if url.query:
            config_options = {}
            for k, v in parse_qs(url.query).items():
                opt = {k.upper(): _cast_int(v[0])}
                if k.upper() in cls._EMAIL_BASE_OPTIONS:
                    config.update(opt)
                else:
                    config_options.update(opt)
            config['OPTIONS'] = config_options
        return config
    @classmethod
    def search_url_config(cls, url, engine=None):
        """Parse a search URL into a Haystack connection config dict.

        :param url: search URL string or already-parsed ParseResult.
        :param engine: optional dotted path overriding the scheme's ENGINE.
        :raises ImproperlyConfigured: for an unknown search scheme.
        """
        config = {}
        url = urlparse(url) if not isinstance(url, cls.URL_CLASS) else url
        # Remove query strings.
        path = url.path[1:]
        path = unquote_plus(path.split('?', 2)[0])
        if url.scheme not in cls.SEARCH_SCHEMES:
            raise ImproperlyConfigured(
                'Invalid search schema %s' % url.scheme
            )
        config["ENGINE"] = cls.SEARCH_SCHEMES[url.scheme]
        # check commons params
        params = {}  # type: dict
        if url.query:
            params = parse_qs(url.query)
            if 'EXCLUDED_INDEXES' in params.keys():
                config['EXCLUDED_INDEXES'] \
                    = params['EXCLUDED_INDEXES'][0].split(',')
            if 'INCLUDE_SPELLING' in params.keys():
                config['INCLUDE_SPELLING'] = cls.parse_value(
                    params['INCLUDE_SPELLING'][0],
                    bool
                )
            if 'BATCH_SIZE' in params.keys():
                config['BATCH_SIZE'] = cls.parse_value(
                    params['BATCH_SIZE'][0],
                    int
                )
        if url.scheme == 'simple':
            # SimpleEngine needs no URL/PATH at all.
            return config
        elif url.scheme in ['solr'] + cls.ELASTICSEARCH_FAMILY:
            if 'KWARGS' in params.keys():
                config['KWARGS'] = params['KWARGS'][0]
        # remove trailing slash
        if path.endswith("/"):
            path = path[:-1]
        if url.scheme == 'solr':
            config['URL'] = urlunparse(
                ('http',) + url[1:2] + (path,) + ('', '', '')
            )
            if 'TIMEOUT' in params.keys():
                config['TIMEOUT'] = cls.parse_value(params['TIMEOUT'][0], int)
            return config
        if url.scheme in cls.ELASTICSEARCH_FAMILY:
            # The last path segment is the index name; the rest is the URL.
            split = path.rsplit("/", 1)
            if len(split) > 1:
                path = "/".join(split[:-1])
                index = split[-1]
            else:
                path = ""
                index = split[0]
            config['URL'] = urlunparse(
                ('http',) + url[1:2] + (path,) + ('', '', '')
            )
            if 'TIMEOUT' in params.keys():
                config['TIMEOUT'] = cls.parse_value(params['TIMEOUT'][0], int)
            config['INDEX_NAME'] = index
            return config
        # File-based engines (whoosh, xapian) use a filesystem PATH.
        config['PATH'] = '/' + path
        if url.scheme == 'whoosh':
            if 'STORAGE' in params.keys():
                config['STORAGE'] = params['STORAGE'][0]
            if 'POST_LIMIT' in params.keys():
                config['POST_LIMIT'] = cls.parse_value(
                    params['POST_LIMIT'][0],
                    int
                )
        elif url.scheme == 'xapian':
            if 'FLAGS' in params.keys():
                config['FLAGS'] = params['FLAGS'][0]
        if engine:
            config['ENGINE'] = engine
        return config
    @classmethod
    def read_env(cls, env_file=None, overwrite=False, **overrides):
        """Read a .env file into os.environ.

        If not given a path to a dotenv path, does filthy magic stack
        backtracking to find the dotenv in the same directory as the file that
        called read_env.

        Existing environment variables take precedent and are NOT overwritten
        by the file content. ``overwrite=True`` will force an overwrite of
        existing environment variables.

        Refs:
        - https://wellfire.co/learn/easier-12-factor-django
        - https://gist.github.com/bennylope/2999704

        :param env_file: The path to the `.env` file your application should
            use. If a path is not provided, `read_env` will attempt to import
            the Django settings module from the Django project root.
        :param overwrite: ``overwrite=True`` will force an overwrite of
            existing environment variables.
        :param **overrides: Any additional keyword arguments provided directly
            to read_env will be added to the environment. If the key matches an
            existing environment variable, the value will be overridden.
        """
        if env_file is None:
            # Default to a ".env" next to the *caller's* file, found by
            # walking one frame up the interpreter stack.
            frame = sys._getframe()
            env_file = os.path.join(
                os.path.dirname(frame.f_back.f_code.co_filename),
                '.env'
            )
        if not os.path.exists(env_file):
            logger.info(
                "%s doesn't exist - if you're not configuring your "
                "environment separately, create one." % env_file)
            return
        try:
            if isinstance(env_file, Openable):
                # Python 3.5 support (wrap path with str).
                with open(str(env_file)) as f:
                    content = f.read()
            else:
                # Already an open file-like object.
                with env_file as f:
                    content = f.read()
        except OSError:
            logger.info(
                "%s not found - if you're not configuring your "
                "environment separately, check this." % env_file)
            return
        logger.debug('Read environment variables from: {}'.format(env_file))

        def _keep_escaped_format_characters(match):
            """Keep escaped newline/tabs in quoted strings"""
            escaped_char = match.group(1)
            if escaped_char in 'rnt':
                return '\\' + escaped_char
            return escaped_char

        for line in content.splitlines():
            # KEY=value, with an optional leading "export ".
            m1 = re.match(r'\A(?:export )?([A-Za-z_0-9]+)=(.*)\Z', line)
            if m1:
                key, val = m1.group(1), m1.group(2)
                # Single-quoted values are taken verbatim.
                m2 = re.match(r"\A'(.*)'\Z", val)
                if m2:
                    val = m2.group(1)
                # Double-quoted values get backslash escapes processed,
                # preserving \r \n \t sequences.
                m3 = re.match(r'\A"(.*)"\Z', val)
                if m3:
                    val = re.sub(r'\\(.)', _keep_escaped_format_characters,
                                 m3.group(1))
                overrides[key] = str(val)
            elif not line or line.startswith('#'):
                # ignore warnings for empty line-breaks or comments
                pass
            else:
                logger.warning('Invalid line: %s', line)

        def set_environ(envval):
            """Return lambda to set environ.

            Use setdefault unless overwrite is specified.
            """
            if overwrite:
                return lambda k, v: envval.update({k: str(v)})
            return lambda k, v: envval.setdefault(k, str(v))

        setenv = set_environ(cls.ENVIRON)
        for key, value in overrides.items():
            setenv(key, value)
class FileAwareEnv(Env):
    """
    First look for environment variables with ``_FILE`` appended. If found,
    their contents will be read from the file system and used instead.

    Use as a drop-in replacement for the standard ``environ.Env``:

    .. code-block:: python

        python env = environ.FileAwareEnv()

    For example, if a ``SECRET_KEY_FILE`` environment variable was set,
    ``env("SECRET_KEY")`` would find the related variable, returning the file
    contents rather than ever looking up a ``SECRET_KEY`` environment variable.
    """
    # FileAwareMapping wraps os.environ and resolves "<VAR>_FILE" lazily,
    # so all inherited Env lookups transparently gain the behavior.
    ENVIRON = FileAwareMapping()
class Path:
    """Filesystem path helper for settings (inspired by Django Two Scoops).

    Wraps an absolute root directory and supports composition:
    ``Path('/srv') + 'app'``, ``p.path('sub')``, ``p.file('x.txt')``,
    ``p - 1`` (parent) and ``p - 'suffix'`` (strip a trailing suffix).
    """

    def path(self, *paths, **kwargs):
        """Create new Path based on self.root and provided paths.

        :param paths: List of sub paths
        :param kwargs: required=False
        :rtype: Path
        """
        return self.__class__(self.__root__, *paths, **kwargs)

    def file(self, name, *args, **kwargs):
        """Open a file rooted at this path.

        :param name: Filename appended to self.root
        :param args: passed to open()
        :param kwargs: passed to open()
        :rtype: file
        """
        return open(self(name), *args, **kwargs)

    @property
    def root(self):
        """Current directory for this Path"""
        return self.__root__

    def __init__(self, start='', *paths, **kwargs):
        """Build the absolute root from ``start`` joined with ``paths``.

        :param start: base path; with ``is_file=True`` its directory is used.
        :param kwargs: is_file=False, required=False
        """
        super().__init__()
        if kwargs.get('is_file', False):
            start = os.path.dirname(start)
        self.__root__ = self._absolute_join(start, *paths, **kwargs)

    def __call__(self, *paths, **kwargs):
        """Retrieve the absolute path, with appended paths

        :param paths: List of sub path of self.root
        :param kwargs: required=False
        """
        return self._absolute_join(self.__root__, *paths, **kwargs)

    def __eq__(self, other):
        return self.__root__ == other.__root__

    def __ne__(self, other):
        return not self.__eq__(other)

    def __add__(self, other):
        # Path + str (or Path + Path) appends the right side under the root.
        if not isinstance(other, Path):
            return Path(self.__root__, other)
        return Path(self.__root__, other.__root__)

    def __sub__(self, other):
        """``path - n`` ascends *n* directories; ``path - 'suffix'`` removes
        a trailing string suffix.

        :raises TypeError: when ``other`` is neither an int nor a string
            suffix of the current root.
        """
        if isinstance(other, int):
            return self.path('../' * other)
        elif isinstance(other, str):
            if self.__root__.endswith(other):
                # BUGFIX: remove exactly the trailing suffix. The previous
                # implementation used str.rstrip(other), which strips any
                # run of *characters* from the set, so e.g.
                # Path('/a/foo') - 'o' wrongly produced '/a/f'.
                return Path(self.__root__[:len(self.__root__) - len(other)])
        raise TypeError(
            "unsupported operand type(s) for -: '{self}' and '{other}' "
            "unless value of {self} ends with value of {other}".format(
                self=type(self), other=type(other)
            )
        )

    def __invert__(self):
        # ~path is the parent directory.
        return self.path('..')

    def __contains__(self, item):
        # True when ``item`` lives under this directory (or equals it when
        # the root is "/").
        base_path = self.__root__
        if len(base_path) > 1:
            base_path = os.path.join(base_path, '')
        return item.__root__.startswith(base_path)

    def __repr__(self):
        return "<Path:{}>".format(self.__root__)

    def __str__(self):
        return self.__root__

    def __unicode__(self):
        return self.__str__()

    def __getitem__(self, *args, **kwargs):
        return self.__str__().__getitem__(*args, **kwargs)

    def __fspath__(self):
        # os.PathLike support: open(Path(...)) works directly.
        return self.__str__()

    def rfind(self, *args, **kwargs):
        """str.rfind on the string form of the path."""
        return self.__str__().rfind(*args, **kwargs)

    def find(self, *args, **kwargs):
        """str.find on the string form of the path."""
        return self.__str__().find(*args, **kwargs)

    @staticmethod
    def _absolute_join(base, *paths, **kwargs):
        """Join and absolutize; with required=True the result must exist."""
        absolute_path = os.path.abspath(os.path.join(base, *paths))
        if kwargs.get('required', False) and not os.path.exists(absolute_path):
            raise ImproperlyConfigured(
                "Create required path: {}".format(absolute_path))
        return absolute_path
def register_scheme(scheme):
    """Teach urllib.parse that ``scheme`` supports netloc, query, etc.

    Appends the scheme to every ``uses_*`` registry list in the module.
    """
    registries = [getattr(urlparselib, attr)
                  for attr in dir(urlparselib) if attr.startswith('uses_')]
    for registry in registries:
        registry.append(scheme)
def register_schemes(schemes):
    """Register every scheme in ``schemes`` with urllib.parse."""
    for entry in schemes:
        register_scheme(entry)
# Register database and cache schemes in URLs.
# Make urllib.parse treat every configured scheme as netloc/query-capable so
# URLs like "mysql://user@host/db?opt=1" parse with hostname/port/query.
register_schemes(Env.DB_SCHEMES.keys())
register_schemes(Env.CACHE_SCHEMES.keys())
register_schemes(Env.SEARCH_SCHEMES.keys())
register_schemes(Env.EMAIL_SCHEMES.keys())
| |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import model
import os
import webapp2
from oauth2client import client
from google.appengine.api import users
from google.appengine.ext import blobstore
from google.appengine.ext.webapp import blobstore_handlers
from google.appengine.ext.webapp import template
def project_logger_as_dict(project_logger):
  """Serialize a ProjectLogger entity into a JSON-compatible dict."""
  return {
      'id': project_logger.key.id(),
      'projectId': project_logger.project.id(),
      'projectName': project_logger.project.get().name,
      'severity': str(project_logger.severity),
      'message': project_logger.message,
      'createdAt': project_logger.created_at.isoformat() + 'Z',
      'updatedAt': project_logger.updated_at.isoformat() + 'Z'
  }
def as_dict(project):
  """Serialize a Project entity (plus its blobstore assets/feed) to a dict."""
  project_id = project.key.id()
  # Resolve each asset blob key to its uploaded filename.
  assets = [{
      'key': str(a),
      'filename': blobstore.get(a).filename
  } for a in project.assets]
  if project.feed:
    feed = {
        'key': str(project.feed),
        'filename': blobstore.get(project.feed).filename
    }
  else:
    feed = None
  return {
      'id': project_id,
      'name': project.name,
      'profileId': project.profile_id,
      'sheetsFeedUrl': project.sheets_feed_url,
      'notes': project.notes,
      # Fresh one-shot blobstore URL the client can POST a new feed file to.
      'feedUploadUrl':
          blobstore.create_upload_url('/api/projects/' + str(project_id) +
                                      '/feed'),
      'createdAt': project.created_at.isoformat() + 'Z',
      'updatedAt': project.updated_at.isoformat() + 'Z',
      'lastRunAt':
          project.last_run_at.isoformat() + 'Z'
          if project.last_run_at else None,
      'lastCompletedAt':
          project.last_completed_at.isoformat() + 'Z'
          if project.last_completed_at else None,
      'status': str(project.status),
      'assets': assets,
      'feed': feed
  }
class ApiHandler(webapp2.RequestHandler):
  """Base request handler that serializes responses as JSON."""

  def as_json(self, data):
    """Write ``data`` to the response body as a JSON document."""
    self.response.headers['Content-Type'] = 'application/json'
    self.response.write(json.dumps(data))
class SettingsHandler(ApiHandler):
  """Read and update the application's singleton settings."""

  @staticmethod
  def _settings_as_dict(settings):
    # Single place defining the settings wire format (was duplicated
    # verbatim in get() and put()).
    return {
        'username': settings.username,
        'password': settings.password,
        'config': settings.config,
    }

  def get(self):
    """Return the current settings as JSON."""
    self.as_json(self._settings_as_dict(model.show_settings()))

  def put(self):
    """Replace username/password/config from the JSON request body."""
    data = json.loads(self.request.body)
    settings = model.update_settings(data['username'], data['password'],
                                     data['config'])
    self.as_json(self._settings_as_dict(settings))
class ProjectsHandler(ApiHandler):
  """List projects with cursor-based pagination."""

  def get(self):
    """Return a page of projects; the ``pc`` query param is the cursor."""
    page = model.projects(self.request.get('pc'))
    page['entities'] = [as_dict(entity) for entity in page['entities']]
    self.as_json(page)
class ProjectHandler(ApiHandler):
  # CRUD for a single project; POST also performs the OAuth code exchange.

  def post(self):
    """Create a project from name/profileId plus an OAuth authorization code."""
    data = json.loads(self.request.body)
    settings = model.show_settings()
    config = json.loads(settings.config)
    config_web = config.get('web', {})
    client_id = config_web.get('client_id', '')
    client_secret = config_web.get('client_secret', '')
    # Exchange the one-time authorization code for stored credentials
    # scoped to the DFA Trafficking API.
    credentials = client.credentials_from_code(
        client_id,
        client_secret,
        ['https://www.googleapis.com/auth/dfatrafficking'],
        data['code'])
    project = model.create_project(data['name'], data['profileId'],
                                   credentials.to_json())
    self.as_json(as_dict(project))

  def get(self, project_id):
    """Return one project serialized as JSON."""
    project_id = int(project_id)
    project = model.show_project(project_id)
    self.as_json(as_dict(project))

  def put(self, project_id):
    """Update the mutable project fields from the JSON request body."""
    project_id = int(project_id)
    data = json.loads(self.request.body)
    project = model.update_project(project_id, data['name'], data['profileId'],
                                   data['feed'], data['assets'],
                                   data['sheetsFeedUrl'], data['notes'])
    self.as_json(as_dict(project))

  def delete(self, project_id):
    """Delete the project (no response body)."""
    project_id = int(project_id)
    model.destroy_project(project_id)
class ProjectStatusHandler(ApiHandler):
  """Expose only the status field of a project."""

  def get(self, project_id):
    """Return ``{'status': ...}`` for the given project."""
    project = model.show_project(int(project_id))
    self.as_json({'status': str(project.status)})
class ProjectLoggersHandler(ApiHandler):
  """List log entries of one project with cursor pagination."""

  def get(self, project_id):
    """Return a page of loggers; the ``lc`` query param is the cursor."""
    page = model.project_loggers(int(project_id), self.request.get('lc'))
    page['entities'] = [
        project_logger_as_dict(entry) for entry in page['entities']
    ]
    self.as_json(page)
class ProjectFeedUploadHandler(blobstore_handlers.BlobstoreUploadHandler):
  """Attach an uploaded feed blob to a project."""

  def post(self, project_id):
    """Record the first uploaded blob as the project's feed."""
    blob = self.get_uploads()[0]
    model.update_project_with_feed(int(project_id), blob.key())
    self.response.headers['Content-Type'] = 'application/json'
    self.response.write('{}')
class ProjectFeedDownloadHandler(blobstore_handlers.BlobstoreDownloadHandler):
  """Serve the project's uploaded feed blob as a file download."""

  def get(self, project_id):
    """Send the feed blob, or 404 when it is missing/unset."""
    project = model.show_project(int(project_id))
    # Look the blob up once and reuse the BlobInfo. The original built a
    # BlobInfo before checking existence and then queried the blobstore a
    # second time for the same key.
    feed_info = blobstore.get(project.feed)
    if not feed_info:
      self.error(404)
    else:
      self.send_blob(feed_info, save_as=True)
class ProjectAssetUploadHandler(blobstore_handlers.BlobstoreUploadHandler):
    """Accepts a blobstore upload and attaches it to a project as an asset."""

    def post(self, project_id):
        """Store the first uploaded blob as a project asset."""
        blob = self.get_uploads()[0]
        model.update_project_with_asset(int(project_id), blob.key())
        self.response.headers['Content-Type'] = 'application/json'
        self.response.write('{}')
class ProjectAssetUploadUrlHandler(ApiHandler):
    """Hands out one-shot blobstore upload URLs for project assets."""

    def get(self, project_id):
        """Respond with a fresh upload URL targeting the asset endpoint."""
        upload_target = '/api/projects/' + project_id + '/asset'
        self.as_json({'uploadUrl': blobstore.create_upload_url(upload_target)})
class ProjectRunHandler(ApiHandler):
    """Starts (POST) and cancels (DELETE) background runs of a project."""

    def post(self, project_id):
        """Kick off a run for the project."""
        model.start_project_run(int(project_id))
        self.as_json({})

    def delete(self, project_id):
        """Cancel the project's active run."""
        model.cancel_project_run(int(project_id))
        self.as_json({})
def check_auth(auth, stored_username, stored_password):
    """Validate an HTTP Basic Authorization credential pair.

    Args:
        auth: a (scheme, credentials) pair as produced by webapp2's
            request.authorization; credentials is the base64-encoded
            'username:password' token.
        stored_username: the expected username.
        stored_password: the expected password.

    Returns:
        True if the decoded credentials match the stored pair, False
        otherwise (including when the token is not 'user:pass' shaped).
    """
    encoded_auth = auth[1]
    decoded = base64.b64decode(encoded_auth)
    if isinstance(decoded, bytes):
        # b64decode returns bytes on Python 3; normalize to text so the
        # comparisons below work on both interpreter versions.
        decoded = decoded.decode('utf-8')
    # Split on the FIRST colon only: RFC 7617 forbids ':' in the user-id
    # but explicitly allows it in the password, so split(':') would
    # truncate such passwords (and raise ValueError on malformed tokens).
    parts = decoded.split(':', 1)
    if len(parts) != 2:
        return False
    username, password = parts
    return username == stored_username and password == stored_password
class MainHandler(webapp2.RequestHandler):
    """Serves the single-page frontend behind HTTP basic authentication."""

    def get(self):
        """Render frontend/index.html, or challenge with 401 on bad auth."""
        settings = model.show_settings()
        config = json.loads(settings.config)
        client_id = config.get('web', {}).get('client_id', '')
        credentials = self.request.authorization
        authorized = (credentials is not None and
                      check_auth(credentials, settings.username,
                                 settings.password))
        if not authorized:
            self.response.status_int = 401
            self.response.headers['WWW-Authenticate'] = (
                'Basic realm="Login Required"')
            return
        template_path = os.path.join(
            os.path.dirname(__file__), 'frontend', 'index.html')
        rendered = template.render(template_path, {'CLIENT_ID': client_id})
        self.response.write(rendered)
# URL routing table for the WSGI application.  Collection endpoints live
# under /api/projects; per-project endpoints carry a <project_id> path
# component.  NOTE(review): GET /api/projects is handled by ProjectsHandler
# (plural) while POST /api/projects goes to ProjectHandler (singular) --
# confirm this asymmetry is intentional.
app = webapp2.WSGIApplication(
    [
        # Frontend entry point (basic-auth protected).
        webapp2.Route(
            r'/', handler=MainHandler, methods=['GET']),
        webapp2.Route(
            r'/api/settings', handler=SettingsHandler, methods=['GET']),
        webapp2.Route(
            r'/api/settings', handler=SettingsHandler, methods=['PUT']),
        webapp2.Route(
            r'/api/projects', handler=ProjectsHandler, methods=['GET']),
        webapp2.Route(
            r'/api/projects', handler=ProjectHandler, methods=['POST']),
        # Project CRUD.
        webapp2.Route(
            r'/api/projects/<project_id>',
            handler=ProjectHandler,
            methods=['GET']),
        webapp2.Route(
            r'/api/projects/<project_id>',
            handler=ProjectHandler,
            methods=['PUT']),
        webapp2.Route(
            r'/api/projects/<project_id>',
            handler=ProjectHandler,
            methods=['DELETE']),
        webapp2.Route(
            r'/api/projects/<project_id>/status',
            handler=ProjectStatusHandler,
            methods=['GET']),
        webapp2.Route(
            r'/api/projects/<project_id>/log',
            handler=ProjectLoggersHandler,
            methods=['GET']),
        # Run control: POST starts, DELETE cancels.
        webapp2.Route(
            r'/api/projects/<project_id>/run',
            handler=ProjectRunHandler,
            methods=['POST']),
        webapp2.Route(
            r'/api/projects/<project_id>/run',
            handler=ProjectRunHandler,
            methods=['DELETE']),
        # Blobstore-backed feed and asset transfer.
        webapp2.Route(
            r'/api/projects/<project_id>/feed',
            handler=ProjectFeedDownloadHandler,
            methods=['GET']),
        webapp2.Route(
            r'/api/projects/<project_id>/feed',
            handler=ProjectFeedUploadHandler,
            methods=['POST']),
        webapp2.Route(
            r'/api/projects/<project_id>/asset',
            handler=ProjectAssetUploadHandler,
            methods=['POST']),
        webapp2.Route(
            r'/api/projects/<project_id>/asset_upload_url',
            handler=ProjectAssetUploadUrlHandler,
            methods=['GET'])
    ],
    debug=True)
| |
"""Controls robot motion using sensors and simulation data."""
from collections import namedtuple, defaultdict
from time import sleep
import numpy as np
from components.util import within, initialized_coroutine
from components.messaging import Signal, Broadcaster
from components.concurrency import Reactor
from components.geometry import normalize_angle, positive_angle, direction_vector
from components.geometry import to_vector, vector_to_tuple, Pose
# Command/message tuple types exchanged between planners, controllers and
# robot monitors (see PrimitiveController / SimplePrimitivePlanner below).
Motion = namedtuple("Motion", ["Name", "Control", "Direction", "Speed", "Data"])
Localize = namedtuple("Localize", ["Name", "Rectangle", "Side", "Data"])
Pause = namedtuple("Pause", ["Name", "Data"])  # Data: seconds to sleep
Wait = namedtuple("Wait", ["Name"])  # planner blocks until a Continue signal
Finished = namedtuple("Finished", ["Name", "Target"])  # wake Target robot's planner
Color = namedtuple("Color", ["LeftColor", "RightColor"])  # set the robot's LEDs
Beep = namedtuple("Beep", ["Note", "Duration"])  # sound the buzzer
Servo = namedtuple("Servo", ["Angle"])  # aim the PSD scanner servo
class PrimitiveController(Reactor, Broadcaster):
    """Takes primitive motion control commands and executes them.

    Signals Received:
        Will react to any Signal of correct name whose Namespace matches the name
        of its robot.
        Pose: Data should be a Pose. Will update the Controller's records of the
        robot's Pose and check if the current motion, if it exists, has finished.
        ResetPose: Data should be a Pose. Will update the Controller's records of the
        robot's Pose.
        Motion: Data should be a Motion command.
        Localize: Data should be a Localize command.
        Stop: stops the robot and cancels the active Motion command, if applicable.
        Pause: stops the robot and pauses the active Motion command, if applicable.
        Resume: resumes the paused active Motion command, if applicable.
        Proximity: Data should be a 2-tuple of the left and right proximity values.
        PSD: Data should be a positive int of the PSD scanner value.
        Floor: Data should be a 2-tuple of the left and right floor values.

    Motion Commands:
        MoveTo: attempt to move in the robot's current direction to the target x-coord,
        y-coord, or x,y-coord. Assumes the robot is already pointed in the correct
        direction (use the RotateTowards command to achieve this). Data should be a
        2-tuple of the target x and y coordinates; to target only x-coord, give None
        as the y-coord; to target only y-coord, give None as the x-coord.
        MoveBy: move by the specified distance in the current direction. Data should
        be the distance to move.
        RotateTo: rotate to the specified absolute angle ccw from the +x axis. Data
        should be a positive or negative angle in radians, or a 2-tuple of x and y
        offsets that implies the target angle.
        RotateTowards: rotate to the angle that points towards the target x,y-coord.
        Data should be a 2-tuple of the target x and y coordinates.
        RotateBy: rotate by the specified relative ccw angle change. Data should be a
        positive or negative angle in radians.
        MoveUntil: move in the robot's current direction until the specified criterion
        is met. See the SensorDistance control mode for a description of the criterion.
        RotateUntil: rotate in the specified direction until the specified criterion is
        met. See the SensorDistance control mode for a description of the criterion.

    Motion Command Control modes:
        DeadReckoning: use only the virtual robot's position for motion control.
        Associated with MoveTo, MoveBy, RotateTo, RotateTowards, RotateBy.
        SensorDistance: stop the motion when some criterion involving sensor data is
        met. Associated with MoveUntil, RotateUntil. Stopping criterion should be
        given as the Data field of the command, and should be a function returning a
        boolean value and taking as arguments the left proximity distance, right
        proximity distance, and PSD distance.

    Motion Command Speed should be a positive integer between 0 and 100.

    Motion Command Directions:
        MoveTo, MoveBy, MoveUntil:
            1: the robot moves forwards to the target.
            -1: the robot moves in reverse to the target.
        RotateTowards:
            1: the robot's front end will point towards the target.
            -1: the robot's rear end will point towards the target.
        RotateUntil:
            1: the robot rotates counterclockwise.
            -1: the robot rotates clockwise.

    Localize Commands:
        Proximity: localize using the left and/or right proximity sensors.
        PSD: localize using the PSD scanner. Data should be the servo angle, or None
        to use the previous servo angle.
        Localize Command Rectangle should be the index of the rectangle in the world
        to localize against, or None to guess which rectangle to localize against.
        Localize Command Side should be "North", "South", "East", or "West",
        specifying the side of the rectangle to localize against, or None to guess
        which side of the rectangle to localize against.

    Signals Broadcast:
        Moved: sent when the last command has been executed. Data is a 2-tuple of the
        last command and the current pose.
    """
    def __init__(self, name, robot, monitor=None):
        super(PrimitiveController, self).__init__(name)
        self._robot = robot
        # Subscribe to pose updates from the virtual robot.
        self._robot.get_virtual().register("Pose", self)
        self._robot.get_virtual().register("ResetPose", self)
        if monitor is not None:
            # Subscribe to the monitor's sensor streams, and let the monitor
            # hear the Servo commands this controller broadcasts.
            monitor.register("Floor", self)
            monitor.register("Proximity", self)
            monitor.register("PSD", self)
            self.register("Servo", monitor)
        self._robot_pose = robot.get_virtual().get_pose()
        self._previous_pose = self._robot_pose
        # Pose to reach for the active dead-reckoning motion, if any.
        self._target_pose = None
        # Localize(PSD) command parked until a fresh PSD reading arrives.
        self._waiting_psd_localization = None
        # Stop predicate for the active SensorDistance motion, if any.
        self._distance_criterion = None
        self._last_command = None
        # Latest raw sensor readings, keyed by robot name; None until the
        # first reading for that robot arrives.
        self._sensors = {
            "floorLeft": defaultdict(lambda: None),
            "floorRight": defaultdict(lambda: None),
            "proximityLeft": defaultdict(lambda: None),
            "proximityRight": defaultdict(lambda: None),
            "psd": defaultdict(lambda: None)
        }

    # Implementation of parent abstract methods
    def _react(self, signal):
        # Ignore signals addressed to other robots.
        if not signal.Namespace == self._robot.get_name():
            return
        if signal.Name == "Stop":
            # Cancel the active motion without broadcasting Moved.
            self.__finish_motion(False)
        elif signal.Name == "Pause":
            # Halt the wheels but keep the target so Resume can pick it up.
            self._robot.move(0)
        elif (signal.Name == "Resume" and self._target_pose is not None
              and self._last_command is not None):
            # Re-issue the paused motion towards the recorded target pose.
            command = self._last_command
            if command.Name == "MoveTo" or command.Name == "MoveBy":
                self.__move_to(command.Direction, command.Speed, self._target_pose.Coord)
            elif (command.Name == "RotateTo" or command.Name == "RotateBy"
                  or command.Name == "RotateTowards"):
                self.__rotate_to(command.Speed, self._target_pose.Angle)
        elif signal.Name == "ResetPose":
            self._robot_pose = self._robot.get_virtual().get_pose()
            self._previous_pose = self._robot_pose
        elif signal.Name == "Pose":
            self.__update_pose(signal.Data)
        elif signal.Name == "Floor":
            self._sensors["floorLeft"][signal.Namespace] = signal.Data[0]
            self._sensors["floorRight"][signal.Namespace] = signal.Data[1]
        elif signal.Name == "Proximity":
            self._sensors["proximityLeft"][signal.Namespace] = signal.Data[0]
            self._sensors["proximityRight"][signal.Namespace] = signal.Data[1]
        elif signal.Name == "PSD":
            self._sensors["psd"][signal.Namespace] = signal.Data
            # A parked PSD localization fires once a real reading arrives.
            if signal.Data is not None and self._waiting_psd_localization is not None:
                self.broadcast(Signal("LocalizePSD", self.get_name(), self._robot.get_name(),
                                      (self._waiting_psd_localization.Rectangle,
                                       self._waiting_psd_localization.Side)))
                self._waiting_psd_localization = None
        elif signal.Name == "Localize":
            self.__react_localize(signal.Data)
        elif signal.Name == "Motion" and signal.Data.Control == "DeadReckoning":
            self.__react_motion_deadreckoning(signal.Data)
        elif signal.Name == "Motion" and signal.Data.Control == "SensorDistance":
            self.__react_motion_sensordistance(signal.Data)

    def __update_pose(self, pose):
        # Record the new pose, then check both stop conditions: crossing the
        # dead-reckoning target, or satisfying the sensor-distance criterion.
        self._previous_pose = self._robot_pose
        self._robot_pose = pose
        if ((self._target_pose is not None and self.__reached_pose())
                or self._distance_criterion is not None and self.__fulfilled_distance_criterion()):
            self.__finish_motion(True)

    def __finish_motion(self, whether_broadcast):
        # Stop the robot and clear motion state; optionally announce Moved.
        if whether_broadcast:
            self.broadcast(Signal("Moved", self.get_name(), self._robot.get_name(),
                                  (self._last_command, self._robot_pose)))
        self._target_pose = None
        self._distance_criterion = None
        self._robot.move(0)

    def __react_motion_deadreckoning(self, command):
        # Translate each dead-reckoning command into a target pose.
        if command.Name == "MoveTo":
            self._last_command = command
            self.__move_to(command.Direction, command.Speed, to_vector(*command.Data))
        elif command.Name == "MoveBy":
            self._last_command = command
            current_direction = direction_vector(self._robot_pose.Angle)
            current_direction = current_direction * command.Direction
            target_coords = command.Data * current_direction + self._robot_pose.Coord
            self.__move_to(command.Direction, command.Speed, target_coords)
        elif command.Name == "RotateTo":
            self._last_command = command
            # Data may be an (x, y) offset pair or a bare angle; indexing a
            # scalar raises TypeError, which selects the bare-angle path.
            try:
                target_angle = np.arctan2(command.Data[1], command.Data[0])
            except TypeError:
                target_angle = command.Data
            self.__rotate_to(command.Speed, normalize_angle(target_angle))
        elif command.Name == "RotateBy":
            self._last_command = command
            target_angle = normalize_angle(command.Data + self._robot_pose.Angle)
            self.__rotate_to(command.Speed, target_angle)
        elif command.Name == "RotateTowards":
            self._last_command = command
            target_coords = to_vector(*command.Data)
            offset = vector_to_tuple(target_coords - self._robot_pose.Coord)
            target_angle = np.arctan2(offset[1], offset[0])
            if command.Direction == -1:
                # Point the rear end at the target instead of the front.
                target_angle = normalize_angle(target_angle + np.pi)
            self.__rotate_to(command.Speed, target_angle)

    def __react_motion_sensordistance(self, command):
        # Sensor-criterion motions: store the predicate, then start moving.
        if command.Name == "MoveUntil":
            self._last_command = command
            self._distance_criterion = command.Data
            self.__move_until(command.Direction, command.Speed)
        elif command.Name == "RotateUntil":
            self._last_command = command
            self._distance_criterion = command.Data
            self.__rotate_until(command.Direction, command.Speed)

    def __react_localize(self, command):
        if command.Name == "Proximity":
            self.broadcast(Signal("LocalizeProx", self.get_name(), self._robot.get_name(),
                                  (command.Rectangle, command.Side)))
        elif command.Name == "PSD":
            if command.Data is not None:
                # Re-aim the PSD servo before localizing.
                self.broadcast(Signal("Servo", self.get_name(), self._robot.get_name(),
                                      command.Data))
            self._waiting_psd_localization = command
            # NOTE(review): a LocalizePSD is broadcast immediately here AND
            # _waiting_psd_localization triggers another one on the next PSD
            # reading (see _react) -- confirm the double broadcast is intended.
            self.broadcast(Signal("LocalizePSD", self.get_name(), self._robot.get_name(),
                                  (command.Rectangle, command.Side)))

    def __reached_pose(self):
        # A motion finishes when the robot crosses the target between the
        # previous and current pose samples (handles overshoot).
        if self._target_pose.Angle is not None:
            target = self._target_pose.Angle
            current = self._robot_pose.Angle
            previous = self._previous_pose.Angle
            return within(previous, current, target)
        else:
            target = vector_to_tuple(self._target_pose.Coord)
            current = vector_to_tuple(self._robot_pose.Coord)
            previous = vector_to_tuple(self._previous_pose.Coord)
            # A None target component means that axis is unconstrained.
            within_x = target[0] is None or within(previous[0], current[0], target[0])
            within_y = target[1] is None or within(previous[1], current[1], target[1])
            return within_x and within_y

    def __fulfilled_distance_criterion(self):
        # Convert raw sensor readings to distances and apply the predicate.
        robot = self._robot
        robot_name = robot.get_name()
        prox_left = robot.to_prox_distance(self._sensors["proximityLeft"][robot_name])
        prox_right = robot.to_prox_distance(self._sensors["proximityRight"][robot_name])
        psd = robot.to_psd_distance(self._sensors["psd"][robot_name])
        return self._distance_criterion(prox_left, prox_right, psd)

    def __move_to(self, direction, speed, target):
        self._target_pose = Pose(target, None)
        if self.__reached_pose():
            # Already at the target: finish immediately without moving.
            self.__finish_motion(True)
        else:
            self._robot.move(abs(speed) * direction)

    def __rotate_to(self, speed, target):
        # Normalize so the robot takes the short way around to the target.
        delta = normalize_angle(normalize_angle(target) - normalize_angle(self._robot_pose.Angle))
        self._target_pose = Pose(to_vector(None, None), delta + self._robot_pose.Angle)
        if self.__reached_pose():
            self.__finish_motion(True)
        else:
            self._robot.rotate(int(speed * np.sign(delta)))

    def __move_until(self, direction, speed):
        if self.__fulfilled_distance_criterion():
            # Criterion already satisfied: finish without moving.
            self.__finish_motion(True)
        else:
            self._robot.move(abs(speed) * direction)

    def __rotate_until(self, direction, speed):
        if self.__fulfilled_distance_criterion():
            self.__finish_motion(True)
        else:
            self._robot.rotate(abs(speed) * direction)
class SimplePrimitivePlanner(Reactor, Broadcaster):
    """Sequentially broadcasts motion commands to a PrimitiveController.

    Signals Received:
        Will react to any Signal of correct name whose Namespace matches the name
        of its robot.
        Start: instructs the planner to start sending motion commands.
        Reset: resets the planner to its initial state and discards all waiting
        Signals.
        SetPose: indicates that the robot has finished localizing.
        Continue: resumes a planner that is waiting (from a "Wait" command). The
        target of the continue signal must match the robot's name.

    Signals Sent:
        Motion: broadcasts a motion command.
        Localize: broadcasts a localization command.
        Stop: broadcasts a signal to stop the controller when resetting the planner.
        Beep: send a signal to beep the buzzer.
        Servo: set the servo angle

    Generator Commands:
        Motion, Localize, Pause, Wait, Finished, Color, Beep, Servo
    """
    def __init__(self, name, robot, monitor=None):
        # NOTE(review): 'monitor' is accepted for interface symmetry with
        # PrimitiveController but is unused in this class -- confirm.
        super(SimplePrimitivePlanner, self).__init__(name)
        robot.get_virtual().register("SetPose", self)
        self._robot = robot
        self.__command_generator = self._generate_commands()
        self._active = False
        # Prime the generator so it is ready to receive send() calls.
        next(self.__command_generator)

    # Implementation of parent abstract methods
    def _react(self, signal):
        # Continue signals are matched by Data (the target robot's name)
        # rather than by Namespace, so handle them before the namespace guard.
        if signal.Name == "Continue" and signal.Data == self._robot.get_name():
            self._broadcast_next_command()
            return
        if not signal.Namespace == self._robot.get_name():
            return
        if signal.Name == "Start":
            self._broadcast_next_command()
            self._active = True
        elif (signal.Name == "Moved" or signal.Name == "SetPose") and self._active:
            # The controller finished the previous command; issue the next one.
            self._broadcast_next_command()
        elif signal.Name == "Reset":
            self.broadcast(Signal("Stop", self.get_name(), self._robot.get_name(), None))
            # Sending False resets the command generator (see _generate_commands).
            self.__command_generator.send(False)
            self.clear()
            self._active = False

    def _broadcast_next_command(self):
        # Pull commands from the generator, executing "immediate" ones
        # (Pause/Finished/Color/Beep/Servo) inline until a Motion, Localize
        # or Wait command -- or generator exhaustion (None) -- is reached.
        command = None
        while command is None:
            command = self.__command_generator.send(True)
            if command is None:
                # Generator is done; nothing further to broadcast.
                return
            if type(command).__name__ == "Pause":
                sleep(command.Data)
                command = None
            elif type(command).__name__ == "Finished":
                self.broadcast(Signal("Continue", self.get_name(), self._robot.get_name(),
                                      command.Target))
                command = None
            elif type(command).__name__ == "Color":
                self._robot.led(command.LeftColor, command.RightColor)
                command = None
            elif type(command).__name__ == "Beep":
                self.broadcast(Signal("Beep", self.get_name(), self._robot.get_name(),
                                      (command.Note, command.Duration)))
                command = None
            elif type(command).__name__ == "Servo":
                self.broadcast(Signal("Servo", self.get_name(), self._robot.get_name(),
                                      command.Angle))
                command = None
        if type(command).__name__ == "Wait":
            # Block here until a Continue signal re-enters _react.
            pass
        elif type(command).__name__ == "Motion":
            self.broadcast(Signal("Motion", self.get_name(), self._robot.get_name(), command))
        elif type(command).__name__ == "Localize":
            self.broadcast(Signal("Localize", self.get_name(), self._robot.get_name(), command))

    # Abstract methods
    def _generate_commands(self):
        """A generator that yields the next motion command.

        Sending:
            Send True into _generate_commands to get the next motion command.
            Send False into _generate_commands to reset the generator.
        Yielding:
            The next motion command for the PrimitiveController.
            None is a no-op, and indicates that the generator is done; to
            restart it, reset the generator.
        """
        yield None
| |
""" ietf_diffserv_classifier
This module contains a collection of YANG definitions for
configuring diffserv specification implementations.
Copyright (c) 2014 IETF Trust and the persons identified as
authors of the code. All rights reserved.
Redistribution and use in source and binary forms, with or
without modification, is permitted pursuant to, and subject
to the license terms contained in, the Simplified BSD License
set forth in Section 4.c of the IETF Trust's Legal Provisions
Relating to IETF Documents
(http\://trustee.ietf.org/license\-info).
This version of this YANG module is part of RFC XXXX; see
the RFC itself for full legal notices.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
# NOTE: auto-generated YDK binding (from the ietf-diffserv-classifier YANG
# module); code is kept byte-identical to the generator's output.
class FilterTypeIdentity(object):
    """
    This is identity of base filter\-type
    """
    # YANG module prefix and revision this binding was generated from.
    _prefix = 'classifier'
    _revision = '2015-04-07'

    def __init__(self):
        pass

    @staticmethod
    def _meta_info():
        # Lazy import: the generated meta table lives in a separate module.
        from ydk.models.ietf._meta import _ietf_diffserv_classifier as meta
        return meta._meta_table['FilterTypeIdentity']['meta_info']
# NOTE: auto-generated YDK binding; code kept byte-identical.
class ClassifierEntryFilterOperationTypeIdentity(object):
    """
    Classifier entry filter logical operation
    """
    # YANG module prefix and revision this binding was generated from.
    _prefix = 'classifier'
    _revision = '2015-04-07'

    def __init__(self):
        pass

    @staticmethod
    def _meta_info():
        # Lazy import: the generated meta table lives in a separate module.
        from ydk.models.ietf._meta import _ietf_diffserv_classifier as meta
        return meta._meta_table['ClassifierEntryFilterOperationTypeIdentity']['meta_info']
class Classifiers(object):
"""
list of classifier entry
.. attribute:: classifier_entry
classifier entry template
**type**\: list of :py:class:`ClassifierEntry <ydk.models.ietf.ietf_diffserv_classifier.Classifiers.ClassifierEntry>`
"""
_prefix = 'classifier'
_revision = '2015-04-07'
def __init__(self):
    # classifier_entry mirrors the YANG 'classifier-entry' list node: a
    # YList of ClassifierEntry objects bound back to this container.
    self.classifier_entry = YList()
    self.classifier_entry.parent = self
    self.classifier_entry.name = 'classifier_entry'
class ClassifierEntry(object):
"""
classifier entry template
.. attribute:: classifier_entry_name <key>
Diffserv classifier name
**type**\: str
.. attribute:: classifier_entry_descr
Description of the class template
**type**\: str
.. attribute:: classifier_entry_filter_operation
Filters are applicable as any or all filters
**type**\: :py:class:`ClassifierEntryFilterOperationTypeIdentity <ydk.models.ietf.ietf_diffserv_classifier.ClassifierEntryFilterOperationTypeIdentity>`
**default value**\: match-any-filter
.. attribute:: filter_entry
Filter configuration
**type**\: list of :py:class:`FilterEntry <ydk.models.ietf.ietf_diffserv_classifier.Classifiers.ClassifierEntry.FilterEntry>`
"""
_prefix = 'classifier'
_revision = '2015-04-07'
def __init__(self):
    self.parent = None
    # Leaf values; None means "not configured".
    self.classifier_entry_name = None
    self.classifier_entry_descr = None
    self.classifier_entry_filter_operation = None
    # filter_entry mirrors the YANG 'filter-entry' child list.
    self.filter_entry = YList()
    self.filter_entry.parent = self
    self.filter_entry.name = 'filter_entry'
class FilterEntry(object):
"""
Filter configuration
.. attribute:: filter_type <key>
This leaf defines type of the filter
**type**\: :py:class:`FilterTypeIdentity <ydk.models.ietf.ietf_diffserv_classifier.FilterTypeIdentity>`
.. attribute:: filter_logical_not <key>
This is logical\-not operator for a filter. When true, it indicates filter looks for absence of a pattern defined by the filter
**type**\: bool
.. attribute:: destination_ip_address_cfg
list of destination ip address
**type**\: list of :py:class:`DestinationIpAddressCfg <ydk.models.ietf.ietf_diffserv_classifier.Classifiers.ClassifierEntry.FilterEntry.DestinationIpAddressCfg>`
.. attribute:: destination_port_cfg
list of ranges of destination port
**type**\: list of :py:class:`DestinationPortCfg <ydk.models.ietf.ietf_diffserv_classifier.Classifiers.ClassifierEntry.FilterEntry.DestinationPortCfg>`
.. attribute:: dscp_cfg
list of dscp ranges
**type**\: list of :py:class:`DscpCfg <ydk.models.ietf.ietf_diffserv_classifier.Classifiers.ClassifierEntry.FilterEntry.DscpCfg>`
.. attribute:: protocol_cfg
list of ranges of protocol values
**type**\: list of :py:class:`ProtocolCfg <ydk.models.ietf.ietf_diffserv_classifier.Classifiers.ClassifierEntry.FilterEntry.ProtocolCfg>`
.. attribute:: source_ip_address_cfg
list of source ip address
**type**\: list of :py:class:`SourceIpAddressCfg <ydk.models.ietf.ietf_diffserv_classifier.Classifiers.ClassifierEntry.FilterEntry.SourceIpAddressCfg>`
.. attribute:: source_port_cfg
list of ranges of source port
**type**\: list of :py:class:`SourcePortCfg <ydk.models.ietf.ietf_diffserv_classifier.Classifiers.ClassifierEntry.FilterEntry.SourcePortCfg>`
"""
_prefix = 'classifier'
_revision = '2015-04-07'
def __init__(self):
    self.parent = None
    # Key leaves; None means "not configured".
    self.filter_type = None
    self.filter_logical_not = None
    # Each YList below mirrors one YANG child list of filter-entry.
    self.destination_ip_address_cfg = YList()
    self.destination_ip_address_cfg.parent = self
    self.destination_ip_address_cfg.name = 'destination_ip_address_cfg'
    self.destination_port_cfg = YList()
    self.destination_port_cfg.parent = self
    self.destination_port_cfg.name = 'destination_port_cfg'
    self.dscp_cfg = YList()
    self.dscp_cfg.parent = self
    self.dscp_cfg.name = 'dscp_cfg'
    self.protocol_cfg = YList()
    self.protocol_cfg.parent = self
    self.protocol_cfg.name = 'protocol_cfg'
    self.source_ip_address_cfg = YList()
    self.source_ip_address_cfg.parent = self
    self.source_ip_address_cfg.name = 'source_ip_address_cfg'
    self.source_port_cfg = YList()
    self.source_port_cfg.parent = self
    self.source_port_cfg.name = 'source_port_cfg'
# NOTE: auto-generated YDK binding; code kept byte-identical.
class DscpCfg(object):
    """
    list of dscp ranges

    .. attribute:: dscp_min  <key>
        Minimum value of dscp range
        **type**\: int
        **range:** 0..63

    .. attribute:: dscp_max  <key>
        maximum value of dscp range
        **type**\: int
        **range:** 0..63
    """
    _prefix = 'classifier'
    _revision = '2015-04-07'

    def __init__(self):
        self.parent = None
        self.dscp_min = None
        self.dscp_max = None

    @property
    def _common_path(self):
        # XPath-like node identifier: parent path plus this list's keys.
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        if self.dscp_min is None:
            raise YPYModelError('Key property dscp_min is None')
        if self.dscp_max is None:
            raise YPYModelError('Key property dscp_max is None')
        return self.parent._common_path +'/ietf-diffserv-classifier:dscp-cfg[ietf-diffserv-classifier:dscp-min = ' + str(self.dscp_min) + '][ietf-diffserv-classifier:dscp-max = ' + str(self.dscp_max) + ']'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # True when any leaf has been set.
        if self.dscp_min is not None:
            return True
        if self.dscp_max is not None:
            return True
        return False

    @staticmethod
    def _meta_info():
        from ydk.models.ietf._meta import _ietf_diffserv_classifier as meta
        return meta._meta_table['Classifiers.ClassifierEntry.FilterEntry.DscpCfg']['meta_info']
# NOTE: auto-generated YDK binding; code kept byte-identical.
class SourceIpAddressCfg(object):
    """
    list of source ip address

    .. attribute:: source_ip_addr  <key>
        source ip prefix
        **type**\: one of the below types:

        **type**\: str
        **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])/(([0\-9])\|([1\-2][0\-9])\|(3[0\-2]))
        ----
        **type**\: str
        **pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(/(([0\-9])\|([0\-9]{2})\|(1[0\-1][0\-9])\|(12[0\-8])))
        ----
    """
    _prefix = 'classifier'
    _revision = '2015-04-07'

    def __init__(self):
        self.parent = None
        self.source_ip_addr = None

    @property
    def _common_path(self):
        # XPath-like node identifier: parent path plus this list's key.
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        if self.source_ip_addr is None:
            raise YPYModelError('Key property source_ip_addr is None')
        return self.parent._common_path +'/ietf-diffserv-classifier:source-ip-address-cfg[ietf-diffserv-classifier:source-ip-addr = ' + str(self.source_ip_addr) + ']'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # True when the key leaf has been set.
        if self.source_ip_addr is not None:
            return True
        return False

    @staticmethod
    def _meta_info():
        from ydk.models.ietf._meta import _ietf_diffserv_classifier as meta
        return meta._meta_table['Classifiers.ClassifierEntry.FilterEntry.SourceIpAddressCfg']['meta_info']
# NOTE: auto-generated YDK binding; code kept byte-identical.
class DestinationIpAddressCfg(object):
    """
    list of destination ip address

    .. attribute:: destination_ip_addr  <key>
        destination ip prefix
        **type**\: one of the below types:

        **type**\: str
        **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])/(([0\-9])\|([1\-2][0\-9])\|(3[0\-2]))
        ----
        **type**\: str
        **pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(/(([0\-9])\|([0\-9]{2})\|(1[0\-1][0\-9])\|(12[0\-8])))
        ----
    """
    _prefix = 'classifier'
    _revision = '2015-04-07'

    def __init__(self):
        self.parent = None
        self.destination_ip_addr = None

    @property
    def _common_path(self):
        # XPath-like node identifier: parent path plus this list's key.
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        if self.destination_ip_addr is None:
            raise YPYModelError('Key property destination_ip_addr is None')
        return self.parent._common_path +'/ietf-diffserv-classifier:destination-ip-address-cfg[ietf-diffserv-classifier:destination-ip-addr = ' + str(self.destination_ip_addr) + ']'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # True when the key leaf has been set.
        if self.destination_ip_addr is not None:
            return True
        return False

    @staticmethod
    def _meta_info():
        from ydk.models.ietf._meta import _ietf_diffserv_classifier as meta
        return meta._meta_table['Classifiers.ClassifierEntry.FilterEntry.DestinationIpAddressCfg']['meta_info']
# NOTE: auto-generated YDK binding; code kept byte-identical.
class SourcePortCfg(object):
    """
    list of ranges of source port

    .. attribute:: source_port_min  <key>
        minimum value of source port range
        **type**\: int
        **range:** 0..65535

    .. attribute:: source_port_max  <key>
        maximum value of source port range
        **type**\: int
        **range:** 0..65535
    """
    _prefix = 'classifier'
    _revision = '2015-04-07'

    def __init__(self):
        self.parent = None
        self.source_port_min = None
        self.source_port_max = None

    @property
    def _common_path(self):
        # XPath-like node identifier: parent path plus this list's keys.
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        if self.source_port_min is None:
            raise YPYModelError('Key property source_port_min is None')
        if self.source_port_max is None:
            raise YPYModelError('Key property source_port_max is None')
        return self.parent._common_path +'/ietf-diffserv-classifier:source-port-cfg[ietf-diffserv-classifier:source-port-min = ' + str(self.source_port_min) + '][ietf-diffserv-classifier:source-port-max = ' + str(self.source_port_max) + ']'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # True when any leaf has been set.
        if self.source_port_min is not None:
            return True
        if self.source_port_max is not None:
            return True
        return False

    @staticmethod
    def _meta_info():
        from ydk.models.ietf._meta import _ietf_diffserv_classifier as meta
        return meta._meta_table['Classifiers.ClassifierEntry.FilterEntry.SourcePortCfg']['meta_info']
# NOTE: auto-generated YDK binding; code kept byte-identical.
class DestinationPortCfg(object):
    """
    list of ranges of destination port

    .. attribute:: destination_port_min  <key>
        minimum value of destination port range
        **type**\: int
        **range:** 0..65535

    .. attribute:: destination_port_max  <key>
        maximum value of destination port range
        **type**\: int
        **range:** 0..65535
    """
    _prefix = 'classifier'
    _revision = '2015-04-07'

    def __init__(self):
        self.parent = None
        self.destination_port_min = None
        self.destination_port_max = None

    @property
    def _common_path(self):
        # XPath-like node identifier: parent path plus this list's keys.
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        if self.destination_port_min is None:
            raise YPYModelError('Key property destination_port_min is None')
        if self.destination_port_max is None:
            raise YPYModelError('Key property destination_port_max is None')
        return self.parent._common_path +'/ietf-diffserv-classifier:destination-port-cfg[ietf-diffserv-classifier:destination-port-min = ' + str(self.destination_port_min) + '][ietf-diffserv-classifier:destination-port-max = ' + str(self.destination_port_max) + ']'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # True when any leaf has been set.
        if self.destination_port_min is not None:
            return True
        if self.destination_port_max is not None:
            return True
        return False

    @staticmethod
    def _meta_info():
        from ydk.models.ietf._meta import _ietf_diffserv_classifier as meta
        return meta._meta_table['Classifiers.ClassifierEntry.FilterEntry.DestinationPortCfg']['meta_info']
# NOTE: auto-generated YDK binding; code kept byte-identical.
class ProtocolCfg(object):
    """
    list of ranges of protocol values

    .. attribute:: protocol_min  <key>
        minimum value of protocol range
        **type**\: int
        **range:** 0..255

    .. attribute:: protocol_max  <key>
        maximum value of protocol range
        **type**\: int
        **range:** 0..255
    """
    _prefix = 'classifier'
    _revision = '2015-04-07'

    def __init__(self):
        self.parent = None
        self.protocol_min = None
        self.protocol_max = None

    @property
    def _common_path(self):
        # XPath-like node identifier: parent path plus this list's keys.
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        if self.protocol_min is None:
            raise YPYModelError('Key property protocol_min is None')
        if self.protocol_max is None:
            raise YPYModelError('Key property protocol_max is None')
        return self.parent._common_path +'/ietf-diffserv-classifier:protocol-cfg[ietf-diffserv-classifier:protocol-min = ' + str(self.protocol_min) + '][ietf-diffserv-classifier:protocol-max = ' + str(self.protocol_max) + ']'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # True when any leaf has been set.
        if self.protocol_min is not None:
            return True
        if self.protocol_max is not None:
            return True
        return False

    @staticmethod
    def _meta_info():
        from ydk.models.ietf._meta import _ietf_diffserv_classifier as meta
        return meta._meta_table['Classifiers.ClassifierEntry.FilterEntry.ProtocolCfg']['meta_info']
@property
def _common_path(self):
    # Path of this filter-entry list item: the parent's path plus the two
    # key predicates.  Requires the parent link and both keys to be set.
    if self.parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')
    if self.filter_type is None:
        raise YPYModelError('Key property filter_type is None')
    if self.filter_logical_not is None:
        raise YPYModelError('Key property filter_logical_not is None')
    return self.parent._common_path +'/ietf-diffserv-classifier:filter-entry[ietf-diffserv-classifier:filter-type = ' + str(self.filter_type) + '][ietf-diffserv-classifier:filter-logical-not = ' + str(self.filter_logical_not) + ']'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # Generated model node: filter entries are configuration data.
    return True
def _has_data(self):
    # True when either key leaf is set, or any child config list contains
    # at least one entry that itself has data.
    if self.filter_type is not None:
        return True
    if self.filter_logical_not is not None:
        return True
    if self.destination_ip_address_cfg is not None:
        for child_ref in self.destination_ip_address_cfg:
            if child_ref._has_data():
                return True
    if self.destination_port_cfg is not None:
        for child_ref in self.destination_port_cfg:
            if child_ref._has_data():
                return True
    if self.dscp_cfg is not None:
        for child_ref in self.dscp_cfg:
            if child_ref._has_data():
                return True
    if self.protocol_cfg is not None:
        for child_ref in self.protocol_cfg:
            if child_ref._has_data():
                return True
    if self.source_ip_address_cfg is not None:
        for child_ref in self.source_ip_address_cfg:
            if child_ref._has_data():
                return True
    if self.source_port_cfg is not None:
        for child_ref in self.source_port_cfg:
            if child_ref._has_data():
                return True
    return False
@staticmethod
def _meta_info():
    # Deferred import avoids a circular dependency with the generated meta module.
    from ydk.models.ietf._meta import _ietf_diffserv_classifier as meta
    return meta._meta_table['Classifiers.ClassifierEntry.FilterEntry']['meta_info']
@property
def _common_path(self):
    # Top-level list entry: the path is absolute and keyed only by the
    # classifier entry name.
    if self.classifier_entry_name is None:
        raise YPYModelError('Key property classifier_entry_name is None')
    return '/ietf-diffserv-classifier:classifiers/ietf-diffserv-classifier:classifier-entry[ietf-diffserv-classifier:classifier-entry-name = ' + str(self.classifier_entry_name) + ']'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # Classifier entries are configuration data.
    return True
def _has_data(self):
    # True when any leaf is set or any child filter entry carries data.
    if self.classifier_entry_name is not None:
        return True
    if self.classifier_entry_descr is not None:
        return True
    if self.classifier_entry_filter_operation is not None:
        return True
    if self.filter_entry is not None:
        for child_ref in self.filter_entry:
            if child_ref._has_data():
                return True
    return False
@staticmethod
def _meta_info():
    # Deferred import avoids a circular dependency with the generated meta module.
    from ydk.models.ietf._meta import _ietf_diffserv_classifier as meta
    return meta._meta_table['Classifiers.ClassifierEntry']['meta_info']
@property
def _common_path(self):
    # Root container: fixed absolute path, no list keys involved.
    return '/ietf-diffserv-classifier:classifiers'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # The classifiers container holds configuration data.
    return True
def _has_data(self):
    # True when any child classifier entry carries data.
    if self.classifier_entry is not None:
        for child_ref in self.classifier_entry:
            if child_ref._has_data():
                return True
    return False
@staticmethod
def _meta_info():
    # Deferred import avoids a circular dependency with the generated meta module.
    from ydk.models.ietf._meta import _ietf_diffserv_classifier as meta
    return meta._meta_table['Classifiers']['meta_info']
class ProtocolIdentity(FilterTypeIdentity):
    """Identity for the 'protocol' filter-type."""

    _prefix = 'classifier'
    _revision = '2015-04-07'

    def __init__(self):
        super(ProtocolIdentity, self).__init__()

    @staticmethod
    def _meta_info():
        # Deferred import avoids a circular dependency with the meta module.
        from ydk.models.ietf._meta import _ietf_diffserv_classifier as meta
        return meta._meta_table['ProtocolIdentity']['meta_info']
class DestinationIpAddressIdentity(FilterTypeIdentity):
    """Identity for the 'destination-ip-address' filter-type."""

    _prefix = 'classifier'
    _revision = '2015-04-07'

    def __init__(self):
        super(DestinationIpAddressIdentity, self).__init__()

    @staticmethod
    def _meta_info():
        # Deferred import avoids a circular dependency with the meta module.
        from ydk.models.ietf._meta import _ietf_diffserv_classifier as meta
        return meta._meta_table['DestinationIpAddressIdentity']['meta_info']
class MatchAnyFilterIdentity(ClassifierEntryFilterOperationTypeIdentity):
    """Identity for the classifier-entry logical OR filter operation."""

    _prefix = 'classifier'
    _revision = '2015-04-07'

    def __init__(self):
        super(MatchAnyFilterIdentity, self).__init__()

    @staticmethod
    def _meta_info():
        # Deferred import avoids a circular dependency with the meta module.
        from ydk.models.ietf._meta import _ietf_diffserv_classifier as meta
        return meta._meta_table['MatchAnyFilterIdentity']['meta_info']
class SourcePortIdentity(FilterTypeIdentity):
    """Identity for the 'source-port' filter-type."""

    _prefix = 'classifier'
    _revision = '2015-04-07'

    def __init__(self):
        super(SourcePortIdentity, self).__init__()

    @staticmethod
    def _meta_info():
        # Deferred import avoids a circular dependency with the meta module.
        from ydk.models.ietf._meta import _ietf_diffserv_classifier as meta
        return meta._meta_table['SourcePortIdentity']['meta_info']
class DscpIdentity(FilterTypeIdentity):
    """Identity for the DSCP filter-type."""

    _prefix = 'classifier'
    _revision = '2015-04-07'

    def __init__(self):
        super(DscpIdentity, self).__init__()

    @staticmethod
    def _meta_info():
        # Deferred import avoids a circular dependency with the meta module.
        from ydk.models.ietf._meta import _ietf_diffserv_classifier as meta
        return meta._meta_table['DscpIdentity']['meta_info']
class DestinationPortIdentity(FilterTypeIdentity):
    """Identity for the 'destination-port' filter-type."""

    _prefix = 'classifier'
    _revision = '2015-04-07'

    def __init__(self):
        super(DestinationPortIdentity, self).__init__()

    @staticmethod
    def _meta_info():
        # Deferred import avoids a circular dependency with the meta module.
        from ydk.models.ietf._meta import _ietf_diffserv_classifier as meta
        return meta._meta_table['DestinationPortIdentity']['meta_info']
class SourceIpAddressIdentity(FilterTypeIdentity):
    """Identity for the 'source-ip-address' filter-type."""

    _prefix = 'classifier'
    _revision = '2015-04-07'

    def __init__(self):
        super(SourceIpAddressIdentity, self).__init__()

    @staticmethod
    def _meta_info():
        # Deferred import avoids a circular dependency with the meta module.
        from ydk.models.ietf._meta import _ietf_diffserv_classifier as meta
        return meta._meta_table['SourceIpAddressIdentity']['meta_info']
class MatchAllFilterIdentity(ClassifierEntryFilterOperationTypeIdentity):
    """Identity for the classifier-entry logical AND filter operation."""

    _prefix = 'classifier'
    _revision = '2015-04-07'

    def __init__(self):
        super(MatchAllFilterIdentity, self).__init__()

    @staticmethod
    def _meta_info():
        # Deferred import avoids a circular dependency with the meta module.
        from ydk.models.ietf._meta import _ietf_diffserv_classifier as meta
        return meta._meta_table['MatchAllFilterIdentity']['meta_info']
| |
import unittest
import xo.game as game
from xo.error import IllegalStateError
from xo.game import Game
class InitStateTestCase(unittest.TestCase):
    """A freshly constructed Game: INIT state, nothing playable yet."""

    def setUp(self):
        self.game = Game()

    def test_it_is_in_init_state(self):
        g = self.game
        self.assertEqual(g.state, game.STATE_INIT)
        self.assertIsNone(g.board)
        self.assertIsNone(g.turn)
        self.assertIsNone(g.next_turn())

    def test_it_is_not_allowed_to_call_moveto(self):
        # moveto() is only legal once the game has been started.
        with self.assertRaisesRegex(IllegalStateError, game.STATE_INIT):
            self.game.moveto(1, 1)

    def test_it_is_not_allowed_to_call_restart(self):
        # restart() is only legal after a game has finished.
        with self.assertRaisesRegex(IllegalStateError, game.STATE_INIT):
            self.game.restart()
class PlayingStateTestCase(unittest.TestCase):
    """Right after start('x'): PLAYING state, empty board, x to move."""

    def setUp(self):
        self.game = Game()
        self.game.start('x')

    def test_it_is_in_playing_state(self):
        g = self.game
        self.assertEqual(g.state, game.STATE_PLAYING)
        self.assertEqual(str(g.board), '.........')
        self.assertEqual(g.turn, 'x')
        self.assertEqual(g.next_turn(), 'o')

    def test_it_is_not_allowed_to_call_start(self):
        # A running game cannot be started again.
        with self.assertRaisesRegex(IllegalStateError, game.STATE_PLAYING):
            self.game.start('o')

    def test_it_is_not_allowed_to_call_restart(self):
        # restart() is only legal once the game is over.
        with self.assertRaisesRegex(IllegalStateError, game.STATE_PLAYING):
            self.game.restart()
class GameoverStateTestCase(unittest.TestCase):
    """Once a game has been won, only restart() is a legal transition."""

    def setUp(self):
        self.game = Game()
        self.game.start('o')
        # o takes the whole first column and wins; x answers in column 2.
        for row, col in [(1, 1), (1, 2), (2, 1), (2, 2), (3, 1)]:
            self.game.moveto(row, col)

    def test_it_is_in_gameover_state(self):
        g = self.game
        self.assertEqual(g.state, game.STATE_GAMEOVER)
        self.assertEqual(str(g.board), 'ox.ox.o..')
        self.assertEqual(g.turn, 'o')
        self.assertEqual(g.next_turn(), 'x')
        for key, expected in [('total', 1), ('xwins', 0),
                              ('owins', 1), ('squashed', 0)]:
            self.assertEqual(g.statistics[key], expected)

    def test_it_is_not_allowed_to_call_start(self):
        with self.assertRaisesRegex(IllegalStateError, game.STATE_GAMEOVER):
            self.game.start('x')

    def test_it_is_not_allowed_to_call_moveto(self):
        with self.assertRaisesRegex(IllegalStateError, game.STATE_GAMEOVER):
            self.game.moveto(2, 2)
class GamePlayTestCase(unittest.TestCase):
    """Move handling: invalid moves, turn advance, winning and squashing."""

    def setUp(self):
        self.game = Game()
        self.game.start('x')

    def _play(self, moves):
        # Feed a sequence of (row, col) moves; return the last event.
        event = None
        for row, col in moves:
            event = self.game.moveto(row, col)
        return event

    def test_when_move_is_out_of_bounds(self):
        event = self.game.moveto(0, 1)
        self.assertEqual(event['name'], game.EVENT_NAME_INVALID_MOVE)
        self.assertEqual(event['reason'], game.EVENT_REASON_OUT_OF_BOUNDS)

    def test_when_move_is_to_an_occupied_position(self):
        event = self._play([(1, 1), (1, 1)])
        self.assertEqual(event['name'], game.EVENT_NAME_INVALID_MOVE)
        self.assertEqual(event['reason'], game.EVENT_REASON_OCCUPIED)

    def test_when_next_turn(self):
        event = self.game.moveto(1, 1)
        self.assertEqual(event['name'], game.EVENT_NAME_NEXT_TURN)
        self.assertEqual(event['last_move'], {'r': 1, 'c': 1, 'token': 'x'})
        self.assertEqual(self.game.state, game.STATE_PLAYING)
        self.assertEqual(str(self.game.board), 'x........')
        self.assertEqual(self.game.turn, 'o')
        self.assertEqual(self.game.next_turn(), 'x')

    def test_when_x_wins(self):
        # x completes the main diagonal on its fourth move.
        event = self._play([(2, 2), (1, 2), (2, 1), (2, 3),
                            (1, 1), (3, 1), (3, 3)])
        self.assertEqual(event['name'], game.EVENT_NAME_GAMEOVER)
        self.assertEqual(event['last_move'], {'r': 3, 'c': 3, 'token': 'x'})
        self.assertEqual(event['details'], [{
            'index': 1,
            'where': 'diagonal',
            'positions': [(1, 1), (2, 2), (3, 3)]
        }])
        self.assertEqual(self.game.state, game.STATE_GAMEOVER)
        self.assertEqual(str(self.game.board), 'xo.xxoo.x')
        self.assertEqual(self.game.turn, 'x')
        self.assertEqual(self.game.next_turn(), 'o')
        for key, expected in [('total', 1), ('xwins', 1),
                              ('owins', 0), ('squashed', 0)]:
            self.assertEqual(self.game.statistics[key], expected)

    def test_when_game_is_squashed(self):
        # Fill the board with no three-in-a-row for either player.
        event = self._play([(1, 1), (2, 2), (3, 3), (2, 3), (2, 1),
                            (3, 1), (1, 3), (1, 2), (3, 2)])
        self.assertEqual(event['name'], game.EVENT_NAME_GAMEOVER)
        self.assertEqual(event['reason'], game.EVENT_REASON_SQUASHED)
        self.assertEqual(event['last_move'], {'r': 3, 'c': 2, 'token': 'x'})
        self.assertEqual(self.game.state, game.STATE_GAMEOVER)
        self.assertEqual(str(self.game.board), 'xoxxoooxx')
        self.assertEqual(self.game.turn, 'x')
        self.assertEqual(self.game.next_turn(), 'o')
        for key, expected in [('total', 1), ('xwins', 0),
                              ('owins', 0), ('squashed', 1)]:
            self.assertEqual(self.game.statistics[key], expected)
class RestartGameTestCase(unittest.TestCase):
    """restart() clears the board, updates the turn and keeps statistics."""

    def setUp(self):
        self.game = Game()
        self.game.start('o')

    def _play(self, moves):
        # Feed a sequence of (row, col) moves into the running game.
        for row, col in moves:
            self.game.moveto(row, col)

    def test_restart_after_a_win(self):
        # o completes the third column and wins, then the game is restarted.
        self._play([(1, 3), (1, 1), (2, 3), (2, 1), (3, 3)])
        self.game.restart()
        self.assertEqual(self.game.state, game.STATE_PLAYING)
        self.assertEqual(str(self.game.board), '.........')
        self.assertEqual(self.game.turn, 'o')
        self.assertEqual(self.game.next_turn(), 'x')
        for key, expected in [('total', 1), ('xwins', 0),
                              ('owins', 1), ('squashed', 0)]:
            self.assertEqual(self.game.statistics[key], expected)

    def test_restart_after_a_squashed_game(self):
        # A full board without a winner, then restart.
        self._play([(1, 1), (2, 2), (3, 3), (2, 3), (2, 1),
                    (3, 1), (1, 3), (1, 2), (3, 2)])
        self.game.restart()
        self.assertEqual(self.game.state, game.STATE_PLAYING)
        self.assertEqual(str(self.game.board), '.........')
        self.assertEqual(self.game.turn, 'x')
        self.assertEqual(self.game.next_turn(), 'o')
        for key, expected in [('total', 1), ('xwins', 0),
                              ('owins', 0), ('squashed', 1)]:
            self.assertEqual(self.game.statistics[key], expected)
| |
#-*- coding: utf-8 -*-
# stino/compiler.py
import os
import re
import threading
import subprocess
import sublime
from . import fileutil
from . import textutil
from . import constant
from . import serial
from . import base
from . import preprocess
from . import sketch
from . import console
# MCU name (the board's "build.mcu" value) -> SRAM size in bytes, as a
# string because board arguments are string-typed.  Used to supply
# "upload.maximum_ram_size" when a board definition omits it, so the size
# report can show RAM usage.
ram_size_dict = {
    'attiny44': '256',
    'attiny45': '256',
    'attiny84': '512',
    'attiny85': '512',
    'atmega8': '1024',
    'atmega168': '1024',
    'atmega328p': '2048',
    'atmega644': '4096',
    'atmega644p': '4096',
    'atmega1284': '16384',
    'atmega1284p': '16384',
    'atmega1280': '4096',
    # Fixed: the ATmega2560 has 8192 bytes of SRAM (was mistyped '8196').
    'atmega2560': '8192',
    'atmega32u4': '2560',
    'at90usb162': '512',
    'at90usb646': '4096',
    'at90usb1286': '8192',
    'cortex-m3': '98304',
    'cortex-m4': '16384',
}
class Args:
    """Holder that computes the full substitution-argument dict once.

    The dict is produced eagerly in the constructor so later queries via
    getArgs() are free.
    """

    def __init__(self, cur_project, arduino_info):
        self.args = getFullArgs(cur_project, arduino_info)

    def getArgs(self):
        """Return the precomputed argument dict (may be empty)."""
        return self.args
class Command:
    """One build step: a shell command plus its bookkeeping.

    Tracks the input/output file names, whether this is the size-report
    step, and the stdout captured from the most recent run.
    """

    def __init__(self, command):
        self.in_file = ''
        self.out_file = ''
        self.command = command
        self.calc_size = False
        self.stdout = ''
        self.out_text = ''

    def run(self, output_console):
        """Execute the command through the shell and return its exit code.

        Any queued banner text is printed first; the command line and its
        output are echoed only when 'show_compilation_output' is enabled.
        """
        output_console.printText(self.out_text)
        if self.out_file:
            output_console.printText('Creating %s...\n' % self.out_file)
        shell_cmd = formatCommand(self.command)
        proc = subprocess.Popen(shell_cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE, shell=True)
        raw_out, raw_err = proc.communicate()
        return_code = proc.returncode
        decoded_out = raw_out.decode(constant.sys_encoding).replace('\r', '')
        decoded_err = raw_err.decode(constant.sys_encoding).replace('\r', '')
        self.stdout = decoded_out
        if constant.sketch_settings.get('show_compilation_output', False):
            output_console.printText(self.command)
            output_console.printText('\n')
            output_console.printText(decoded_out)
            output_console.printText(decoded_err)
        return return_code

    def isSizeCommand(self):
        return self.calc_size

    def setSizeCommand(self):
        self.calc_size = True

    def getOutFile(self):
        return self.out_file

    def getCommand(self):
        return self.command

    def getStdout(self):
        return self.stdout

    def setInFile(self, in_file):
        self.in_file = in_file

    def setOutFile(self, out_file):
        self.out_file = out_file

    def setCommand(self, command):
        self.command = command

    def setOutputText(self, text):
        self.out_text = text
class Compiler:
    """Drives a full sketch build.

    Generates the command list from the resolved arguments and runs it on
    a background thread, reporting progress to a per-project console.
    """

    def __init__(self, arduino_info, cur_project, args):
        self.arduino_info = arduino_info
        self.cur_project = cur_project
        self.args = args.getArgs()
        self.output_console = console.Console(cur_project.getName())
        self.no_error = True
        self.is_finished = False
        self.prepare()

    def getOutputConsole(self):
        return self.output_console

    def isFinished(self):
        # True once compile() (or a failed run()) has completed.
        return self.is_finished

    def noError(self):
        return self.no_error

    def prepare(self):
        # An empty args dict means the Arduino installation could not be
        # resolved; leave the command list empty so run() reports it.
        self.command_list = []
        if self.args:
            self.command_list = genCommandList(self.args, self.cur_project, self.arduino_info)

    def run(self):
        if self.command_list:
            compilation_thread = threading.Thread(target=self.compile)
            compilation_thread.start()
        else:
            self.no_error = False
            self.is_finished = True
            # Fixed typo in the user-facing message ('Ardunio' -> 'Arduino').
            self.output_console.printText('Please choose the Arduino Application Folder.')

    def compile(self):
        """Run every queued command in order, stopping at the first failure."""
        self.output_console.printText('Compiling %s...\n' % self.cur_project.getName())
        for cur_command in self.command_list:
            return_code = cur_command.run(self.output_console)
            if return_code > 0:
                self.output_console.printText('[Stino - Error %d]\n' % return_code)
                self.no_error = False
                break
            elif cur_command.isSizeCommand():
                # The size command's stdout carries the flash/RAM figures.
                stdout = cur_command.getStdout()
                printSizeInfo(self.output_console, stdout, self.args)
        if self.no_error:
            self.output_console.printText('[Stino - Done compiling.]\n')
        self.is_finished = True
def getChosenArgs(arduino_info):
    """Collect the build arguments implied by the user's current platform,
    board, board-option and programmer selections.

    Returns an empty dict when only the pseudo-platform at index 0 exists,
    which callers treat as "Arduino folder not chosen".
    """
    args = {}
    platform_list = arduino_info.getPlatformList()
    if len(platform_list) > 1:
        # Clamp a missing/stale platform index to the first real platform
        # and persist the normalized selection back to the settings.
        platform_id = constant.sketch_settings.get('platform', -1)
        if not ((platform_id > 0) and (platform_id < len(platform_list))):
            platform_id = 1
        cur_platform = platform_list[platform_id]
        platform_name = cur_platform.getName()
        constant.sketch_settings.set('platform', platform_id)
        constant.sketch_settings.set('platform_name', platform_name)
        selected_platform = platform_list[platform_id]
        board_list = selected_platform.getBoardList()
        board_id = constant.sketch_settings.get('board', -1)
        if board_list:
            serial_port = getSelectedSerialPort()
            args['serial.port'] = serial_port
            # Fixed: this used `or`, which is true for every integer, so an
            # out-of-range saved board index was never reset — the default
            # of -1 silently selected the *last* board and was persisted.
            if not (board_id > -1 and board_id < len(board_list)):
                board_id = 0
            constant.sketch_settings.set('board', board_id)
            selected_board = board_list[board_id]
            args.update(selected_board.getArgs())
            # Apply per-board sub-options (cpu, clock, ...), defaulting
            # every option to its first item when none (or too few) saved.
            board_option_list = selected_board.getOptionList()
            if board_option_list:
                board_option_key = '%d.%d' % (platform_id, board_id)
                board_option_dict = constant.sketch_settings.get('board_option', {})
                if board_option_key in board_option_dict:
                    option_item_id_list = board_option_dict[board_option_key]
                    if len(option_item_id_list) < len(board_option_list):
                        option_item_id_list = []
                else:
                    option_item_id_list = []
                if not option_item_id_list:
                    option_item_id_list = [0] * len(board_option_list)
                # enumerate() instead of list.index() so duplicate option
                # objects cannot map to the wrong saved item id.
                for index, board_option in enumerate(board_option_list):
                    option_item_id = option_item_id_list[index]
                    option_item = board_option.getItemList()[option_item_id]
                    args.update(option_item.getArgs())
            if 'build.vid' in args:
                if not 'build.extra_flags' in args:
                    args['build.extra_flags'] = '-DUSB_VID={build.vid} -DUSB_PID={build.pid}'
            if 'bootloader.path' in args:
                # Fold the bootloader folder into the file name used by the
                # upload recipes.
                bootloader_path = args['bootloader.path']
                if 'bootloader.file' in args:
                    bootloader_file = args['bootloader.file']
                    args['bootloader.file'] = bootloader_path + '/' + bootloader_file
            programmer_list = selected_platform.getProgrammerList()
            if programmer_list:
                platform_programmer_dict = constant.sketch_settings.get('programmer', {})
                programmer_id = platform_programmer_dict.get(str(platform_id), 0)
                programmer = programmer_list[programmer_id]
                args.update(programmer.getArgs())
            platform_file = getPlatformFile(arduino_info)
            args = addBuildUsbValue(args, platform_file)
            args = replaceAllDictValue(args)
            if not 'upload.maximum_ram_size' in args:
                args['upload.maximum_ram_size'] = '0'
            if 'build.mcu' in args:
                build_mcu = args['build.mcu']
                if build_mcu in ram_size_dict:
                    args['upload.maximum_ram_size'] = ram_size_dict[build_mcu]
            # Translate boolean-ish board flags into the compiler switches
            # the recipes expect.
            if 'build.elide_constructors' in args:
                if args['build.elide_constructors'] == 'true':
                    args['build.elide_constructors'] = '-felide-constructors'
                else:
                    args['build.elide_constructors'] = ''
            if 'build.cpu' in args:
                args['build.mcu'] = args['build.cpu']
            if 'build.gnu0x' in args:
                if args['build.gnu0x'] == 'true':
                    args['build.gnu0x'] = '-std=gnu++0x'
                else:
                    args['build.gnu0x'] = ''
            if 'build.cpp0x' in args:
                if args['build.cpp0x'] == 'true':
                    args['build.cpp0x'] = '-std=c++0x'
                else:
                    args['build.cpp0x'] = ''
    return args
def getSelectedSerialPort():
    """Return the serial port chosen in the sketch settings.

    Falls back to 'no_serial_port' when no ports exist; an out-of-range
    saved index is clamped to 0 and persisted back to the settings.
    """
    port_names = serial.getSerialPortList()
    if not port_names:
        return 'no_serial_port'
    chosen = constant.sketch_settings.get('serial_port', -1)
    if not (0 <= chosen < len(port_names)):
        chosen = 0
    constant.sketch_settings.set('serial_port', chosen)
    return port_names[chosen]
def getReplaceTextList(text):
    """Return every '{...}' placeholder token found in *text*, in order."""
    return re.findall(r'\{\S+?}', text)
def replaceValueText(value_text, args_dict):
    """Expand every '{key}' placeholder in *value_text* using *args_dict*.

    Keys absent from the dict expand to the empty string.
    """
    for placeholder in getReplaceTextList(value_text):
        key = placeholder[1:-1]
        value_text = value_text.replace(placeholder, args_dict.get(key, ''))
    return value_text
def replaceAllDictValue(args_dict):
    """Expand placeholders in every value of *args_dict*, in place.

    Values replaced earlier in the iteration are visible to later ones.
    """
    for key in list(args_dict):
        args_dict[key] = replaceValueText(args_dict[key], args_dict)
    return args_dict
def addBuildUsbValue(args, platform_file):
    # Pull the 'build.*' USB-related values (vid/pid/manufacturer/...) out
    # of platform.txt and merge them into args, expanding placeholders.
    # NOTE(review): lines containing '#' anywhere are skipped, not only
    # comment lines — presumably no real key/value line contains '#'.
    lines = fileutil.readFileLines(platform_file)
    for line in lines:
        line = line.strip()
        if line and not '#' in line:
            (key, value) = textutil.getKeyValue(line)
            if 'extra_flags' in key:
                # extra_flags are merged elsewhere; never overwrite here.
                continue
            if 'build.' in key:
                if 'usb_manufacturer' in key:
                    if not value:
                        value = 'unknown'
                value = replaceValueText(value, args)
                if constant.sys_platform == 'windows':
                    # Windows shell quoting: escape embedded double quotes.
                    value = value.replace('"', '\\"')
                # Single quotes become double quotes on every platform.
                # NOTE(review): nesting reconstructed from mangled source —
                # confirm this line is outside the windows-only branch.
                value = value.replace('\'', '"')
                args[key] = value
    return args
def getDefaultArgs(cur_project, arduino_info):
    """Build the baseline substitution arguments shared by every board."""
    # Kept even though build_system_path is unused below: getCoreFolder()
    # also normalizes the 'platform' setting as a side effect.
    core_folder = getCoreFolder(arduino_info)
    build_system_path = os.path.join(core_folder, 'system')
    arduino_folder = base.getArduinoFolder()
    return {
        'runtime.ide.path': arduino_folder,
        'ide.path': os.path.join(arduino_folder, 'hardware'),
        'build.project_name': cur_project.getName(),
        'serial.port.file': getSelectedSerialPort(),
        'archive_file': 'core.a',
        'software': 'ARDUINO',
        'runtime.ide.version': '%d' % arduino_info.getVersion(),
        # These three survive as literal placeholders so that the recipe
        # templates can substitute them per compile command later.
        'source_file': '{source_file}',
        'object_file': '{object_file}',
        'object_files': '{object_files}',
        'includes': '{includes}',
        'build.path': getBuildFolder(cur_project),
    }
def getBuildFolder(cur_project):
    """Return (and prepare) the per-project build output folder.

    Uses the configured 'build_folder' when it exists, otherwise falls
    back to <documents>/Arduino_Build; a per-project subfolder is created
    and purged of stale dependency files.
    """
    root = constant.sketch_settings.get('build_folder', '')
    if not (root and os.path.isdir(root)):
        root = os.path.join(fileutil.getDocumentFolder(), 'Arduino_Build')
    folder = os.path.join(root, cur_project.getName())
    checkBuildFolder(folder)
    return folder
def checkBuildFolder(build_folder):
    """Ensure *build_folder* exists as a directory and drop stale .d files."""
    if os.path.isfile(build_folder):
        # A plain file is squatting on the folder name; remove it.
        os.remove(build_folder)
    if not os.path.exists(build_folder):
        os.makedirs(build_folder)
    for file_name in fileutil.listDir(build_folder, with_dirs=False):
        # Stale makefile-style dependency files would confuse rebuilds.
        if os.path.splitext(file_name)[1] == '.d':
            os.remove(os.path.join(build_folder, file_name))
def getDefaultPlatformFile(arduino_info):
    """Pick the bundled fallback platform.txt matching the selected platform."""
    platform_list = arduino_info.getPlatformList()
    platform_id = constant.sketch_settings.get('platform', 1)
    platform = platform_list[platform_id]
    platform_name = platform.getName()
    file_name = 'arduino_avr.txt'
    if 'Arduino ARM' in platform_name:
        file_name = 'arduino_arm.txt'
    elif 'Teensy' in platform_name:
        board_list = platform.getBoardList()
        board = board_list[constant.sketch_settings.get('board', 0)]
        # Teensy 3.x is ARM, earlier models are AVR; the version is parsed
        # from the second word of the board name.
        if float(board.getName().split()[1]) >= 3.0:
            file_name = 'teensy_arm.txt'
        else:
            file_name = 'teensy_avr.txt'
    elif 'Zpuino' in platform_name:
        file_name = 'zpuino.txt'
    return os.path.join(constant.compile_root, file_name)
def getCoreFolder(arduino_info):
    """Return the selected platform's first core folder holding a platform.txt.

    Also normalizes and persists the 'platform' selection, mirroring
    getChosenArgs().  Returns '' when no core folder qualifies.
    """
    platform_list = arduino_info.getPlatformList()
    platform_id = constant.sketch_settings.get('platform', -1)
    if not ((platform_id > 0) and (platform_id < len(platform_list))):
        platform_id = 1
    cur_platform = platform_list[platform_id]
    constant.sketch_settings.set('platform', platform_id)
    constant.sketch_settings.set('platform_name', cur_platform.getName())
    for candidate in cur_platform.getCoreFolderList():
        if os.path.isfile(os.path.join(candidate, 'platform.txt')):
            return candidate
    return ''
def getPlatformFile(arduino_info):
    """Locate platform.txt: real core folder first, bundled fallback otherwise."""
    core_folder = getCoreFolder(arduino_info)
    if core_folder:
        return os.path.join(core_folder, 'platform.txt')
    return getDefaultPlatformFile(arduino_info)
def splitPlatformFile(platform_file):
    """Split platform.txt text into (header, recipes) at the first 'recipe.'.

    Raises ValueError (from str.index) when the file has no recipe entry,
    matching the original behaviour.
    """
    text = fileutil.readFile(platform_file)
    cut = text.index('recipe.')
    return (text[:cut], text[cut:])
def getPlatformArgs(platform_text, args):
    # Merge key=value definitions from a platform.txt section into *args*,
    # expanding '{...}' placeholders against the values collected so far.
    # Tool namespace prefixes are stripped so upload recipes share one
    # flat key space.
    lines = platform_text.split('\n')
    for line in lines:
        line = line.strip()
        # NOTE(review): skips any line containing '#', not only comments.
        if line and not '#' in line:
            (key, value) = textutil.getKeyValue(line)
            value = replaceValueText(value, args)
            # Flatten the per-tool prefixes (avrdude / bossac / teensy).
            if 'tools.avrdude.' in key:
                key = key.replace('tools.avrdude.', '')
            if 'tools.bossac.' in key:
                key = key.replace('tools.bossac.', '')
            if 'tools.teensy.' in key:
                key = key.replace('tools.teensy.', '')
            if 'params.' in key:
                key = key.replace('params.', '')
            if constant.sys_platform == 'linux':
                # Prefer the '.linux' variant of a key on Linux hosts.
                if '.linux' in key:
                    key = key.replace('.linux', '')
            show_upload_output = constant.sketch_settings.get('show_upload_output', False)
            if not show_upload_output:
                # Map quiet-flag keys onto the verbose slot so the quiet
                # variant of the upload flags is what actually gets used.
                if '.quiet' in key:
                    key = key.replace('.quiet', '.verbose')
            if '.verbose' in key:
                verify_code = constant.sketch_settings.get('verify_code', False)
                if verify_code:
                    # Appends ' -V' when 'verify_code' is enabled.
                    # NOTE(review): for avrdude '-V' *disables* verification
                    # — confirm the intended semantics of this setting.
                    value += ' -V'
            if key == 'build.extra_flags':
                if key in args:
                    # An extra_flags value supplied earlier wins.
                    continue
            args[key] = value
    return args
def getFullArgs(cur_project, arduino_info):
    # Assemble the complete substitution-argument dict for one build:
    # defaults + chosen board args + platform.txt header and recipes.
    # Returns {} when no board could be chosen (Arduino folder missing).
    args = {}
    board_args = getChosenArgs(arduino_info)
    if board_args:
        default_args = getDefaultArgs(cur_project, arduino_info)
        args.update(default_args)
        args.update(board_args)
        platform_file = getPlatformFile(arduino_info)
        (platform_text_header, platform_text_body) = splitPlatformFile(platform_file)
        args = getPlatformArgs(platform_text_header, args)
        # Resolve the core / variant source folders used for includes.
        variant_folder = args['build.variants_folder']
        cores_folder = args['build.cores_folder']
        build_core = args['build.core']
        build_core_folder = os.path.join(cores_folder, build_core)
        args['build.core_folder'] = build_core_folder
        if 'build.variant' in args:
            build_variant = args['build.variant']
            build_variant_folder = os.path.join(variant_folder, build_variant)
            args['build.variant.path'] = build_variant_folder
        else:
            # Boards without a variant fall back to the core folder.
            args['build.variant.path'] = build_core_folder
        if 'compiler.path' in args:
            compiler_path = args['compiler.path']
        else:
            runtime_ide_path = args['runtime.ide.path']
            compiler_path = runtime_ide_path + '/hardware/tools/avr/bin/'
        compiler_c_cmd = args['compiler.c.cmd']
        if constant.sys_platform == 'windows':
            compiler_c_cmd += '.exe'
        compiler_c_cmd_file = os.path.join(compiler_path, compiler_c_cmd)
        # An empty compiler.path means "use the toolchain found on $PATH".
        if os.path.isfile(compiler_c_cmd_file):
            args['compiler.path'] = compiler_path
        else:
            args['compiler.path'] = ''
        # Append user-configured extra flags after any board-provided ones.
        extra_flags = constant.sketch_settings.get('extra_flag', '')
        if 'build.extra_flags' in args:
            build_extra_flags = args['build.extra_flags']
        else:
            build_extra_flags = ''
        if extra_flags:
            build_extra_flags += ' '
            build_extra_flags += extra_flags
        args['build.extra_flags'] = build_extra_flags
        args = getPlatformArgs(platform_text_body, args)
    return args
def getLibFolderListFromProject(cur_project, arduino_info):
    """Resolve the library folders referenced by the project's headers.

    Each included header is looked up in the selected platform's library
    index first, then in the general (index 0) platform's index.
    Duplicates are dropped while preserving discovery order.
    """
    platform_list = arduino_info.getPlatformList()
    platform_id = constant.sketch_settings.get('platform', 1)
    general_index = platform_list[0].getHLibDict()
    selected_index = platform_list[platform_id].getHLibDict()
    src_files = cur_project.getInoSrcFileList() + cur_project.getCSrcFileList()
    lib_folder_list = []
    for header in preprocess.getHListFromSrcList(src_files):
        if header in selected_index:
            folder = selected_index[header]
        elif header in general_index:
            folder = general_index[header]
        else:
            folder = ''
        if folder and folder not in lib_folder_list:
            lib_folder_list.append(folder)
    return lib_folder_list
def genBuildCppFile(build_folder, cur_project, arduino_info):
    """Generate the single .cpp translation unit from the project's .ino files."""
    cpp_file = os.path.join(build_folder, cur_project.getName() + '.ino.cpp')
    # 'set_bare_gcc_only' disables the Arduino-style prototype munging.
    munge = not constant.sketch_settings.get('set_bare_gcc_only', False)
    preprocess.genCppFileFromInoFileList(cpp_file,
                                         cur_project.getInoSrcFileList(),
                                         arduino_info.getVersion(),
                                         preprocess=munge)
    return cpp_file
def genIncludesPara(build_folder, project_folder, core_folder_list, compiler_include_folder):
    """Build the quoted '-I...' include-path fragment for compile commands.

    Order: build folder, project folder, compiler include folder, then the
    (expanded) core/variant/library folders.  The result keeps a trailing
    space, matching the recipe templates' expectations.
    """
    folders = [build_folder, project_folder, compiler_include_folder]
    folders += sketch.getFolderListFromFolderList(core_folder_list)
    return ''.join('"-I%s" ' % folder for folder in folders)
def getCompileCommand(c_file, args, includes_para):
    """Create the Command compiling *c_file* into build_path/<basename>.o."""
    build_folder = args['build.path']
    obj_file = os.path.join(build_folder, os.path.split(c_file)[1] + '.o')
    ext = os.path.splitext(c_file)[1]
    # Pick the recipe matching the source language (assembly / C / C++).
    if ext == '.S':
        recipe = args['recipe.S.o.pattern']
    elif ext == '.c':
        recipe = args['recipe.c.o.pattern']
    else:
        recipe = args['recipe.cpp.o.pattern']
    recipe = recipe.replace('{includes}', includes_para)
    recipe = recipe.replace('{source_file}', c_file)
    recipe = recipe.replace('{object_file}', obj_file)
    command = Command(recipe)
    command.setInFile(c_file)
    command.setOutFile(obj_file)
    return command
def getCompileCommandList(c_file_list, args, includes_para):
    """Create one compile Command per source file, preserving input order."""
    return [getCompileCommand(c_file, args, includes_para)
            for c_file in c_file_list]
def getArCommand(args, core_command_list):
    """Create the Command that archives the core objects into core.a."""
    archive_file = os.path.join(args['build.path'], args['archive_file'])
    object_files = ' '.join('"%s"' % cmd.getOutFile()
                            for cmd in core_command_list)
    command_text = args['recipe.ar.pattern'].replace('"{object_file}"',
                                                     object_files)
    ar_command = Command(command_text)
    ar_command.setOutFile(archive_file)
    return ar_command
def getElfCommand(args, project_command_list):
    """Create the Command that links the project objects into <name>.elf."""
    elf_file = os.path.join(args['build.path'],
                            args['build.project_name'] + '.elf')
    object_files = ' '.join('"%s"' % cmd.getOutFile()
                            for cmd in project_command_list)
    command_text = args['recipe.c.combine.pattern'].replace('{object_files}',
                                                            object_files)
    elf_command = Command(command_text)
    elf_command.setOutFile(elf_file)
    return elf_command
def getEepCommand(args):
    """Create the Command extracting the EEPROM image (<name>.eep)."""
    eep_file = os.path.join(args['build.path'],
                            args['build.project_name'] + '.eep')
    eep_command = Command(args['recipe.objcopy.eep.pattern'])
    eep_command.setOutFile(eep_file)
    return eep_command
def getHexCommand(args):
    """Create the Command producing the final flash image (.hex/.bin)."""
    command_text = args['recipe.objcopy.hex.pattern']
    # The output extension ('.hex', '.bin', ...) is read from the tail of
    # the recipe text: characters [-5:-1] of the pattern.
    ext = command_text[-5:-1]
    out_file = os.path.join(args['build.path'],
                            args['build.project_name'] + ext)
    hex_command = Command(command_text)
    hex_command.setOutFile(out_file)
    return hex_command
def getSizeCommand(args):
    """Create the size-report Command (runs `size` on the .elf).

    The stock recipe's '-A' flag is dropped and '.hex' is redirected to
    '.elf' so the summary covers the linked image.
    """
    command_text = args['recipe.size.pattern']
    command_text = command_text.replace('-A', '').replace('.hex', '.elf')
    size_command = Command(command_text)
    size_command.setSizeCommand()
    return size_command
def genCommandList(args, cur_project, arduino_info):
    """Assemble the ordered list of build Commands for a project:
    compile project and core sources, archive the core, link the .elf,
    then generate the eep/hex images and the size report.

    Returns the Command list in execution order.
    """
    build_folder = args['build.path']
    project_folder = cur_project.getFolder()
    # Generated .cpp file that wraps the sketch source.
    build_cpp_file = genBuildCppFile(build_folder, cur_project, arduino_info)
    build_core_folder = args['build.core_folder']
    build_variant_folder = args['build.variant.path']
    lib_folder_list = getLibFolderListFromProject(cur_project, arduino_info)
    core_folder_list = [build_core_folder, build_variant_folder] + lib_folder_list
    # Derive the toolchain's own include folder from the compiler binary
    # location: <root>/bin -> <root> -> <root>/<toolchain_name>/include.
    compiler_bin_folder = args['compiler.path']
    compiler_folder = os.path.split(compiler_bin_folder)[0]
    compiler_folder = os.path.split(compiler_folder)[0]
    compiler_name = os.path.split(compiler_folder)[1]
    compiler_folder = os.path.join(compiler_folder, compiler_name)
    compiler_include_folder = os.path.join(compiler_folder, 'include')
    compiler_include_folder = compiler_include_folder.replace('/', os.path.sep)
    # core_folder_list.append(compiler_include_folder)
    includes_para = genIncludesPara(build_folder, project_folder, core_folder_list, compiler_include_folder)
    project_C_file_list = [build_cpp_file] + cur_project.getCSrcFileList() + cur_project.getAsmSrcFileList()
    core_C_file_list = sketch.getCSrcFileListFromFolderList(core_folder_list) + sketch.getAsmSrcFileListFromFolderList(core_folder_list)
    project_command_list = getCompileCommandList(project_C_file_list, args, includes_para)
    core_command_list = getCompileCommandList(core_C_file_list, args, includes_para)
    ar_command = getArCommand(args, core_command_list)
    elf_command = getElfCommand(args, project_command_list)
    eep_command = getEepCommand(args)
    hex_command = getHexCommand(args)
    size_command = getSizeCommand(args)
    full_compilation = constant.sketch_settings.get('full_compilation', True)
    archive_file_name = args['archive_file']
    archive_file = os.path.join(build_folder, archive_file_name)
    # A missing core archive forces a full rebuild regardless of settings.
    if not os.path.isfile(archive_file):
        full_compilation = True
    command_list = []
    command_list += project_command_list
    if full_compilation:
        # Rebuild the core archive from scratch.
        if os.path.isfile(archive_file):
            os.remove(archive_file)
        command_list += core_command_list
        command_list.append(ar_command)
    command_list.append(elf_command)
    # Only boards whose platform defines an eep recipe get the eep step.
    if args['recipe.objcopy.eep.pattern']:
        command_list.append(eep_command)
    command_list.append(hex_command)
    command_list.append(size_command)
    return command_list
def getCommandList(cur_project, arduino_info):
    """Return the full build command list for a project, or [] when the
    build arguments could not be assembled."""
    args = getFullArgs(cur_project, arduino_info)
    if not args:
        return []
    return genCommandList(args, cur_project, arduino_info)
def printSizeInfo(output_console, stdout, args):
    """Parse the size tool's output and print flash/RAM usage to the console.

    The size summary is expected on the second-to-last line of stdout as
    whitespace-separated numeric columns: text, data, bss, ...
    """
    max_flash = int(args['upload.maximum_size'])
    max_ram = int(args['upload.maximum_ram_size'])
    summary_line = stdout.split('\n')[-2].strip()
    fields = re.findall(r'\S+', summary_line)
    text_bytes, data_bytes, bss_bytes = (int(f) for f in fields[:3])
    flash_used = text_bytes + data_bytes   # flash holds .text + .data init image
    ram_used = data_bytes + bss_bytes      # RAM holds .data + .bss
    message = ('Binary sketch size: %d bytes '
               '(of a %d byte maximum, %.2f percent).\n'
               % (flash_used, max_flash, float(flash_used) / max_flash * 100))
    if max_ram > 0:
        message += ('Estimated memory use: %d bytes '
                    '(of a %d byte maximum, %.2f percent).\n'
                    % (ram_used, max_ram, float(ram_used) / max_ram * 100))
    output_console.printText(message)
def formatCommand(command):
    """Normalize a build command string for the current OS/Python runtime.

    On Python 2 + Windows, forward slashes coming from the recipe patterns
    are converted to backslashes and the whole command is wrapped in an
    extra pair of quotes; unicode commands are encoded to the system
    encoding so the subprocess layer can consume them.
    """
    if constant.sys_version < 3:
        if constant.sys_platform == 'windows':
            # Drop the slash that would otherwise escape a quote once the
            # path-separator conversion below has run.
            command = command.replace('/"', '"')
            command = command.replace('/', os.path.sep)
            command = '"' + command + '"'
    if constant.sys_version < 3:
        # `unicode` only exists on Python 2; this branch never runs on 3.
        if isinstance(command, unicode):
            command = command.encode(constant.sys_encoding)
    return command
| |
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""Clone of Nmap's first generation OS fingerprinting.
This code works with the first-generation OS detection and
nmap-os-fingerprints, which has been removed from Nmap on November 3,
2007 (https://github.com/nmap/nmap/commit/50c49819), which means it is
outdated.
To get the last published version of this outdated fingerprint
database, you can fetch it from
<https://raw.githubusercontent.com/nmap/nmap/9efe1892/nmap-os-fingerprints>.
"""
from __future__ import absolute_import
import os
import re
from scapy.data import KnowledgeBase
from scapy.config import conf
from scapy.arch import WINDOWS
from scapy.error import warning
from scapy.layers.inet import IP, TCP, UDP, ICMP, UDPerror, IPerror
from scapy.packet import NoPayload
from scapy.sendrecv import sr
import scapy.modules.six as six
# Default location of the (legacy) nmap-os-fingerprints database.
if WINDOWS:
    conf.nmap_base = os.environ["ProgramFiles"] + "\\nmap\\nmap-os-fingerprints"
else:
    conf.nmap_base = "/usr/share/nmap/nmap-os-fingerprints"


######################
## nmap OS fp stuff ##
######################

# One fingerprint test line looks like "Test(key=val%key=val...)";
# group 1 is the test name, group 2 the raw '%'-separated value list.
_NMAP_LINE = re.compile('^([^\\(]*)\\(([^\\)]*)\\)$')
class NmapKnowledgeBase(KnowledgeBase):
    """A KnowledgeBase specialized in Nmap first-generation OS
    fingerprints database. Loads from conf.nmap_base when self.filename is
    None.
    """
    def lazy_init(self):
        """Parse the fingerprint file into self.base as a list of
        (OS name, {test_name: {key: value}}) tuples."""
        try:
            fdesc = open(conf.nmap_base
                         if self.filename is None else
                         self.filename, "rb")
        except (IOError, TypeError):
            warning("Cannot open nmap database [%s]" % self.filename)
            self.filename = None
            return
        self.base = []
        name = None
        sig = {}
        for line in fdesc:
            # The file is opened in binary mode, so each line is bytes;
            # decode it first or the str operations below raise a
            # TypeError on Python 3.
            line = line.decode(errors="ignore").split('#', 1)[0].strip()
            if not line:
                continue
            if line.startswith("Fingerprint "):
                # A new fingerprint starts: flush the previous one.
                if name is not None:
                    self.base.append((name, sig))
                name = line[12:].strip()
                sig = {}
                continue
            # "Class" metadata lines are not used for matching.
            if line.startswith("Class "):
                continue
            line = _NMAP_LINE.search(line)
            if line is None:
                continue
            test, values = line.groups()
            # Values are '%'-separated key=value pairs.
            sig[test] = dict(val.split('=', 1) for val in
                             (values.split('%') if values else []))
        # Flush the final fingerprint in the file.
        if name is not None:
            self.base.append((name, sig))
        fdesc.close()
# Shared, lazily-loaded fingerprint database (reads conf.nmap_base).
nmap_kdb = NmapKnowledgeBase(None)
def nmap_tcppacket_sig(pkt):
    """Turn one TCP probe response into an nmap signature dict.

    A None packet (no reply) yields {"Resp": "N"}.
    """
    if pkt is None:
        return {"Resp": "N"}
    return {
        "DF": "Y" if pkt.flags.DF else "N",
        "W": "%X" % pkt.window,
        "ACK": "S++" if pkt.ack == 2 else "S" if pkt.ack == 1 else "O",
        # nmap lists TCP flags in the reverse of scapy's string order.
        "Flags": str(pkt[TCP].flags)[::-1],
        # First letter of each TCP option name, in wire order.
        "Ops": "".join(opt[0][0] for opt in pkt[TCP].options),
    }
def nmap_udppacket_sig(snd, rcv):
    """Build the nmap "PU" signature from the UDP probe (snd) and the
    ICMP port-unreachable answer (rcv); rcv may be None (no answer)."""
    if rcv is None:
        return {"Resp": "N"}
    sig = {}
    sig["DF"] = "Y" if rcv.flags.DF else "N"
    sig["TOS"] = "%X" % rcv.tos
    sig["IPLEN"] = "%X" % rcv.len
    sig["RIPTL"] = "%X" % rcv.payload.payload.len
    # Echoed-header checks: E = echoed intact, 0 = zeroed, F = changed.
    sig["RID"] = "E" if snd.id == rcv[IPerror].id else "F"
    if snd.chksum == rcv[IPerror].chksum:
        sig["RIPCK"] = "E"
    elif rcv[IPerror].chksum == 0:
        sig["RIPCK"] = "0"
    else:
        sig["RIPCK"] = "F"
    if snd.payload.chksum == rcv[UDPerror].chksum:
        sig["UCK"] = "E"
    elif rcv[UDPerror].chksum == 0:
        sig["UCK"] = "0"
    else:
        sig["UCK"] = "F"
    sig["ULEN"] = "%X" % rcv[UDPerror].len
    payload_echoed = (isinstance(rcv[UDPerror].payload, NoPayload) or
                      str(rcv[UDPerror].payload) == str(snd[UDP].payload))
    sig["DAT"] = "E" if payload_echoed else "F"
    return sig
def nmap_match_one_sig(seen, ref):
    """Score one observed test dict against one reference test dict.

    Returns the fraction of observed key/value pairs whose value appears
    among the reference's '|'-separated alternatives; an unanswered probe
    ({"Resp": "N"}) with no other match scores a fixed 0.7.
    """
    matches = 0
    for key, val in six.iteritems(seen):
        if val in ref.get(key, "").split("|"):
            matches += 1
    if matches == 0 and seen.get("Resp") == "N":
        return 0.7
    return float(matches) / len(seen)
def nmap_sig(target, oport=80, cport=81, ucport=1):
    """Send nmap's seven TCP probes plus one UDP probe to *target* and
    return the resulting signature dict (tests T1-T7 and PU).

    oport should be an open TCP port, cport a closed TCP port and ucport
    a closed UDP port on the target. Performs live network I/O via sr().
    """
    res = {}
    # Fixed TCP options shared by all seven probes.
    tcpopt = [("WScale", 10),
              ("NOP", None),
              ("MSS", 256),
              ("Timestamp", (123, 0))]
    # Probes 1-4 target the open port, 5-7 the closed one, each with the
    # classic nmap flag combination; source ports 5001-5007 encode the
    # test number.
    tests = [
        IP(dst=target, id=1) /
        TCP(seq=1, sport=5001 + i, dport=oport if i < 4 else cport,
            options=tcpopt, flags=flags)
        for i, flags in enumerate(["CS", "", "SFUP", "A", "S", "A", "FPU"])
    ]
    # The PU probe: a padded UDP datagram to a closed port.
    tests.append(IP(dst=target)/UDP(sport=5008, dport=ucport)/(300 * "i"))
    ans, unans = sr(tests, timeout=2)
    # Treat unanswered probes as (sent, None) pairs so they still produce
    # a "Resp: N" signature.
    ans.extend((x, None) for x in unans)
    for snd, rcv in ans:
        if snd.sport == 5008:
            res["PU"] = (snd, rcv)
        else:
            # Map source port 500N back to test TN.
            test = "T%i" % (snd.sport - 5000)
            if rcv is not None and ICMP in rcv:
                # An ICMP error is not a valid TCP answer for T1-T7.
                warning("Test %s answered by an ICMP" % test)
                rcv = None
            res[test] = rcv
    return nmap_probes2sig(res)
def nmap_probes2sig(tests):
    """Convert raw probe results ({"T1": pkt, ..., "PU": (snd, rcv)})
    into per-test signature dicts suitable for nmap_search()."""
    remaining = dict(tests)
    sig = {}
    # "PU" holds a (sent, received) pair and uses the UDP signature.
    if "PU" in remaining:
        sig["PU"] = nmap_udppacket_sig(*remaining.pop("PU"))
    for name, pkt in remaining.items():
        sig[name] = nmap_tcppacket_sig(pkt)
    return sig
def nmap_search(sigs):
    """Match observed signatures against the fingerprint base and return
    (best_score, [OS names sharing that best score])."""
    best_score = 0
    best_names = []
    for os_name, fingerprint in nmap_kdb.get_base():
        total = 0.0
        for test_name, ref_values in six.iteritems(fingerprint):
            if test_name in sigs:
                total += nmap_match_one_sig(sigs[test_name], ref_values)
        # Normalize by the number of observed tests.
        total /= len(sigs)
        if total > best_score:
            best_score = total
            best_names = [os_name]
        elif total == best_score:
            best_names.append(os_name)
    return best_score, best_names
@conf.commands.register
def nmap_fp(target, oport=80, cport=81):
    """nmap fingerprinting
    nmap_fp(target, [oport=80,] [cport=81,]) -> list of best guesses with accuracy

    oport should be an open TCP port and cport a closed TCP port on the
    target; probes are sent by nmap_sig() and matched by nmap_search().
    """
    sigs = nmap_sig(target, oport, cport)
    return nmap_search(sigs)
@conf.commands.register
def nmap_sig2txt(sig):
    """Render a signature dict in the textual nmap-os-fingerprints
    format, one "Test(key=val%key=val...)" line per test."""
    test_order = ["TSeq", "T1", "T2", "T3", "T4", "T5", "T6", "T7", "PU"]
    key_order = ["Class", "gcd", "SI", "IPID", "TS",
                 "Resp", "DF", "W", "ACK", "Flags", "Ops",
                 "TOS", "IPLEN", "RIPTL", "RID", "RIPCK", "UCK", "ULEN", "DAT"]
    # Unknown tests are appended after the canonical ones, in sig order.
    for name in sig:
        if name not in test_order:
            test_order.append(name)
    lines = []
    for test in test_order:
        values = sig.get(test)
        if values is None:
            continue
        pairs = "%".join("%s=%s" % (key, values[key])
                         for key in key_order if key in values)
        lines.append("%s(%s)" % (test, pairs))
    return "\n".join(lines)
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Inventory handlers for Placement API."""
import copy
from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
import webob
from nova.api.openstack.placement import microversion
from nova.api.openstack.placement import util
from nova.api.openstack.placement import wsgi_wrapper
from nova import db
from nova import exception
from nova.i18n import _
from nova import objects
# Resource class names: upper-case alphanumerics and underscores only.
RESOURCE_CLASS_IDENTIFIER = "^[A-Z0-9_]+$"

# Base JSONschema for one inventory record; the POST/PUT schemas below
# are derived from it by adjusting the required fields.
BASE_INVENTORY_SCHEMA = {
    "type": "object",
    "properties": {
        "resource_provider_generation": {
            "type": "integer"
        },
        "total": {
            "type": "integer",
            "maximum": db.MAX_INT,
            "minimum": 1,
        },
        "reserved": {
            "type": "integer",
            "maximum": db.MAX_INT,
            "minimum": 0,
        },
        "min_unit": {
            "type": "integer",
            "maximum": db.MAX_INT,
            "minimum": 1
        },
        "max_unit": {
            "type": "integer",
            "maximum": db.MAX_INT,
            "minimum": 1
        },
        "step_size": {
            "type": "integer",
            "maximum": db.MAX_INT,
            "minimum": 1
        },
        "allocation_ratio": {
            "type": "number",
            "maximum": db.SQL_SP_FLOAT_MAX
        },
    },
    "required": [
        "total",
        "resource_provider_generation"
    ],
    "additionalProperties": False
}

# POST carries the resource class in the body and omits the generation.
POST_INVENTORY_SCHEMA = copy.deepcopy(BASE_INVENTORY_SCHEMA)
POST_INVENTORY_SCHEMA['properties']['resource_class'] = {
    "type": "string",
    "pattern": RESOURCE_CLASS_IDENTIFIER,
}
POST_INVENTORY_SCHEMA['required'].append('resource_class')
POST_INVENTORY_SCHEMA['required'].remove('resource_provider_generation')

# Bulk PUT nests per-class records under "inventories"; the generation
# moves to the top level of the payload.
PUT_INVENTORY_RECORD_SCHEMA = copy.deepcopy(BASE_INVENTORY_SCHEMA)
PUT_INVENTORY_RECORD_SCHEMA['required'].remove('resource_provider_generation')
PUT_INVENTORY_SCHEMA = {
    "type": "object",
    "properties": {
        "resource_provider_generation": {
            "type": "integer"
        },
        "inventories": {
            "type": "object",
            "patternProperties": {
                RESOURCE_CLASS_IDENTIFIER: PUT_INVENTORY_RECORD_SCHEMA,
            }
        }
    },
    "required": [
        "resource_provider_generation",
        "inventories"
    ],
    "additionalProperties": False
}

# NOTE(cdent): We keep our own representation of inventory defaults
# and output fields, separate from the versioned object to avoid
# inadvertent API changes when the object defaults are changed.
OUTPUT_INVENTORY_FIELDS = [
    'total',
    'reserved',
    'min_unit',
    'max_unit',
    'step_size',
    'allocation_ratio',
]
INVENTORY_DEFAULTS = {
    'reserved': 0,
    'min_unit': 1,
    'max_unit': db.MAX_INT,
    'step_size': 1,
    'allocation_ratio': 1.0
}
def _extract_inventory(body, schema):
    """Extract and validate a single inventory from a JSON body,
    filling in INVENTORY_DEFAULTS for any omitted optional fields."""
    raw = util.extract_json(body, schema)
    inventory = dict(INVENTORY_DEFAULTS)
    inventory.update(raw)
    return inventory
def _extract_inventories(body, schema):
    """Extract and validate multiple inventories from a JSON body,
    applying INVENTORY_DEFAULTS to each resource class entry."""
    data = util.extract_json(body, schema)
    defaulted = {}
    for res_class, raw_record in data['inventories'].items():
        record = dict(INVENTORY_DEFAULTS)
        record.update(raw_record)
        defaulted[res_class] = record
    data['inventories'] = defaulted
    return data
def _make_inventory_object(resource_provider, resource_class, **data):
    """Single place to catch malformed Inventories and convert them to
    an HTTP 400 response."""
    # TODO(cdent): Some of the validation checks that are done here
    # could be done via JSONschema (using, for example, "minimum":
    # 0) for non-negative integers. It's not clear if that is
    # duplication or decoupling so leaving it as this for now.
    try:
        return objects.Inventory(resource_provider=resource_provider,
                                 resource_class=resource_class, **data)
    except (ValueError, TypeError) as exc:
        raise webob.exc.HTTPBadRequest(
            _('Bad inventory %(class)s for resource provider '
              '%(rp_uuid)s: %(error)s') % {'class': resource_class,
                                           'rp_uuid': resource_provider.uuid,
                                           'error': exc})
def _send_inventories(response, resource_provider, inventories):
    """Send a JSON representation of a list of inventories."""
    payload = _serialize_inventories(inventories, resource_provider.generation)
    response.status = 200
    response.body = encodeutils.to_utf8(jsonutils.dumps(payload))
    response.content_type = 'application/json'
    return response
def _send_inventory(response, resource_provider, inventory, status=200):
    """Send a JSON representation of one single inventory."""
    payload = _serialize_inventory(
        inventory, generation=resource_provider.generation)
    response.status = status
    response.body = encodeutils.to_utf8(jsonutils.dumps(payload))
    response.content_type = 'application/json'
    return response
def _serialize_inventory(inventory, generation=None):
    """Turn a single inventory into a dictionary."""
    data = {}
    for field in OUTPUT_INVENTORY_FIELDS:
        data[field] = getattr(inventory, field)
    # Truthiness check on purpose: _serialize_inventories passes None to
    # omit the key and carry the generation at its own top level.
    if generation:
        data['resource_provider_generation'] = generation
    return data
def _serialize_inventories(inventories, generation):
    """Turn a list of inventories in a dict keyed by resource class."""
    # Keying by resource class deduplicates: the last inventory per class wins.
    serialized = {
        inventory.resource_class: _serialize_inventory(inventory,
                                                       generation=None)
        for inventory in inventories
    }
    return {'resource_provider_generation': generation,
            'inventories': serialized}
@wsgi_wrapper.PlacementWsgify
@util.require_content('application/json')
def create_inventory(req):
    """POST to create one inventory.

    On success return a 201 response, a location header pointing
    to the newly created inventory and an application/json representation
    of the inventory.
    """
    context = req.environ['placement.context']
    uuid = util.wsgi_path_item(req.environ, 'uuid')
    resource_provider = objects.ResourceProvider.get_by_uuid(
        context, uuid)
    data = _extract_inventory(req.body, POST_INVENTORY_SCHEMA)
    resource_class = data.pop('resource_class')

    inventory = _make_inventory_object(resource_provider,
                                       resource_class,
                                       **data)

    try:
        resource_provider.add_inventory(inventory)
    except (exception.ConcurrentUpdateDetected,
            db_exc.DBDuplicateEntry) as exc:
        # Either the provider generation moved underneath us or an
        # inventory row for this class already exists -> 409.
        raise webob.exc.HTTPConflict(
            _('Update conflict: %(error)s') % {'error': exc})
    except (exception.InvalidInventoryCapacity,
            exception.NotFound) as exc:
        raise webob.exc.HTTPBadRequest(
            _('Unable to create inventory for resource provider '
              '%(rp_uuid)s: %(error)s') % {'rp_uuid': resource_provider.uuid,
                                           'error': exc})

    response = req.response
    # Point the Location header at the newly created inventory resource.
    response.location = util.inventory_url(
        req.environ, resource_provider, resource_class)
    return _send_inventory(response, resource_provider, inventory,
                           status=201)
@wsgi_wrapper.PlacementWsgify
def delete_inventory(req):
    """DELETE to destroy a single inventory.

    If the inventory is in use or resource provider generation is out
    of sync return a 409.

    On success return a 204 and an empty body.
    """
    context = req.environ['placement.context']
    rp_uuid = util.wsgi_path_item(req.environ, 'uuid')
    resource_class = util.wsgi_path_item(req.environ, 'resource_class')
    resource_provider = objects.ResourceProvider.get_by_uuid(context, rp_uuid)

    try:
        resource_provider.delete_inventory(resource_class)
    except (exception.ConcurrentUpdateDetected,
            exception.InventoryInUse) as exc:
        # Generation mismatch and in-use inventory both map to 409.
        raise webob.exc.HTTPConflict(
            _('Unable to delete inventory of class %(class)s: %(error)s') %
            {'class': resource_class, 'error': exc})
    except exception.NotFound as exc:
        raise webob.exc.HTTPNotFound(
            _('No inventory of class %(class)s found for delete: %(error)s') %
            {'class': resource_class, 'error': exc})

    response = req.response
    response.status = 204
    response.content_type = None
    return response
@wsgi_wrapper.PlacementWsgify
@util.check_accept('application/json')
def get_inventories(req):
    """GET a list of inventories.

    On success return a 200 with an application/json body representing
    a collection of inventories.
    """
    context = req.environ['placement.context']
    rp_uuid = util.wsgi_path_item(req.environ, 'uuid')
    try:
        resource_provider = objects.ResourceProvider.get_by_uuid(
            context, rp_uuid)
    except exception.NotFound as exc:
        raise webob.exc.HTTPNotFound(
            _("No resource provider with uuid %(uuid)s found : %(error)s") %
            {'uuid': rp_uuid, 'error': exc})

    inventories = objects.InventoryList.get_all_by_resource_provider_uuid(
        context, resource_provider.uuid)
    return _send_inventories(req.response, resource_provider, inventories)
@wsgi_wrapper.PlacementWsgify
@util.check_accept('application/json')
def get_inventory(req):
    """GET one inventory.

    On success return a 200 an application/json body representing one
    inventory.
    """
    context = req.environ['placement.context']
    rp_uuid = util.wsgi_path_item(req.environ, 'uuid')
    resource_class = util.wsgi_path_item(req.environ, 'resource_class')
    resource_provider = objects.ResourceProvider.get_by_uuid(context, rp_uuid)

    found = objects.InventoryList.get_all_by_resource_provider_uuid(
        context, resource_provider.uuid).find(resource_class)
    if not found:
        raise webob.exc.HTTPNotFound(
            _('No inventory of class %(class)s for %(rp_uuid)s') %
            {'class': resource_class, 'rp_uuid': resource_provider.uuid})
    return _send_inventory(req.response, resource_provider, found)
@wsgi_wrapper.PlacementWsgify
@util.require_content('application/json')
def set_inventories(req):
    """PUT to set all inventory for a resource provider.

    Create, update and delete inventory as required to reset all
    the inventory.

    If the resource generation is out of sync, return a 409.
    If an inventory to be deleted is in use, return a 409.
    If any inventory to be created or updated has settings which are
    invalid (for example reserved exceeds capacity), return a 400.

    On success return a 200 with an application/json body representing
    the inventories.
    """
    context = req.environ['placement.context']
    uuid = util.wsgi_path_item(req.environ, 'uuid')
    resource_provider = objects.ResourceProvider.get_by_uuid(
        context, uuid)

    data = _extract_inventories(req.body, PUT_INVENTORY_SCHEMA)
    # The client must echo the generation it last saw; a mismatch means
    # a concurrent writer updated the provider first.
    if data['resource_provider_generation'] != resource_provider.generation:
        raise webob.exc.HTTPConflict(
            _('resource provider generation conflict'))

    inv_list = []
    for res_class, inventory_data in data['inventories'].items():
        inventory = _make_inventory_object(
            resource_provider, res_class, **inventory_data)
        inv_list.append(inventory)
    inventories = objects.InventoryList(objects=inv_list)

    try:
        resource_provider.set_inventory(inventories)
    except exception.ResourceClassNotFound as exc:
        raise webob.exc.HTTPBadRequest(
            _('Unknown resource class in inventory for resource provider '
              '%(rp_uuid)s: %(error)s') % {'rp_uuid': resource_provider.uuid,
                                           'error': exc})
    except exception.InventoryWithResourceClassNotFound as exc:
        # Another writer removed the row between our read and this write.
        raise webob.exc.HTTPConflict(
            _('Race condition detected when setting inventory. No inventory '
              'record with resource class for resource provider '
              '%(rp_uuid)s: %(error)s') % {'rp_uuid': resource_provider.uuid,
                                           'error': exc})
    except (exception.ConcurrentUpdateDetected,
            exception.InventoryInUse,
            db_exc.DBDuplicateEntry) as exc:
        raise webob.exc.HTTPConflict(
            _('update conflict: %(error)s') % {'error': exc})
    except exception.InvalidInventoryCapacity as exc:
        raise webob.exc.HTTPBadRequest(
            _('Unable to update inventory for resource provider '
              '%(rp_uuid)s: %(error)s') % {'rp_uuid': resource_provider.uuid,
                                           'error': exc})

    return _send_inventories(req.response, resource_provider, inventories)
@wsgi_wrapper.PlacementWsgify
def delete_inventories(req):
    """DELETE all inventory for a resource provider.

    Delete inventory as required to reset all the inventory.
    If an inventory to be deleted is in use, return a 409 Conflict.
    On success return a 204 No content.
    Return 405 Method Not Allowed if the wanted microversion does not match.
    """
    # Bulk inventory delete only exists from microversion 1.5 onwards.
    microversion.raise_http_status_code_if_not_version(req, 405, (1, 5))
    context = req.environ['placement.context']
    rp_uuid = util.wsgi_path_item(req.environ, 'uuid')
    resource_provider = objects.ResourceProvider.get_by_uuid(context, rp_uuid)

    # Setting an empty InventoryList removes every inventory record.
    empty_inventories = objects.InventoryList(objects=[])
    try:
        resource_provider.set_inventory(empty_inventories)
    except (exception.ConcurrentUpdateDetected,
            exception.InventoryInUse) as exc:
        raise webob.exc.HTTPConflict(
            _('update conflict: %(error)s') % {'error': exc})

    response = req.response
    response.status = 204
    response.content_type = None
    return response
@wsgi_wrapper.PlacementWsgify
@util.require_content('application/json')
def update_inventory(req):
    """PUT to update one inventory.

    If the resource generation is out of sync, return a 409.
    If the inventory has settings which are invalid (for example
    reserved exceeds capacity), return a 400.

    On success return a 200 with an application/json body representing
    the inventory.
    """
    context = req.environ['placement.context']
    uuid = util.wsgi_path_item(req.environ, 'uuid')
    resource_class = util.wsgi_path_item(req.environ, 'resource_class')
    resource_provider = objects.ResourceProvider.get_by_uuid(
        context, uuid)

    data = _extract_inventory(req.body, BASE_INVENTORY_SCHEMA)
    # The client must echo the generation it last saw; a mismatch means
    # a concurrent writer updated the provider first.
    if data['resource_provider_generation'] != resource_provider.generation:
        raise webob.exc.HTTPConflict(
            _('resource provider generation conflict'))

    inventory = _make_inventory_object(resource_provider,
                                       resource_class,
                                       **data)

    try:
        resource_provider.update_inventory(inventory)
    except (exception.ConcurrentUpdateDetected,
            db_exc.DBDuplicateEntry) as exc:
        raise webob.exc.HTTPConflict(
            _('update conflict: %(error)s') % {'error': exc})
    except exception.InventoryWithResourceClassNotFound as exc:
        raise webob.exc.HTTPBadRequest(
            _('No inventory record with resource class for resource provider '
              '%(rp_uuid)s: %(error)s') % {'rp_uuid': resource_provider.uuid,
                                           'error': exc})
    except exception.InvalidInventoryCapacity as exc:
        raise webob.exc.HTTPBadRequest(
            _('Unable to update inventory for resource provider '
              '%(rp_uuid)s: %(error)s') % {'rp_uuid': resource_provider.uuid,
                                           'error': exc})
    return _send_inventory(req.response, resource_provider, inventory)
| |
"""
PDF Parser object
"""
import io
from .exc import PdfParseError
from .pdf_types import PdfRaw, PdfRawData, PdfDict, PdfObjectReference,\
PdfLiteralString, PdfHexString, PdfComment, \
PdfIndirectObject, PdfArray, PdfName, PdfStream
from .misc import BlackHole, buffer_data, consume_whitespace
from .pdf_constants import EOLS, WHITESPACE
__all__ = ['PdfParser']
class PdfParser(object):
"""Parser for PDF files. Takes raw PDF data and turns it into PDF _types_,
which can then be assembled into a document and document elements."""
DELIMITERS = set([b'/', b'<', b'(', b'{', b'[', b'%'])
ENDERS = WHITESPACE.union(DELIMITERS)
def __init__(self, document=None):
"""Initialize the PdfParser with a default PdfDocument"""
from .pdf_doc import PdfDocument
if document is None or isinstance(document, PdfDocument):
self._doc = document
else:
raise PdfParseError('document must be either None or a PdfParser')
def parse_simple_object(self, data, position=None):
"""Parse and return the simple object (i.e., not an indirect object)
described in the first argument located at either current stream
position or the position specified by the optional argument. The
stream's position will be left at the end of the object.
This method should only be used in places where an indirect object
reference is not valid."""
if position is not None:
data.seek(position)
token = self._get_next_token(data)
return self._process_token(data, token, None)
def parse_indirect_object(self, data, position=None):
"""Parse and return the indirect object described in the first argument
located at either current stream position or the position specified by
the optional argument. The stream's position will be left at the end of the object."""
if position is not None:
data.seek(position)
obj_no = self._process_token(data, self._get_next_token(data), None)
obj_gen = self._process_token(data, self._get_next_token(data), None)
if not isinstance(obj_no, int) or not isinstance(obj_gen, int):
raise PdfParseError('Object identification not found')
token = self._get_next_token(data)
if token != b'obj':
raise PdfParseError("Expected 'obj', got '{}'".format(token))
return self.parse_ind_object(data, [obj_no, obj_gen])
def _get_objects(self, data, closer=None):
"""Get all of the objects in data starting from the current position
until hitting EOF or the optional closer argument. Returns a list of
PdfTypes, ints, floats, and bools.
TODO: Restore PdfInt, etc."""
objects = []
while data.peek(1):
token = self._get_next_token(data, closer)
if not token: continue
if token == closer: break
element = self._process_token(data, token, objects)
if token not in (b'obj', b'xref'):
objects.append(element)
return objects
def parse_list(self, data, allow_invalid=False, disallowed=frozenset()):
"""Parse the data and return a list"""
data = buffer_data(data)
return [i for i in self.iterparse(data, allow_invalid, disallowed)]
def iterparse(self, data, allow_invalid=True,
disallowed=frozenset({b'R', b'obj', b'stream'})):
"""Generator-parser primarily for use in content streams."""
data = buffer_data(data)
while data.peek(1):
token = self._get_next_token(data, disallowed=disallowed)
if not token: continue
element = self._process_token(data, token, BlackHole(),
allow_invalid)
yield element
if isinstance(element, PdfRaw) and element == b'BI':
for i in self._parse_inline_image(data, disallowed):
yield i
def _parse_inline_image(self, data, disallowed):
"""Special method for handling inline images in content streams because
they are absolutely awful.
See Reference pp. 352-355"""
attrs = []
token = None
while data.peek(1) and token != b'ID':
token = self._get_next_token(data, disallowed=disallowed)
if not token: continue
attrs.append(self._process_token(data,token,BlackHole, True))
yield PdfDict({attrs[i]:attrs[i+1] for i in range(0,len(attrs)-1,2)})
data.read(1)
img = io.BytesIO()
buf = bytes(2)
while buf != b'EI':
buf = buf[1:]+data.read(1)
img.write(buf[1:])
yield PdfRawData(img.getvalue()[:-2]) # This is such an ugly hack
yield PdfRaw(b'EI')
@staticmethod
def _peek(data, n=1):
"""Peek ahead, returning the requested number of characters. If peek()
doesn't yield enough data, read and backup."""
if n <= 0: return b''
res = data.peek(n)[:n]
if len(res) == n:
return res
res += data.read(n-len(res))
data.seek(-len(res), 1)
return res
@classmethod
def _get_next_token(cls, data, closer=None, disallowed=frozenset()):
"""Get the next token in the stream, data. Closer is an optional
argument specifying the ending token of the current data structure,
e.g., >> for dicts."""
clen = len(closer) if closer is not None else None
token = io.BytesIO()
consume_whitespace(data, WHITESPACE)
while data.peek(1) and (token.getvalue() != closer) \
and not cls._is_token(data, token.getvalue(),
closer, clen, disallowed):
token.write(data.read(1))
return token.getvalue()
@classmethod
def _is_token(cls, data, value, closer=None, clen=None,
disallowed=frozenset()):
"""Is this a token?"""
if closer and not clen:
clen = len(closer)
if not data.peek(1): return True
elif not value: return False
next_char = cls._peek(data, 1)
not_obj = (value+next_char) not in cls.obj_types
if value in cls.obj_types and not_obj and value not in disallowed:
return True
elif closer and cls._peek(data, clen) == closer \
and value+cls._peek(data, clen-len(value)) != closer: #Last clause covers an issue with
return True #a dict as the last element of a dict
elif next_char in cls.ENDERS and not_obj:
return True
return False
def _process_token(self, data, token, objects, allow_invalid=False):
"""Process the data at the current position in the stream data into the
data type indicated by token.
Optional arguments:
objects - A list of objects already known.
allow_invalid - Don't raise an exception when an invalid token is
encountered, instead returning a PdfRaw object."""
try:
return self.obj_types[token](self, data, objects)
except KeyError:
try:
return self.parse_literal(token)
except PdfParseError:
#This lets us use this parse Content Streams
if allow_invalid: return PdfRaw(token)
else: raise
def parse_reference(self, data, objects):
"""References an indirect object, which may or may not have already
been defined."""
generation = objects.pop()
obj_no = objects.pop()
return PdfObjectReference(obj_no, generation, self._doc)
def parse_dict(self, data, objects):
"""A dict is just represented as a differently delimited array, so
we'll call that to get the elements"""
elems = self.parse_array(data, objects, b'>>')
return PdfDict(zip(elems[::2], elems[1::2]))
def parse_hex_string(self, data, objects):
"""Extract a PdfHexString from raw data"""
token = io.BytesIO(data.read(1))
token.seek(0, 2)
while data.peek(1) and token.getvalue()[-1:] != b'>':
token.write(data.read(1))
return PdfHexString(token.getvalue()[:-1])
def parse_literal_string(self, data, objects):
"""Extract a PdfLiteralString from raw data"""
token = io.BytesIO()
parens = 0
escaped = False
while data.peek(1):
b = data.read(1)
if escaped:
escaped = False
elif b == b'\\':
escaped = True
elif b == b'(':
parens += 1
elif b == b')':
if parens == 0:
return PdfLiteralString(token.getvalue())
else:
parens -= 1
token.write(b)
raise PdfParseError('Unterminated string literal')
def parse_array(self, data, objects, closer=b']'):
"""Extract a PdfArray from the data stream"""
elems = self._get_objects(data, closer)
return PdfArray(elems)
def parse_comment(self, data, objects):
"""Extract a PdfComment from the data stream"""
token = io.BytesIO()
while data.peek(1):
b = data.read(1)
if b in EOLS: break
token.write(b)
else:
return PdfComment(token.getvalue())
def parse_expression(self, data, objects):
"""TODO: This"""
pass
def parse_ind_object(self, data, objects):
"""Extract an indirect object from the data stream"""
gen = objects.pop()
obj_no = objects.pop()
obj = self._get_objects(data, closer=b'endobj')
return PdfIndirectObject(obj_no, gen, obj[0] if obj else None,
self._doc)
def parse_stream(self, data, objects):
"""Extract a PdfStream from the data stream"""
header = objects.pop()
lngth = header['Length']
if isinstance(lngth, PdfObjectReference):
lngth = lngth.value
if data.peek(1)[:1] == b'\r': data.read(1)
if data.peek(1)[:1] == b'\n': data.read(1)
s_data = data.read(lngth)
# Long peeks are not guaranteed to work, so we're going to do this
# hackish read/seek for now
close = data.read(11)
if close == b'\r\nendstream':
pass
elif close[:-1] == b'\nendstream':
data.seek(-1, 1)
elif close[:-2] == b'endstream':
data.seek(-2, 1)
else:
raise PdfParseError('endstream not found')
return PdfStream(header, s_data)
@staticmethod
def parse_literal(token):
"""Parse a simple literal number, boolean, or null"""
token = bytes(token)
if token == b'true': return True
elif token == b'false': return False
elif token == b'null': return None
elif token[:1] == b'/':
return PdfParser.parse_name(token)
else:
try:
return PdfParser.parse_number(token)
except ValueError:
raise PdfParseError('Invalid token found: '+repr(token))
@staticmethod
def parse_number(token):
"""Extract a numeric type from the data stream"""
try:
return int(token)
except ValueError:
return float(token)
    @staticmethod
    def parse_name(token):
        """Parse the token (a name beginning with b'/') into a PdfName."""
        return PdfName.from_token(token)
# dict of PDF object types besides literals to look for.
# Keys are the token that identifies that beginning of that type,
# and values are method that does the parsing
# This dict does not need to include simple literals (numerics, booleans
# nulls, and names). Each of these processing functions takes one argument,
# the current scope's objects list.
obj_types = {b'<<' : parse_dict,
b'<' : parse_hex_string,
b'(' : parse_literal_string,
b'[' : parse_array,
b'%' : parse_comment,
b'{' : parse_expression, # TODO
b'R' : parse_reference,
b'obj' : parse_ind_object,
b'stream' : parse_stream,
}
# List methods
def append(self): return None
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def all_messages():
    """
    Keep all messages in id (Indonesian locale).

    Returns:
        all messages in JSON (a dict of message-id -> Indonesian text)
    """
    return \
        {
            "scan_started": "Mesin Nettacker mulai ...",
            "options": "python nettacker.py [opsi]",
            "help_menu": "Tampilkan Menu Bantuan Nettacker",
            "license": "Harap baca lisensi dan perjanjian https://github.com/zdresearch/OWASP-Nettacker",
            "engine": "Mesin",
            "engine_input": "Opsi masukan mesin",
            "select_language": "pilih bahasa {0}",
            "range": "pindai semua IP dalam rentang",
            "subdomains": "cari dan pindai subdomain",
            "thread_number_connections": "nomor utas untuk koneksi ke host",
            "thread_number_hosts": "nomor utas untuk host pemindaian",
            "save_logs": "simpan semua log dalam file (results.txt, results.html, results.json)",
            "target": "Target",
            "target_input": "Opsi masukan target",
            "target_list": "daftar target (s), terpisah dengan \",\"",
            "read_target": "baca target (s) dari file",
            "scan_method_options": "Pindai opsi metode",
            "choose_scan_method": "pilih metode pemindaian {0}",
            "exclude_scan_method": "pilih metode pemindaian untuk mengecualikan {0}",
            "username_list": "daftar nama pengguna (s), terpisah dengan \",\"",
            "username_from_file": "baca nama pengguna (s) dari file",
            "password_seperator": "daftar kata sandi, terpisah dengan \",\"",
            "read_passwords": "baca kata sandi (s) dari file",
            "port_seperator": "daftar port (s), terpisah dengan \",\"",
            "time_to_sleep": "waktu untuk tidur di antara setiap permintaan",
            "error_target": "Tidak dapat menentukan target (s)",
            "error_target_file": "Tidak dapat menentukan target (s), tidak dapat membuka file: {0}",
            "thread_number_warning": "lebih baik menggunakan nomor utas lebih rendah dari 100, BTW kami terus ...",
            "set_timeout": "mengatur waktu tunggu hingga {0} detik, itu terlalu besar, bukan? "
                           "dengan cara kita melanjutkan ...",
            "scan_module_not_found": "modul pemindaian ini [{0}] tidak ditemukan!",
            "error_exclude_all": "Anda tidak dapat mengecualikan semua metode pemindaian",
            "exclude_module_error": "{0} modul yang Anda pilih untuk dikecualikan tidak ditemukan!",
            "method_inputs": "masukkan input metode, contoh: ftp_brute_users = test, admin & "
                             "ftp_brute_passwds = baca_from_file: /tmp/pass.txt&ftp_brute_port=21",
            "error_reading_file": "tidak bisa membaca file {0}",
            "error_username": "Tidak dapat menentukan nama pengguna (s), tidak dapat membuka file: {0}",
            "found": "{0} ditemukan! ({1}: {2})",
            "error_password_file": "Tidak dapat menentukan kata sandi (s), tidak dapat membuka file: {0}",
            "file_write_error": "file \"{0}\" tidak dapat ditulis!",
            "scan_method_select": "silakan pilih metode pemindaian Anda!",
            "remove_temp": "menghapus file temp!",
            "sorting_results": "hasil penyortiran!",
            "done": "selesai!",
            "start_attack": "mulai menyerang {0}, {1} dari {2}",
            "module_not_available": "modul ini \"{0}\" tidak tersedia",
            "error_platform": "sayangnya versi perangkat lunak ini hanya bisa dijalankan di linux / osx / windows.",
            "python_version_error": "Versi Python Anda tidak didukung!",
            "skip_duplicate_target": "lewati target duplikat (beberapa subdomain / domain mungkin "
                                     "memiliki IP dan Rentang yang sama)",
            "unknown_target": "jenis target yang tidak diketahui [{0}]",
            "checking_range": "memeriksa {0} rentang ...",
            "checking": "memeriksa {0} ...",
            "HOST": "TUAN RUMAH",
            "USERNAME": "NAMA PENGGUNA",
            "PASSWORD": "KATA SANDI",
            "PORT": "PELABUHAN",
            "TYPE": "MENGETIK",
            "DESCRIPTION": "DESKRIPSI",
            "verbose_level": "tingkat modus verbose (0-5) (default 0)",
            "software_version": "tampilkan versi perangkat lunak",
            "check_updates": "memeriksa pembaruan",
            "outgoing_proxy": "proxy koneksi keluar (kaus kaki). contoh kaus kaki5: 127.0.0.1:9050, "
                              "kaus kaki: //127.0.0.1: 9050 kaus kaki5: //127.0.0.1: 9050 atau kaus "
                              "kaki4: kaus kaki4: //127.0.0.1: 9050, autentikasi: kaus kaki: // "
                              "namapengguna: kata sandi @ 127.0.0.1, socks4: // username: password@127.0.0.1, "
                              "socks5: // username: password@127.0.0.1",
            "valid_socks_address": "masukkan alamat dan port kaus kaki yang valid. contoh kaus kaki5: "
                                   "127.0.0.1:9050, kaus kaki: //127.0.0.1: 9050, kaus kaki5: //127.0.0.1:"
                                   " 9050 atau kaus kaki4: kaus kaki4: //127.0.0.1: 9050, autentikasi: kaus"
                                   " kaki: // namapengguna: kata sandi @ 127.0.0.1, socks4: // username: "
                                   "password@127.0.0.1, socks5: // username: password@127.0.0.1",
            "connection_retries": "Retries ketika batas waktu koneksi (default 3)",
            "ftp_connection_timeout": "koneksi ftp ke {0}: {1} timeout, skipping {2}: {3}",
            "login_successful": "DITERUKAN SECARA SUKSES!",
            "login_list_error": "DITERUKAN SECARA SUKSES, IZIN DITOLAK UNTUK DAFTAR!",
            "ftp_connection_failed": "koneksi ftp ke {0}: {1} gagal, melewati seluruh langkah [proses {2} "
                                     "{3}]! akan ke langkah berikutnya",
            "input_target_error": "target input untuk {0} modul harus DOMAIN, HTTP atau SINGLE_IPv4, skipping {1}",
            "user_pass_found": "pengguna: {0} lulus: {1} host: {2} port: {3} ditemukan!",
            "file_listing_error": "(TIDAK ADA IZIN UNTUK DAFTAR DAFTAR)",
            "trying_message": "mencoba {0} dari {1} dalam proses {2} dari {3} {4}: {5} ({6})",
            "smtp_connection_timeout": "koneksi smtp ke {0}: {1} timeout, skipping {2}: {3}",
            "smtp_connection_failed": "koneksi smtp ke {0}: {1} gagal, melewati seluruh langkah [proses {2} {3}]! "
                                      "akan ke langkah berikutnya",
            "ssh_connection_timeout": "koneksi ssh ke {0}: {1} timeout, skipping {2}: {3}",
            "ssh_connection_failed": "koneksi ssh ke {0}: {1} gagal, melewati seluruh langkah [proses {2} {3}]!"
                                     " akan ke langkah berikutnya",
            "port/type": "{0} / {1}",
            "port_found": "host: {0} port: {1} ({2}) ditemukan!",
            "target_submitted": "target {0} dikirimkan!",
            "current_version": "Anda menjalankan OWASP Nettacker versi {0} {1} {2} {6} dengan nama kode {3} {4} {5}",
            "feature_unavailable": "fitur ini belum tersedia! silakan jalankan \"git clone "
                                   "https://github.com/zdresearch/OWASP-Nettacker.git atau install pip -U"
                                   " OWASP-Nettacker untuk mendapatkan versi terakhir.",
            "available_graph": "membangun grafik dari semua aktivitas dan informasi, Anda harus menggunakan "
                               "output HTML. grafik yang tersedia: {0}",
            "graph_output": "untuk menggunakan fitur grafik, nama file output Anda harus diakhiri dengan "
                            "\".html\" atau \".htm\"!",
            "build_graph": "membangun grafik ...",
            "finish_build_graph": "selesaikan grafik bangunan!",
            "pentest_graphs": "Grafik Pengujian Penetrasi",
            "graph_message": "Grafik ini dibuat oleh OWASP Nettacker. Grafik berisi semua kegiatan modul, "
                             "peta jaringan, dan informasi sensitif. Jangan bagikan file ini dengan siapa pun "
                             "jika tidak dapat diandalkan.",
            "nettacker_report": "Laporan OWASP Nettacker",
            "nettacker_version_details": "Detail Perangkat Lunak: OWASP Nettacker versi {0} [{1}] di {2}",
            "no_open_ports": "tidak ada port terbuka ditemukan!",
            "no_user_passwords": "tidak ada pengguna / kata sandi yang ditemukan!",
            "loaded_modules": "{0} modul dimuat ...",
            "graph_module_404": "modul grafik ini tidak ditemukan: {0}",
            "graph_module_unavailable": "modul grafik ini \"{0}\" tidak tersedia",
            "ping_before_scan": "ping sebelum memindai host",
            "skipping_target": "melewatkan seluruh target {0} dan metode pemindaian {1} karena "
                               "--ping-before-scan adalah benar dan tidak ada respon!",
            "not_last_version": "Anda tidak menggunakan versi terakhir OWASP Nettacker, harap perbarui.",
            "cannot_update": "tidak dapat memeriksa pembaruan, periksa koneksi internet Anda.",
            "last_version": "Anda menggunakan versi terakhir OWASP Nettacker ...",
            "directoy_listing": "daftar direktori ditemukan di {0}",
            "insert_port_message": "tolong masukkan port melalui switch -g atau --methods-args sebagai ganti url",
            "http_connection_timeout": "koneksi http {0} timeout!",
            "wizard_mode": "mulai mode wizard",
            "directory_file_404": "tidak ada direktori atau file yang ditemukan untuk {0} di port {1}",
            "open_error": "tidak dapat membuka {0}",
            "dir_scan_get": "nilai dir_scan_http_method harus GET atau HEAD, atur default ke GET.",
            "list_methods": "daftar semua metode args",
            "module_args_error": "tidak bisa mendapatkan argumen modul {0}",
            "trying_process": "mencoba {0} dari {1} dalam proses {2} dari {3} pada {4} ({5})",
            "domain_found": "domain ditemukan: {0}",
            "TIME": "WAKTU",
            "CATEGORY": "KATEGORI",
            "module_pattern_404": "tidak dapat menemukan modul apa pun dengan pola {0}!",
            "enter_default": "masukkan {0} | Default [{1}]>",
            "enter_choices_default": "masukkan {0} | pilihan [{1}] | Default [{2}]>",
            "all_targets": "targetnya",
            "all_thread_numbers": "nomor utas",
            "out_file": "nama file keluaran",
            "all_scan_methods": "metode pemindaian",
            "all_scan_methods_exclude": "metode pemindaian untuk dikecualikan",
            "all_usernames": "nama pengguna",
            "all_passwords": "kata sandi",
            "timeout_seconds": "batas waktu detik",
            "all_ports": "nomor port",
            "all_verbose_level": "tingkat verbose",
            "all_socks_proxy": "proxy kaus kaki",
            "retries_number": "nomor retries",
            "graph": "sebuah grafik",
            "subdomain_found": "subdomain ditemukan: {0}",
            "select_profile": "pilih profil {0}",
            "profile_404": "profil \"{0}\" tidak ditemukan!",
            "waiting": "menunggu {0}",
            "vulnerable": "rentan terhadap {0}",
            "target_vulnerable": "target {0}: {1} rentan terhadap {2}!",
            "no_vulnerability_found": "tidak ditemukan kerentanan! ({0})",
            "Method": "metode",
            "API": "API",
            "API_options": "Opsi API",
            "start_API": "memulai layanan API",
            "API_host": "Alamat host API",
            "API_port": "Nomor port API",
            "API_debug": "Mode debug API",
            "API_access_key": "Kunci akses API",
            "white_list_API": "cukup izinkan host daftar putih untuk terhubung ke API",
            "define_whie_list": "mendefinisikan host daftar putih, terpisah dengan, (contoh: 127.0.0.1, "
                                "192.168.0.1/24, 10.0.0.1-10.0.0.255)",
            "gen_API_access_log": "menghasilkan log akses API",
            "API_access_log_file": "Nama file log akses API",
            "API_port_int": "Port API harus berupa bilangan bulat!",
            "unknown_ip_input": "jenis masukan tidak dikenal, jenis yang diterima adalah "
                                "SINGLE_IPv4, RANGE_IPv4, CIDR_IPv4",
            "API_key": "* Kunci API: {0}",
            "ports_int": "port harus berupa bilangan bulat! (mis. 80 || 80,1080 || 80,1080-1300,9000,12000-15000)",
            "through_API": "Melalui API OWASP Nettacker",
            "API_invalid": "kunci API tidak valid",
            "unauthorized_IP": "IP Anda tidak diotorisasi",
            "not_found": "Tidak ditemukan!",
            "no_subdomain_found": "subdomain_scan: tidak ada subdomain yang ditemukan!",
            "viewdns_domain_404": "viewdns_reverse_ip_lookup_scan: tidak ada domain yang ditemukan!",
            "browser_session_valid": "sesi browser Anda valid",
            "browser_session_killed": "sesi browser Anda terbunuh",
            "updating_database": "memperbarui basis data ...",
            "database_connect_fail": "tidak bisa terhubung ke database!",
            "inserting_report_db": "memasukkan laporan ke database",
            "inserting_logs_db": "memasukkan log ke database",
            "removing_logs_db": "menghapus log lama dari db",
            "len_subdomain_found": "{0} subdomain (s) ditemukan!",
            "len_domain_found": "{0} domain (s) ditemukan!",
            "phpmyadmin_dir_404": "tidak ada dir phpmyadmin ditemukan!",
            "DOS_send": "mengirim paket DoS ke {0}",
            "host_up": "{0} sudah habis! Waktu yang diambil untuk melakukan ping kembali adalah {1}",
            "host_down": "Tidak bisa melakukan ping {0}!",
            "root_required": "ini harus dijalankan sebagai root",
            "admin_scan_get": "admin_scan_http_method value harus GET atau HEAD, atur default ke GET.",
            "telnet_connection_timeout": "koneksi telnet ke {0}: {1} timeout, skipping {2}: {3}",
            "telnet_connection_failed": "koneksi telnet ke {0}: {1} gagal, melewati seluruh langkah [proses "
                                        "{2} dari {3}]! akan ke langkah berikutnya",
            "http_auth_success": "sukses otentikasi dasar http - host: {2}: {3}, pengguna: {0}, lulus: {1} ditemukan!",
            "http_auth_failed": "Otentikasi dasar http gagal {0}: {3} menggunakan {1}: {2}",
            "http_form_auth_success": "keberhasilan otentikasi bentuk http - host: {2}: {3},"
                                      " pengguna: {0}, lulus: {1} ditemukan!",
            "http_form_auth_failed": "Otentikasi bentuk http gagal {0}: {3} menggunakan {1}: {2}",
            "http_ntlm_success": "Keberhasilan autentikasi ntlm http: host: {2}: {3}, pengguna: "
                                 "{0}, lulus: {1} ditemukan!",
            "http_ntlm_failed": "Otentikasi ntlm http gagal {0}: {3} menggunakan {1}: {2}",
            "no_response": "tidak bisa mendapatkan respons dari target",
            "category_framework": "kategori: {0}, kerangka kerja: {1} ditemukan!",
            "nothing_found": "tidak ditemukan apa pun di {0} dalam {1}!",
            "no_auth": "Tidak ada auth yang ditemukan pada {0}: {1}"
        }
| |
#!/usr/bin/python
# This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
import sys
import array
import random
from deap import base
from deap import creator
from deap import tools
import fgeneric
import numpy as np
from operator import attrgetter
import bbobbenchmarks as bn
# DEAP setup: single-objective minimisation fitness and an
# array-of-doubles genome type used by the GA below.
toolbox = base.Toolbox()
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", array.array, typecode="d",
               fitness=creator.FitnessMin)
# pool = multiprocessing.Pool()
# toolbox.register("map", futures.map)
def tupleize(func):
    """A decorator that tuple-izes the result of a function.

    Useful when the evaluation function returns a single value but the
    caller (e.g. DEAP fitness assignment) expects a sequence.

    Args:
        func: callable whose return value should be wrapped.

    Returns:
        A wrapper that returns ``(func(*args, **kargs),)``.
    """
    # functools.wraps preserves func's name/docstring on the wrapper,
    # which the original version lost.
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **kargs):
        return func(*args, **kargs),
    return wrapper
def main(func,
         NGEN,
         CXPB,
         MUTPB,
         dim,
         ftarget,
         tournsize,
         n_aval,
         ):
    """Run a generational GA with elitism and restart-on-stagnation.

    Args:
        func: evaluation function (BBOB benchmark wrapped by fgeneric).
        NGEN: number of generations.
        CXPB: crossover probability.
        MUTPB: mutation probability.
        dim: problem dimensionality (genome length).
        ftarget: target fitness; currently unused inside the loop body.
        tournsize: tournament size for selection.
        n_aval: total evaluation budget, used to derive population size.

    Returns:
        tools.Logbook with per-generation statistics.

    NOTE(review): depends on the module-level global ``f_name`` being
    defined before this function runs -- confirm callers set it.
    """
    # NOTE(review): this registration is immediately superseded by the
    # uniform(-4, 4) registration further down -- presumably dead code.
    toolbox.register("attr_float", random.random)
    toolbox.register("select", tools.selTournament, tournsize=tournsize)
    toolbox.register(
        "mutate",
        tools.mutGaussian,
        mu=0,
        sigma=1,
        indpb=0.1
    )
    # mutShuffleIndexes
    stats = tools.Statistics(key=lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)
    # calculating the number of individuals of the
    # populations based on the number of executions
    y = int(n_aval / NGEN)
    x = n_aval - y * NGEN
    n = x + y
    toolbox.register("evaluate", func)
    toolbox.decorate("evaluate", tupleize)
    toolbox.register("attr_float", random.uniform, -4, 4)
    toolbox.register("mate", tools.cxSimulatedBinaryBounded, eta = 0, low= -4, up = 4)
    toolbox.register("individual", tools.initRepeat, creator.Individual,
                     toolbox.attr_float, dim)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    logbook = tools.Logbook()
    logbook.header = "gen", "min", "avg", "max", "std"
    pop = toolbox.population(n)
    # get initial pop: seed the first individual from a previously saved
    # genome file if one exists (reads the global ``f_name``)
    filename = ("../pseudo-adaptative/init_pop_f" +
                str(f_name) +
                "_dim_" +
                str(dim) +
                "_tournsize_2.txt")
    if((np.DataSource().exists(filename)) is True):
        with open(filename, 'r') as f:
            # SECURITY NOTE(review): eval() on file contents executes
            # arbitrary code; only acceptable for trusted local files.
            a = eval(f.readline())
            # redundant: the with-block already closes the file
            f.close()
        for index in range(len(pop[0])):
            pop[0][index] = a[index]
    # Evaluate the entire population
    # 2 model.bins: real data, generated model
    fitnesses = list(toolbox.map(toolbox.evaluate, pop))
    # numero_avaliacoes = len(pop)
    # normalize fitnesses
    # fitnesses = normalizeFitness(fitnesses)
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit
    for g in range(NGEN):
        # Select the next generation individuals
        offspring = toolbox.select(pop, len(pop))
        # create offspring
        # NOTE(review): this clones ``pop``, discarding the tournament
        # selection result just computed above -- looks like a bug;
        # confirm whether cloning ``offspring`` was intended.
        offspring = list(toolbox.map(toolbox.clone, pop))
        # Apply crossover and mutation on the offspring
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            if random.random() < CXPB:
                toolbox.mate(child1, child2)
                del child1.fitness.values
                del child2.fitness.values
        for mutant in offspring:
            if random.random() < MUTPB:
                toolbox.mutate(mutant)
                del mutant.fitness.values
        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = list(toolbox.map(toolbox.evaluate, invalid_ind))
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit
        # The population is entirely replaced by the offspring,
        # but the last ind replaced by best_pop
        # Elitism
        best_pop = tools.selBest(pop, 1)[0]
        offspring = sorted(offspring, key=attrgetter("fitness"))
        offspring[0] = best_pop
        random.shuffle(offspring)
        pop[:] = offspring
        record = stats.compile(pop)
        logbook.record(gen=g, **record)
        # Restart on stagnation: when the fitness spread collapses,
        # regenerate the population but keep the best individual.
        if record["std"] < 10e-12:
            best_pop = tools.selBest(pop, 1)[0]
            pop = toolbox.population(n)
            pop = sorted(pop, key=attrgetter("fitness"))
            pop[0] = best_pop
            fitnesses = list(toolbox.map(toolbox.evaluate, pop))
            for ind, fit in zip(pop, fitnesses):
                ind.fitness.values = fit
            # NOTE(review): this does not advance the for-loop counter;
            # it only shifts the generation number recorded below.
            g += 1
            record = stats.compile(pop)
            logbook.record(gen=g, **record)
    # Persist the best genome so later runs can seed from it.
    filename = ("../SBX/init_pop_f" +
                str(f_name) +
                "_dim_" +
                str(dim) +
                "_tournsize_2.txt")
    if((np.DataSource().exists(filename)) is False):
        with open(filename, "w") as myfile:
            for element in best_pop:
                myfile.write(str(element))
                myfile.write(str(', '))
            myfile.write(str('\n'))
            # redundant: the with-block already closes the file
            myfile.close()
    return logbook
if __name__ == "__main__":
    # Parse -params <file> and -tournsize <int> from the command line.
    # NOTE(review): gaParams/tournsize remain undefined if the flags are
    # missing, causing a NameError below -- confirm intended usage.
    for i in range(len(sys.argv) - 1):
        if (sys.argv[i] == '-params'):
            gaParams = sys.argv[i + 1]
        elif (sys.argv[i] == '-tournsize'):
            tournsize = int(sys.argv[i + 1])
    f = open(gaParams, "r")
    keys = ['key', 'NGEN', 'n_aval', 'CXPB', 'MUTPB', 'dim', 'seed', 'tournsize']
    params = dict()
    # Read whitespace-separated parameter values (last non-comment line
    # wins), coercing each field to its expected type.
    for line in f:
        if line[0] == '#':
            continue
        tokens = line.split()
        for key, value in zip(keys, tokens):
            if key == 'key':
                params[key] = value
            elif key == 'CXPB' or key == 'MUTPB':
                params[key] = float(value)
            else:
                params[key] = int(value)
    f.close()
    # Maximum number of restart for an algorithm that detects stagnation
    # Create a COCO experiment that will log the results under the
    # ./output directory
    e = fgeneric.LoggingFunction('output')
    # Iterate over all desired test dimensions
    # for dim in (2, 3, 5, 10, 20, 40):
    dim = params['dim']
    # Set the maximum number function evaluation granted to the algorithm
    # This is usually function of the dimensionality of the problem
    # Iterate over a set of benchmarks (noise free benchmarks here)
    # for f_name in bn.nfreeIDs:
    f_name = 19
    # Iterate over all the instance of a single problem
    # Rotation, translation, etc.
    # for instance in chain(range(1, 6), range(21, 31)):
    instance = 1
    # Set the function to be used (problem) in the logger
    # NOTE(review): iinstance is hard-coded to 1, so the ``instance``
    # variable above is effectively unused -- confirm.
    e.setfun(*bn.instantiate(f_name, iinstance=1))
    # Independent restarts until maxfunevals or ftarget is reached
    # Run the algorithm with the remaining
    # number of evaluations
    # random.seed(params['seed'])
    logbook = main(e.evalfun,
                   NGEN=params['NGEN'],
                   CXPB=params['CXPB'],
                   MUTPB=params['MUTPB'],
                   dim=dim,
                   n_aval=params['n_aval'],
                   tournsize=tournsize,
                   ftarget=e.ftarget)
    # Append this run's logbook to the per-configuration results file.
    filename = ("../SBX/f" +
                str(f_name) +
                "_dim_" +
                str(dim) +
                "_tournsize_" +
                str(tournsize) +
                ".txt")
    with open(filename, "a") as myfile:
        myfile.write(str(logbook))
        myfile.write(str('\n'))
        # redundant: the with-block already closes the file
        myfile.close()
| |
# REST Framework
from rest_framework import generics, permissions, status
from rest_framework.decorators import api_view
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework.test import APIRequestFactory, APITestCase
# User class from django
from django.contrib.auth.models import User, UserManager
# Models and Serializers
from main.serializers import UserSerializer, TopicSerializer, FeedSerializer, PostSerializer
from main.models import Topic, Feed, Post
from django.forms.models import model_to_dict
## Transaction Management
from django.db import transaction
# Python built-ins required for tests
import time
import datetime
import pytz
import traceback
class FeedCreateTests(APITestCase):
    """Tests creating a Feed by URL through POST /feeds/create/."""

    @classmethod
    def setUpClass(cls):
        # Chain up first so the unittest/Django class-fixture machinery
        # runs; the original skipped super(), which breaks it.
        super(FeedCreateTests, cls).setUpClass()
        cls.user = User.objects.create_user(username="FeedTests")
        cls.user.save()
        cls.f1_url = "http://home.uchicago.edu/~jharriman/example-rss.xml"
        cls.f1_id = None
        cls.f1 = None
        # Expected post payloads for the example feed.
        cls.f1_post_list = [
            {
                "id": 6,
                "feedURL": "http://www.nytimes.com/services/xml/rss/nyt/US.xml",
                "author": "By KATIE HAFNER",
                "category": [],
                "rights": "",
                "title": "Bracing for the Falls of an Aging Nation",
                "subtitle": "",
                "content": "As Americans live longer, fall-related injuries and deaths are rising, and homes for the elderly are tackling the problem in ways large and small \u2014 even by changing the color of their carpeting and toilet seats.<img border=\"0\" height=\"1\" src=\"http://rss.nytimes.com/c/34625/f/642562/s/4014157b/sc/36/mf.gif\" width=\"1\" /><br clear=\"all\" />",
                "generator": "",
                "guid": "http://www.nytimes.com/interactive/2014/11/03/health/bracing-for-the-falls-of-an-aging-nation.html",
                "url": "http://rss.nytimes.com/c/34625/f/642562/s/4014157b/sc/36/l/0L0Snytimes0N0Cinteractive0C20A140C110C0A30Chealth0Cbracing0Efor0Ethe0Efalls0Eof0Ean0Eaging0Enation0Bhtml0Dpartner0Frss0Gemc0Frss/story01.htm",
                "contributor": "",
                "pubDate": "2014-11-02T13:43:10Z",
                "updated": "2014-11-02T13:43:10Z",
                "ackDate": 1415855355.56354,
                "feed": 2
            },
            {
                "id": 5,
                "feedURL": "http://www.nytimes.com/services/xml/rss/nyt/US.xml",
                "author": "By LYNN VAVRECK",
                "category": ["Elections, Senate","United States Politics and Government","Elections, House of Representatives", "Voting and Voters", "Midterm Elections (2014)"],
                "rights": "",
                "title": "Midterm Calculus: The Economy Elects Presidents. Presidents Elect Congress.",
                "subtitle": "",
                "content": "While presidential elections are shaped largely by economic performance, the largest factor in midterm elections is the president.",
                "generator": "",
                "guid": "http://www.nytimes.com/2014/11/03/upshot/the-economy-elects-presidents-presidents-elect-congress.html",
                "url": "http://rss.nytimes.com/c/34625/f/642562/s/40134217/sc/1/l/0L0Snytimes0N0C20A140C110C0A30Cupshot0Cthe0Eeconomy0Eelects0Epresidents0Epresidents0Eelect0Econgress0Bhtml0Dpartner0Frss0Gemc0Frss/story01.htm",
                "contributor": "",
                "pubDate": "2014-11-02T14:00:22Z",
                "updated": "2014-11-02T14:00:22Z",
                "ackDate": 1415855355.55587,
                "feed": 2
            }]
        # NOTE(review): captured while f1_id is still None; kept for
        # parity with FeedTests but unused by the active test below.
        cls.f1_details = {
            "id": cls.f1_id,
            "author": "",
            "category": "",
            "contributor": "",
            "description": "US",
            "docURL": "",
            "editorAddr": "",
            "generator": "",
            "guid": "",
            "language": "en-us",
            "logo": "http://graphics8.nytimes.com/images/misc/NYT_logo_rss_250x40.png",
            "rights": "Copyright 2014 The New York Times Company",
            "subtitle": "US",
            "title": "NYT > U.S.",
            "webmaster": "",
            "URL": "http://www.nytimes.com/services/xml/rss/nyt/US.xml",
            "ttl": 5,
            "skipDays": None,
            "skipHours": None,
            "pubDate": "2014-11-02T16:13:02Z",
            "updated": "2014-11-06T01:00:31Z",
            "posts": [2,1]
        }

    @classmethod
    def tearDownClass(cls):
        # Make sure to delete created DB rows so we don't run into
        # other tests, then chain up.
        cls.user.topics.get(name="Uncategorized").delete()
        cls.user.delete()
        super(FeedCreateTests, cls).tearDownClass()

    def test_create_feed(self):
        """Test that Feed can be created by URL"""
        response = self.client.post('/feeds/create/', {"url" : self.f1_url})
        self.assertEqual(response.status_code, 200)
class FeedTests(APITestCase):
    """Tests reading Feed details and its post list through the API."""

    @classmethod
    def setUpClass(cls):
        # Chain up first so the unittest/Django class-fixture machinery
        # runs; the original skipped super(), which breaks it.
        super(FeedTests, cls).setUpClass()
        cls.user = User.objects.create_user(username="FeedTests")
        cls.user.save()
        cls.f1_url = "http://home.uchicago.edu/~jharriman/example-rss.xml"
        cls.f1_id = None
        cls.f1 = None
        # Expected post payloads (server-assigned ids are stripped before
        # comparison in test_post_list_exists).
        cls.f1_post_list = [
            {
                "feedURL": u"http://www.nytimes.com/services/xml/rss/nyt/US.xml",
                "author": u"By KATIE HAFNER",
                "category": [],
                "rights": u"",
                "title": u"Bracing for the Falls of an Aging Nation",
                "subtitle": u"",
                "content": u"As Americans live longer, fall-related injuries and deaths are rising, and homes for the elderly are tackling the problem in ways large and small \u2014 even by changing the color of their carpeting and toilet seats.<img border=\"0\" height=\"1\" src=\"http://rss.nytimes.com/c/34625/f/642562/s/4014157b/sc/36/mf.gif\" width=\"1\" /><br clear=\"all\" />",
                "generator": u"",
                "guid": u"http://www.nytimes.com/interactive/2014/11/03/health/bracing-for-the-falls-of-an-aging-nation.html",
                "url": u"http://rss.nytimes.com/c/34625/f/642562/s/4014157b/sc/36/l/0L0Snytimes0N0Cinteractive0C20A140C110C0A30Chealth0Cbracing0Efor0Ethe0Efalls0Eof0Ean0Eaging0Enation0Bhtml0Dpartner0Frss0Gemc0Frss/story01.htm",
                "contributor": u"",
                "pubDate": u"2014-11-02T13:43:10Z",
                "updated": u"2014-11-02T13:43:10Z",
                "ackDate": 1415855355.56354,
                "feed": 2
            },
            {
                "feedURL": u"http://www.nytimes.com/services/xml/rss/nyt/US.xml",
                "author": u"By LYNN VAVRECK",
                "category": ["Elections, Senate","United States Politics and Government","Elections, House of Representatives", "Voting and Voters", "Midterm Elections (2014)"],
                "rights": u"",
                "title": u"Midterm Calculus: The Economy Elects Presidents. Presidents Elect Congress.",
                "subtitle": u"",
                "content": u"While presidential elections are shaped largely by economic performance, the largest factor in midterm elections is the president.",
                "generator": u"",
                "guid": u"http://www.nytimes.com/2014/11/03/upshot/the-economy-elects-presidents-presidents-elect-congress.html",
                "url": u"http://rss.nytimes.com/c/34625/f/642562/s/40134217/sc/1/l/0L0Snytimes0N0C20A140C110C0A30Cupshot0Cthe0Eeconomy0Eelects0Epresidents0Epresidents0Eelect0Econgress0Bhtml0Dpartner0Frss0Gemc0Frss/story01.htm",
                "contributor": u"",
                "pubDate": u"2014-11-02T14:00:22Z",
                "updated": u"2014-11-02T14:00:22Z",
                "ackDate": 1415855355.55587,
                "feed": 2
            }]
        cls.f1 = Feed.createByURL(cls.f1_url)
        cls.p1 = Post.objects.get(guid="http://www.nytimes.com/interactive/2014/11/03/health/bracing-for-the-falls-of-an-aging-nation.html")
        cls.p2 = Post.objects.get(guid="http://www.nytimes.com/2014/11/03/upshot/the-economy-elects-presidents-presidents-elect-congress.html")
        # NOTE(review): "id" is captured while f1_id is still None; the
        # unordered comparison below only inspects keys, so this passes.
        cls.f1_details = {
            "id": cls.f1_id,
            "author": u"",
            "category": u"",
            "contributor": u"",
            "description": u"US",
            "docURL": u"",
            "editorAddr": u"",
            "generator": u"",
            "guid": u"",
            "language": u"en-us",
            "logo": u"http://graphics8.nytimes.com/images/misc/NYT_logo_rss_250x40.png",
            "rights": u"Copyright 2014 The New York Times Company",
            "subtitle": u"US",
            "title": u"NYT > U.S.",
            "webmaster": u"",
            "URL": u"http://www.nytimes.com/services/xml/rss/nyt/US.xml",
            "ttl": 5,
            "skipDays": None,
            "skipHours": None,
            "pubDate" : datetime.datetime(2014, 11, 2, 16, 13, 2, tzinfo=pytz.UTC),
            "updated": datetime.datetime(2014, 11, 6, 1, 0, 31, tzinfo=pytz.UTC),
            "posts": [cls.p1.id,cls.p2.id]
        }
        cls.f1_id = cls.f1.id

    @classmethod
    def tearDownClass(cls):
        # Make sure to delete the feed so we don't run into other tests
        cls.user.topics.get(name="Uncategorized").delete()
        cls.user.delete()
        cls.f1.delete()
        super(FeedTests, cls).tearDownClass()

    def test_feed_detail_exists(self):
        """Test accuracy of feed details"""
        response = self.client.get("/feeds/%d" % (self.f1_id, ))
        self.assertEqual(response.status_code, 200)
        self.assertItemsEqual(response.data, self.f1_details)

    def test_post_list_exists(self):
        """Test accuracy of post list"""
        response = self.client.get("/feeds/%d/posts/" % (self.f1_id, ))
        self.assertEqual(response.status_code, 200)
        # Delete the ids, since they are added by the server and not
        # really relevant to checking correctness
        for post in response.data:
            del post["id"]
        # Bug fix: the original iterated over the 2-tuple
        # ``response.data, cls.f1_post_list`` and unpacked each list,
        # comparing post[0] against post[1]; zip pairs them correctly.
        for res, exp in zip(response.data, self.f1_post_list):
            self.assertItemsEqual(res, exp)

    def test_delete_feed(self):
        """Feed deletion should fail - to build our database, a feed is never deleted"""
        response = self.client.delete("/feeds/%d" % (self.f1_id,))
        self.assertEqual(response.status_code, 405)
class PostTests(APITestCase):
    """Tests reading Post details for a feed through the API."""

    @classmethod
    def setUpClass(cls):
        # Chain up first so the unittest/Django class-fixture machinery
        # runs; the original skipped super(), which breaks it.
        super(PostTests, cls).setUpClass()
        cls.f1 = Feed.createByURL("http://home.uchicago.edu/~jharriman/example-rss.xml")
        cls.f1.save()
        cls.f1_id = cls.f1.id
        cls.p1_id = cls.f1.posts.all()[0].id
        # Expected serialized form of the first post in the example feed.
        cls.p1_data = {
            'Length': 0,
            'enclosures': [],
            u'id': cls.p1_id,
            'feedURL': u'http://www.nytimes.com/services/xml/rss/nyt/US.xml',
            'author': u'By KATIE HAFNER',
            'category': [],
            'rights': u'',
            'title': u'Bracing for the Falls of an Aging Nation',
            'subtitle': u'',
            'content': u'As Americans live longer, fall-related injuries and deaths are rising, and homes for the elderly are tackling the problem in ways large and small \u2014 even by changing the color of their carpeting and toilet seats.<img border="0" height="1" src="http://rss.nytimes.com/c/34625/f/642562/s/4014157b/sc/36/mf.gif" width="1" /><br clear="all" />',
            'generator': u'',
            'guid': u'http://www.nytimes.com/interactive/2014/11/03/health/bracing-for-the-falls-of-an-aging-nation.html',
            'url': u'http://rss.nytimes.com/c/34625/f/642562/s/4014157b/sc/36/l/0L0Snytimes0N0Cinteractive0C20A140C110C0A30Chealth0Cbracing0Efor0Ethe0Efalls0Eof0Ean0Eaging0Enation0Bhtml0Dpartner0Frss0Gemc0Frss/story01.htm',
            'contributor': u'',
            'pubDate': datetime.datetime(2014, 11, 2, 13, 43, 10, tzinfo=pytz.UTC),
            'updated': datetime.datetime(2014, 11, 2, 13, 43, 10, tzinfo=pytz.UTC),
            'ackDate': 1415858199.31228,
            'feed': cls.f1_id,
        }

    @classmethod
    def tearDownClass(cls):
        cls.f1.delete()
        super(PostTests, cls).tearDownClass()

    def test_post_detail_exists(self):
        """Test accuracy of post"""
        response = self.client.get('/feeds/%d/posts/' % (self.f1_id, ))
        self.assertEqual(response.status_code, 200)
        self.assertItemsEqual(
            [p for p in response.data if p['id'] == self.p1_id][0],
            self.p1_data)
| |
#!/usr/bin/env python3
#
# Tests the noise generators
#
# This file is part of PINTS (https://github.com/pints-team/pints/) which is
# released under the BSD 3-clause license. See accompanying LICENSE.md for
# copyright notice and full license details.
#
import unittest
import numpy as np
import pints.noise as pn
class TestNoise(unittest.TestCase):
    """
    Tests if the noise generators work ok.
    """

    def test_independent_noise(self):
        # Test on numpy vector, tuple shape
        clean = np.asarray([1, 2, 3, 10])
        noisy = clean + pn.independent(1, clean.shape)
        self.assertFalse(np.all(clean == noisy))

        # Test integer shape (must be accepted without error)
        noise = pn.independent(3, 1000)

        # No need to test noise characteristics extensively: handled by numpy!
        np.random.seed(1)
        noise = pn.independent(3, 1000)
        self.assertTrue(np.abs(np.mean(noise)) < 0.2)
        self.assertTrue(np.abs(np.std(noise) - 3) < 0.3)

        # Test multidimensional arrays, single sigma
        noise = pn.independent(3, [10, 10])
        self.assertEqual(noise.shape, (10, 10))

        # Standard deviation cannot be 0 or less (handled by numpy)
        self.assertRaisesRegex(
            ValueError, 'scale', pn.independent, -1, clean.shape)

        # Shape must be a nice shape (handled by numpy)
        self.assertRaises(TypeError, pn.independent, 1, 'hello')

    def test_ar1(self):
        # Simple test
        clean = np.array([1, 2, 3, 10, 15, 8])
        noisy = clean + pn.ar1(0.5, 5.0, len(clean))
        self.assertFalse(np.all(clean == noisy))

        # Test length
        self.assertEqual(len(pn.ar1(0.1, 1, 100)), 100)

        # Magnitude of rho must be less than 1
        pn.ar1(0.9, 5, 10)
        pn.ar1(-0.9, 5, 10)
        self.assertRaisesRegex(ValueError, 'rho', pn.ar1, 1.1, 5, 10)
        self.assertRaisesRegex(ValueError, 'rho', pn.ar1, -1.1, 5, 10)

        # Sigma cannot be negative
        pn.ar1(0.5, 5, 10)
        self.assertRaisesRegex(
            ValueError, 'Standard deviation', pn.ar1, 0.5, -5, 10)

        # N cannot be negative
        pn.ar1(0.5, 5, 1)
        self.assertRaisesRegex(
            ValueError, 'Number of values', pn.ar1, 0.5, 5, 0)

        # Test noise properties
        self.assertTrue(np.abs(np.std(pn.ar1(0.99, 1, 1000)) -
                               np.std(pn.ar1(0.50, 1, 1000)) < 5))
        self.assertTrue(np.abs(np.std(pn.ar1(0.50, 1, 1000)) -
                               np.std(pn.ar1(0.50, 5, 1000)) < 2))
        self.assertTrue(np.abs(np.mean(pn.ar1(-0.5, 1, 10000))) < 5)

    def test_ar1_unity(self):
        # Simple test
        clean = np.asarray([1.3, 2, 3, 10, 15, 8])
        noisy = clean + pn.ar1_unity(0.5, 5.0, len(clean))
        self.assertFalse(np.all(clean == noisy))

        # Test length
        self.assertEqual(len(pn.ar1_unity(0.1, 1, 100)), 100)

        # Magnitude of rho must be less than 1
        # BUG FIX: these sanity calls previously exercised pn.ar1 instead of
        # pn.ar1_unity (copy-paste from test_ar1), so the unity variant's
        # happy path was never tested here.
        pn.ar1_unity(0.9, 5, 10)
        pn.ar1_unity(-0.5, 5, 10)
        self.assertRaisesRegex(ValueError, 'rho', pn.ar1_unity, 1.1, 5, 10)
        self.assertRaisesRegex(ValueError, 'rho', pn.ar1_unity, -1.1, 5, 10)

        # Sigma cannot be negative
        pn.ar1_unity(0.5, 5, 10)
        self.assertRaisesRegex(
            ValueError, 'Standard deviation', pn.ar1_unity, 0.5, -5, 10)

        # N cannot be negative
        # BUG FIX: was pn.ar1(0.5, 5, 1) -- same copy-paste error as above.
        pn.ar1_unity(0.5, 5, 1)
        self.assertRaisesRegex(
            ValueError, 'Number of values', pn.ar1_unity, 0.5, 5, 0)

        # Test noise properties
        self.assertTrue(np.abs(np.std(pn.ar1_unity(0.9, 1, 10000)) -
                               np.std(pn.ar1_unity(0.50, 1, 10000))) < 2)
        self.assertTrue(np.abs(np.mean(pn.ar1_unity(-0.5, 1, 10000)) - 1) < 2)

    def test_arma11(self):
        # Test construction errors
        self.assertRaisesRegex(
            ValueError, 'rho', pn.arma11, 1.1, 0.5, 5, 100)
        self.assertRaisesRegex(
            ValueError, 'theta', pn.arma11, 0.5, 1.1, 5, 100)
        self.assertRaisesRegex(
            ValueError, 'Standard deviation', pn.arma11, 0.5, 0.5, -5, 100)
        self.assertRaisesRegex(
            ValueError, 'Number of values', pn.arma11, 0.5, 0.5, 5, -100)

        # test values
        samples = pn.arma11(0.5, 0.5, 5, 10000)
        self.assertTrue(np.mean(samples) < 1)
        self.assertTrue(np.abs(np.std(samples) - 5) < 1)

    def test_arma11_unity(self):
        # Test construction errors
        self.assertRaisesRegex(
            ValueError, 'rho', pn.arma11_unity, 1.1, 0.5, 5, 100)
        self.assertRaisesRegex(
            ValueError, 'theta', pn.arma11_unity, 0.5, 1.1, 5, 100)
        self.assertRaisesRegex(
            ValueError, 'Standard dev', pn.arma11_unity, 0.5, 0.5, -5, 100)
        self.assertRaisesRegex(
            ValueError, 'Number of values', pn.arma11_unity, 0.5, 0.5, 5, -100)

        # test values
        samples = pn.arma11_unity(0.5, 0.5, 5, 10000)
        self.assertTrue(np.abs(np.mean(samples) - 1) < 1)
        self.assertTrue(np.abs(np.std(samples) - 5) < 1)

    def test_multiplicative_gaussian(self):
        # Test construction errors
        self.assertRaisesRegex(
            ValueError,
            'Standard deviation',
            pn.multiplicative_gaussian,
            1.0,
            -1.0,
            [1, 2, 3]
        )
        self.assertRaisesRegex(
            ValueError,
            'Standard deviation',
            pn.multiplicative_gaussian,
            1.0,
            [2.0, -1.0],
            np.array([[1, 2, 3], [4, 5, 6]])
        )
        f_too_many_dims = np.zeros((2, 10, 5))
        self.assertRaisesRegex(
            ValueError,
            'f must have be of shape',
            pn.multiplicative_gaussian,
            1.0,
            1.0,
            f_too_many_dims
        )
        self.assertRaisesRegex(
            ValueError,
            'eta must be',
            pn.multiplicative_gaussian,
            np.array([[1, 2, 3], [4, 5, 6]]),
            1.0,
            [1, 2, 3]
        )
        self.assertRaisesRegex(
            ValueError,
            'eta must be',
            pn.multiplicative_gaussian,
            np.array([1, 2, 3]),
            1.0,
            [1, 2, 3]
        )
        self.assertRaisesRegex(
            ValueError,
            'sigma must be',
            pn.multiplicative_gaussian,
            1.0,
            np.array([[1, 2, 3], [4, 5, 6]]),
            [1, 2, 3]
        )
        self.assertRaisesRegex(
            ValueError,
            'sigma must be',
            pn.multiplicative_gaussian,
            1.0,
            np.array([1, 2, 3]),
            [1, 2, 3]
        )

        # Test values
        samples_small_f = pn.multiplicative_gaussian(2.0, 1.0, [1] * 10000)
        self.assertTrue(np.abs(np.mean(samples_small_f)) < 1)
        self.assertTrue(np.abs(np.std(samples_small_f) - 1) < 1)
        samples_large_f = pn.multiplicative_gaussian(2.0, 1.0, [2] * 10000)
        self.assertTrue(np.abs(np.mean(samples_large_f)) < 1)
        self.assertTrue(np.abs(np.std(samples_large_f) - 4) < 1)

        # Test multi-outputs
        f_2d = np.array([[1, 2, 3, 4], [11, 12, 13, 14]])
        samples_2d_eta = pn.multiplicative_gaussian([1.0, 3.0], 5.0, f_2d)
        self.assertTrue(samples_2d_eta.shape == f_2d.shape)
        samples_2d_sigma = pn.multiplicative_gaussian(1.0, [0.5, 0.75], f_2d)
        self.assertTrue(samples_2d_sigma.shape == f_2d.shape)
        samples_2d_both = pn.multiplicative_gaussian([1.0, 3.0],
                                                     [0.5, 0.75], f_2d)
        self.assertTrue(samples_2d_both.shape == f_2d.shape)
if __name__ == '__main__':
    # Allow running this test module directly: python test_noise.py
    unittest.main()
| |
import math
import time
class vector:
    """Minimal 2D vector with x/y components and a debug-friendly str()."""

    # Class-level fallbacks; instances get their own values in __init__.
    x = 0.0
    y = 0.0

    def __init__(self):
        # BUG FIX: this previously assigned ``self.X`` (capital X), which
        # created a stray attribute and left ``self.x`` silently falling
        # back to the class attribute instead of an instance attribute.
        self.x = 0.0
        self.y = 0.0

    def __str__(self):
        return '{x:%s, y:%s}' % (self.x, self.y)
class car:
    """Bicycle-model car physics (SAE axes: x forward, y right, z down).

    Driver inputs (steer_angle, throttle, brake, *_slip) are set externally;
    move_tick(delta_t) integrates forces into velocity, position and heading.
    """
    # given by system
    id = None
    token = None
    # given by system - end
    # given by user
    name = None
    color = None
    type = None
    # given by user - end
    # external control variables
    steer_angle = 0.0
    throttle = 0
    brake = 0
    rear_slip = 0
    front_slip = 0
    # external control variables - end
    # external read variables - begin
    angle = 0.0
    position_wc = vector()
    # external read variables - end
    # car simulation variables
    # NOTE(review): these class-level vector() instances are shared by all
    # cars until __init__ rebinds them per instance -- confirm nothing reads
    # them before construction.
    velocity_wc = vector()
    angular_velocity = 0.0
    velocity = vector()
    acceleration_wc = vector()
    rot_angle = 0.0
    side_slip = 0.0
    slipangle_front = 0.0
    slipangle_rear = 0.0
    force = vector()
    resistance = vector()
    acceleration = vector()
    torque = 0.0
    angular_acceleration = 0.0
    sn = 0.0
    cs = 0.0
    yawspeed = 0.0
    weight = 0.0
    ftraction = vector()
    flatf = vector()
    flatr = vector()
    # car simulation variables - end
    ## car configuration
    b = 1.0  # distance from CG to front axle
    c = 1.0  # distance from CG to rear axle
    wheel_base = 2.0  # b + c = 0.0
    h = 1.0
    mass = 1500
    inertia = 1500
    width = 1.5
    length = 3.0  # must be > wheel_base
    wheel_length = 0.7
    wheel_width = 0.3
    ## car configuration - end
    ## constants
    PI = 3.14159265359
    DRAG = 5.0  # factor for air resistance (drag)
    RESISTANCE = 30.0  # factor for rolling resistance
    CA_R = -5.20  # cornering stiffness
    CA_F = -5.0  # cornering stiffness
    MAX_GRIP = 2.0  # maximum (normalised) friction force, =diameter of friction circle
    ## constants - end

    def __init__(self, name, color, type):
        # Rebind every piece of mutable state on the instance so that cars
        # do not share the class-level vector objects above.
        self.name = name
        self.color = color
        self.type = type
        self.steer_angle = 0.0
        self.throttle = 0
        self.brake = 0
        self.rear_slip = 0
        self.front_slip = 0
        self.angle = 0.0
        self.position_wc = vector()
        self.velocity_wc = vector()
        self.angular_velocity = 0.0
        self.velocity = vector()
        self.acceleration_wc = vector()
        self.rot_angle = 0.0
        self.side_slip = 0.0
        self.slipangle_front = 0.0
        self.slipangle_rear = 0.0
        self.force = vector()
        self.resistance = vector()
        self.acceleration = vector()
        self.torque = 0.0
        self.angular_acceleration = 0.0
        self.sn = 0.0
        self.cs = 0.0
        self.yawspeed = 0.0
        self.weight = 0.0
        self.ftraction = vector()
        self.flatf = vector()
        self.flatr = vector()

    def get_info(self):
        # Static (user-supplied) identity of the car.
        return (self.name, self.color, self.type)

    def get_pos(self):
        # World position, heading, and the current driver inputs.
        return (self.position_wc.x, self.position_wc.y, self.angle, self.steer_angle, self.throttle)

    def sign(self, val):
        # 1.0 for val >= 0.0, else -1.0 (used to brake against motion).
        if val >= 0.0:
            return 1.0
        return -1.0

    def move_tick(self, delta_t):
        """Advance the simulation by delta_t seconds."""
        self.sn = math.sin(self.angle)
        self.cs = math.cos(self.angle)
        if self.steer_angle != 0.0:
            # NOTE(review): leftover debugger hook; assigned but never read.
            breakme = 1
        # SAE convention: x is to the front of the car, y is to the right, z is down
        # bangz: Velocity of Car. Vlat and Vlong
        # transform velocity in world reference frame to velocity in car reference frame
        self.velocity.x = self.cs * self.velocity_wc.y + self.sn * self.velocity_wc.x
        self.velocity.y = -self.sn * self.velocity_wc.y + self.cs * self.velocity_wc.x
        # Lateral force on wheels
        #
        # Resulting velocity of the wheels as result of the yaw rate of the car body
        # v = yawrate * r where r is distance of wheel to CG (approx. half wheel base)
        # yawrate (ang.velocity) must be in rad/s
        #
        self.yawspeed = self.wheel_base * 0.5 * self.angular_velocity
        # bangz: velocity.x = fVLong_, velocity.y = fVLat_
        if self.velocity.x == 0:  # TODO: fix math.singularity
            self.rot_angle = 0
        else:
            self.rot_angle = math.atan2(self.yawspeed, self.velocity.x)
        # Calculate the side slip angle of the car (a.k.a. beta)
        if self.velocity.x == 0:  # TODO: fix math.singularity
            self.side_slip = 0
        else:
            self.side_slip = math.atan2(self.velocity.y, self.velocity.x)
        # Calculate slip angles for front and rear wheels (a.k.a. alpha)
        self.slipangle_front = self.side_slip + self.rot_angle - self.steer_angle
        self.slipangle_rear = self.side_slip - self.rot_angle
        # weight per axle = half car mass times 1G (=9.8m/s^2)
        self.weight = self.mass * 9.8 * 0.5
        # lateral force on front wheels = (Ca * slip angle) capped to friction circle * load
        self.flatf.x = 0
        self.flatf.y = self.CA_F * self.slipangle_front
        self.flatf.y = min(self.MAX_GRIP, self.flatf.y)
        self.flatf.y = max(-self.MAX_GRIP, self.flatf.y)
        self.flatf.y *= self.weight
        if self.front_slip != 0:
            self.flatf.y *= 0.5
        # lateral force on rear wheels
        self.flatr.x = 0
        self.flatr.y = self.CA_R * self.slipangle_rear
        self.flatr.y = min(self.MAX_GRIP, self.flatr.y)
        self.flatr.y = max(-self.MAX_GRIP, self.flatr.y)
        self.flatr.y *= self.weight
        if self.rear_slip != 0:
            self.flatr.y *= 0.5
        # longtitudinal force on rear wheels - very simple traction model
        self.ftraction.x = 100 * (self.throttle - self.brake * self.sign(self.velocity.x))
        self.ftraction.y = 0
        if self.rear_slip != 0:
            self.ftraction.x *= 0.5
        # Forces and torque on body
        # drag and rolling resistance
        self.resistance.x = -(self.RESISTANCE * self.velocity.x + self.DRAG * self.velocity.x * abs(self.velocity.x))
        self.resistance.y = -(self.RESISTANCE * self.velocity.y + self.DRAG * self.velocity.y * abs(self.velocity.y))
        # sum forces
        self.force.x = self.ftraction.x + math.sin(self.steer_angle) * self.flatf.x + self.flatr.x + self.resistance.x
        self.force.y = self.ftraction.y + math.cos(self.steer_angle) * self.flatf.y + self.flatr.y + self.resistance.y
        # torque on body from lateral forces
        self.torque = self.b * self.flatf.y - self.c * self.flatr.y
        # Acceleration
        # Newton F = m.a, therefore a = F/m
        self.acceleration.x = self.force.x / self.mass
        self.acceleration.y = self.force.y / self.mass
        self.angular_acceleration = self.torque / self.inertia
        # Velocity and position
        # transform acceleration from car reference frame to world reference frame
        self.acceleration_wc.x = self.cs * self.acceleration.y + self.sn * self.acceleration.x
        self.acceleration_wc.y = -self.sn * self.acceleration.y + self.cs * self.acceleration.x
        # velocity is integrated acceleration
        #
        self.velocity_wc.x += delta_t * self.acceleration_wc.x
        self.velocity_wc.y += delta_t * self.acceleration_wc.y
        # position is integrated velocity
        #
        self.position_wc.x += delta_t * self.velocity_wc.x
        self.position_wc.y += delta_t * self.velocity_wc.y
        # Angular velocity and heading
        # integrate angular acceleration to get angular velocity
        #
        self.angular_velocity += delta_t * self.angular_acceleration
        # integrate angular velocity to get angular orientation
        #
        self.angle += delta_t * self.angular_velocity

    def __str__(self):
        # Debug dump of the entire instance state.
        return str(self.__dict__)
def main():
my_car = car('john', 'red', 'truck')
my_car.throttle = 10
my_car.steer_angle = my_car.PI / 256.0
for i in range(0, 100):
my_car.move_tick(16.0 / 1000.0)
print my_car.steer_angle
print my_car.throttle
print my_car.brake
print my_car.rear_slip
print my_car.front_slip
print my_car.angle
print my_car.position_wc
print my_car.velocity_wc
print my_car.angular_velocity
print my_car.velocity
print my_car.acceleration_wc
print my_car.rot_angle
print my_car.side_slip
print my_car.slipangle_front
print my_car.slipangle_rear
print my_car.force
print my_car.resistance
print my_car.acceleration
print my_car.torque
print my_car.angular_acceleration
print my_car.sn
print my_car.cs
print my_car.yawspeed
print my_car.weight
print my_car.ftraction
print my_car.flatf
print my_car.flatr
return
print car.PI
while True:
print
my_car.move_tick(16.0 / 1000)
time.sleep(1)
print my_car.get_pos()
if __name__ == '__main__':
    # Script entry point.
    main()
| |
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import copy
from datetime import datetime, timedelta
from dateutil.tz import tzutc
import json
import itertools
import ipaddress
import logging
import os
import random
import re
import sys
import threading
import time
from urllib import parse as urlparse
from urllib.request import getproxies, proxy_bypass
from dateutil.parser import ParserError, parse
from c7n import config
from c7n.exceptions import ClientError, PolicyValidationError
# Try to play nice in a serverless environment, where we don't require yaml
try:
import yaml
except ImportError: # pragma: no cover
SafeLoader = BaseSafeDumper = yaml = None
else:
try:
from yaml import CSafeLoader as SafeLoader, CSafeDumper as BaseSafeDumper
except ImportError: # pragma: no cover
from yaml import SafeLoader, SafeDumper as BaseSafeDumper
class SafeDumper(BaseSafeDumper or object):
    """YAML dumper that never emits anchors/aliases for repeated objects.

    Falls back to a plain ``object`` base when PyYAML is absent so the
    module still imports (the class is unusable in that case).
    """
    def ignore_aliases(self, data):
        return True
# Module-level logger for c7n utility helpers.
log = logging.getLogger('custodian.utils')


class VarsSubstitutionError(Exception):
    """Raised when variable substitution into a policy file fails."""
    pass
def load_file(path, format=None, vars=None):
    """Load a yaml or json file, optionally substituting str.format vars.

    When *format* is not given, it defaults to yaml unless the file
    extension is ``.json``.
    """
    if format is None:
        format = 'json' if os.path.splitext(path)[1][1:] == 'json' else 'yaml'

    with open(path) as fh:
        contents = fh.read()

    if vars:
        try:
            contents = contents.format(**vars)
        except IndexError:
            raise VarsSubstitutionError(
                'Failed to substitute variable by positional argument.')
        except KeyError as e:
            raise VarsSubstitutionError(
                'Failed to substitute variables. KeyError on {}'.format(str(e)))

    if format == 'yaml':
        return yaml_load(contents)
    elif format == 'json':
        return loads(contents)
def yaml_load(value):
    # Parse *value* with the safe loader; guard against the serverless
    # case where PyYAML is not installed (yaml is None at import time).
    if yaml is None:
        raise RuntimeError("Yaml not available")
    return yaml.load(value, Loader=SafeLoader)
def yaml_dump(value):
    # Serialize in block style without anchors/aliases (see SafeDumper).
    if yaml is None:
        raise RuntimeError("Yaml not available")
    return yaml.dump(value, default_flow_style=False, Dumper=SafeDumper)
def loads(body):
    # Thin wrapper kept for symmetry with dumps().
    return json.loads(body)
def dumps(data, fh=None, indent=0):
    """Serialize *data* as JSON, encoding datetimes via DateTimeEncoder.

    Writes to *fh* when given, otherwise returns the JSON string.
    """
    if not fh:
        return json.dumps(data, cls=DateTimeEncoder, indent=indent)
    return json.dump(data, fh, cls=DateTimeEncoder, indent=indent)
def format_event(evt):
    # Pretty-print an event dict (e.g. for logging).
    return json.dumps(evt, indent=2)
def filter_empty(d):
    """Remove falsy-valued keys from *d* in place and return it."""
    empty_keys = [k for k, v in d.items() if not v]
    for k in empty_keys:
        del d[k]
    return d
# We need a minimum floor when examining possible timestamp
# values to distinguish from other numeric time usages. Use
# the S3 Launch Date. (Seconds since epoch; time.mktime interprets the
# tuple in local time.)
DATE_FLOOR = time.mktime((2006, 3, 19, 0, 0, 0, 0, 0, 0))
def parse_date(v, tz=None):
    """Handle various permutations of a datetime serialization
    to a datetime with the given timezone.

    Handles strings, seconds since epoch, and milliseconds since epoch.
    Returns None when *v* cannot be interpreted as a datetime.
    """
    if v is None:
        return v
    tz = tz or tzutc()
    if isinstance(v, datetime):
        if v.tzinfo is None:
            # NOTE(review): astimezone on a naive datetime interprets it as
            # local time -- confirm that is the intended semantics here.
            return v.astimezone(tz)
        return v
    # Non-numeric strings: defer to dateutil's flexible parser.
    if isinstance(v, str) and not v.isdigit():
        try:
            return parse(v).astimezone(tz)
        except (AttributeError, TypeError, ValueError, OverflowError):
            pass
    # OSError on windows -- https://bugs.python.org/issue36439
    exceptions = (ValueError, OSError) if os.name == "nt" else (ValueError)
    # First try interpreting the value as a seconds-since-epoch timestamp;
    # DATE_FLOOR guards against treating small numbers as timestamps.
    if isinstance(v, (int, float, str)):
        try:
            if float(v) > DATE_FLOOR:
                v = datetime.fromtimestamp(float(v)).astimezone(tz)
        except exceptions:
            pass
    if isinstance(v, (int, float, str)):
        # try interpreting as milliseconds epoch
        try:
            if float(v) > DATE_FLOOR:
                v = datetime.fromtimestamp(float(v) / 1000).astimezone(tz)
        except exceptions:
            pass
    # Old-style ternary: the parsed datetime, else None.
    return isinstance(v, datetime) and v or None
def type_schema(
        type_name, inherits=None, rinherit=None,
        aliases=None, required=None, **props):
    """jsonschema generation helper

    params:
      - type_name: name of the type
      - inherits: list of document fragments that are required via anyOf[$ref]
      - rinherit: use another schema as a base for this, basically work around
        inherits issues with additionalProperties and type enums.
      - aliases: additional names this type maybe called
      - required: list of required properties, by default 'type' is required
      - props: additional key value properties
    """
    type_names = [type_name]
    if aliases:
        type_names.extend(aliases)

    if rinherit:
        s = copy.deepcopy(rinherit)
        s['properties']['type'] = {'enum': type_names}
    else:
        s = {
            'type': 'object',
            'properties': {
                'type': {'enum': type_names}}}

    # Ref based inheritance and additional properties don't mix well.
    # https://stackoverflow.com/questions/22689900/json-schema-allof-with-additionalproperties
    if not inherits:
        s['additionalProperties'] = False

    s['properties'].update(props)
    # None-valued props act as deletions (useful with rinherit).
    for prop in [k for k, v in props.items() if v is None]:
        del s['properties'][prop]

    if not required:
        required = []
    if isinstance(required, list):
        required.append('type')
    s['required'] = required

    if inherits:
        s = {'allOf': [{'$ref': ref} for ref in inherits] + [s]}
    return s
class DateTimeEncoder(json.JSONEncoder):
    """JSON encoder that renders datetime objects as ISO 8601 strings."""

    def default(self, obj):
        if not isinstance(obj, datetime):
            # Defer to the base class, which raises TypeError.
            return json.JSONEncoder.default(self, obj)
        return obj.isoformat()
def group_by(resources, key):
    """Return a mapping of key value to resources with the corresponding value.

    Key may be specified as dotted form for nested dictionary lookup
    """
    grouped = {}
    path = key.split('.')
    for resource in resources:
        value = resource
        for part in path:
            value = value.get(part)
            if not isinstance(value, dict):
                # Stop descending once the lookup leaves dict territory.
                break
        grouped.setdefault(value, []).append(resource)
    return grouped
def chunks(iterable, size=50):
    """Yield successive lists of at most *size* items from *iterable*."""
    batch = []
    for item in iterable:
        batch.append(item)
        if len(batch) == size:
            yield batch
            batch = []
    # Emit the final partial batch, if any.
    if batch:
        yield batch
def camelResource(obj, implicitDate=False, implicitTitle=True):
    """Some sources from apis return lowerCased where as describe calls
    always return TitleCase, this function turns the former to the later

    implicitDate ~ automatically sniff keys that look like isoformat date
    strings and convert to python datetime objects.

    Mutates *obj* in place (recursively) and returns it.
    """
    if not isinstance(obj, dict):
        return obj
    for key in list(obj.keys()):
        value = obj.pop(key)
        new_key = "%s%s" % (key[0].upper(), key[1:]) if implicitTitle else key
        obj[new_key] = value
        if implicitDate:
            # config service handles datetime differently then describe sdks
            # the sdks use knowledge of the shape to support language native
            # date times, while config just turns everything into a serialized
            # json with mangled keys without type info. to normalize to describe
            # we implicitly sniff keys which look like datetimes, and have an
            # isoformat marker ('T').
            lowered = key.lower()
            if isinstance(value, (str, int)) and (
                    'time' in lowered or 'date' in lowered):
                try:
                    parsed = parse_date(value)
                except ParserError:
                    parsed = None
                if parsed:
                    obj[new_key] = parsed
        if isinstance(value, dict):
            camelResource(value, implicitDate, implicitTitle)
        elif isinstance(value, list):
            for element in value:
                camelResource(element, implicitDate, implicitTitle)
    return obj
def get_account_id_from_sts(session):
    """Return the AWS account id for the session's caller identity."""
    identity = session.client('sts').get_caller_identity()
    return identity.get('Account')
def get_account_alias_from_sts(session):
    """Return the first configured IAM account alias, or ''."""
    response = session.client('iam').list_account_aliases()
    aliases = response.get('AccountAliases', ())
    return aliases[0] if aliases else ''
def query_instances(session, client=None, **query):
    """Return a list of ec2 instances for the query."""
    if client is None:
        client = session.client('ec2')
    paginator = client.get_paginator('describe_instances')
    instances = []
    # Flatten pages -> reservations -> instances.
    for page in paginator.paginate(**query):
        for reservation in page['Reservations']:
            instances.extend(reservation['Instances'])
    return instances
# Thread-local cache of sessions keyed by factory region; see
# local_session() and reset_session_cache().
CONN_CACHE = threading.local()
def local_session(factory, region=None):
    """Cache a session thread local for up to 45m"""
    # Explicit region wins over the factory's own region attribute.
    factory_region = region or getattr(factory, 'region', 'global')
    cached = getattr(CONN_CACHE, factory_region, {})
    session = cached.get('session')
    created = cached.get('time')
    now = time.time()
    if session is not None and created + (60 * 45) > now:
        return session
    session = factory()
    setattr(CONN_CACHE, factory_region, {'session': session, 'time': now})
    return session
def reset_session_cache():
    # Drop every cached session for the current thread (public, non-dunder
    # attributes on the thread local only).
    for k in [k for k in dir(CONN_CACHE) if not k.startswith('_')]:
        setattr(CONN_CACHE, k, {})
def annotation(i, k):
    # Return the annotation stored under k, or an empty tuple when unset.
    return i.get(k, ())
def set_annotation(i, k, v):
    """Append annotation value(s) *v* to dictionary *i* under key *k*.

    >>> x = {}
    >>> set_annotation(x, 'marker', 'a')
    >>> annotation(x, 'marker')
    ['a']
    """
    if not isinstance(i, dict):
        raise ValueError("Can only annotate dictionaries")

    if not isinstance(v, list):
        v = [v]

    if k not in i:
        i[k] = v
        return

    ev = i[k]
    if isinstance(ev, list):
        ev.extend(v)
    else:
        # BUG FIX: previously a non-list existing value caused the new
        # annotation values to be silently dropped; wrap the existing
        # value and append instead.
        i[k] = [ev] + v
def parse_s3(s3_path):
    """Split an s3://bucket/key path into (normalized_path, bucket, key_prefix).

    The returned key_prefix keeps its leading '/' (e.g. '/key/prefix'),
    matching historical behavior; it is '' when no key is present.
    Raises ValueError for paths not starting with 's3://'.
    """
    if not s3_path.startswith('s3://'):
        raise ValueError("invalid s3 path")
    # BUG FIX: strip trailing slashes *before* locating the bucket/key
    # boundary. Previously 's3://bucket/' computed the boundary first and
    # returned a garbage one-character key_prefix sliced from the
    # already-stripped path.
    s3_path = s3_path.rstrip('/')
    ridx = s3_path.find('/', 5)
    if ridx == -1:
        bucket = s3_path[5:]
        key_prefix = ""
    else:
        bucket = s3_path[5:ridx]
        key_prefix = s3_path[ridx:]
    return s3_path, bucket, key_prefix
# Regions that live outside the default 'aws' commercial partition.
REGION_PARTITION_MAP = {
    'us-gov-east-1': 'aws-us-gov',
    'us-gov-west-1': 'aws-us-gov',
    'cn-north-1': 'aws-cn',
    'cn-northwest-1': 'aws-cn',
    'us-isob-east-1': 'aws-iso-b',
    'us-iso-east-1': 'aws-iso'
}


def get_partition(region):
    # Default to the standard commercial partition.
    return REGION_PARTITION_MAP.get(region, 'aws')
def generate_arn(
        service, resource, partition='aws',
        region=None, account_id=None, resource_type=None, separator='/'):
    """Generate an Amazon Resource Name.
    See http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html.
    """
    # Infer the partition from the region when it maps to a known
    # non-commercial partition.
    if region and region in REGION_PARTITION_MAP:
        partition = REGION_PARTITION_MAP[region]
    # S3 ARNs are global and carry no region component.
    if service == 's3':
        region = ''
    arn = 'arn:%s:%s:%s:%s:' % (
        partition, service, region or '', account_id or '')
    if not resource_type:
        return arn + resource
    if resource.startswith(separator):
        separator = ''
    return arn + '%s%s%s' % (resource_type, separator, resource)
def snapshot_identifier(prefix, db_identifier):
    """Return an identifier for a snapshot of a database or cluster."""
    # Minute-resolution local timestamp keeps identifiers unique enough.
    timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M')
    return '%s-%s-%s' % (prefix, db_identifier, timestamp)
# Dedicated logger so retry noise can be filtered independently.
retry_log = logging.getLogger('c7n.retry')
def get_retry(retry_codes=(), max_attempts=8, min_delay=1, log_retries=False):
    """Decorator for retry boto3 api call on transient errors.

    https://www.awsarchitectureblog.com/2015/03/backoff.html
    https://en.wikipedia.org/wiki/Exponential_backoff

    :param codes: A sequence of retryable error codes.
    :param max_attempts: The max number of retries, by default the delay
           time is proportional to the max number of attempts.
    :param log_retries: Whether we should log retries, if specified
           specifies the level at which the retry should be logged.
    :param _max_delay: The maximum delay for any retry interval *note*
           this parameter is only exposed for unit testing, as its
           derived from the number of attempts.

    Returns a function for invoking aws client calls that
    retries on retryable error codes.
    """
    max_delay = max(min_delay, 2) ** max_attempts

    def _retry(func, *args, ignore_err_codes=(), **kw):
        for idx, delay in enumerate(
                backoff_delays(min_delay, max_delay, jitter=True)):
            try:
                return func(*args, **kw)
            except ClientError as e:
                # Ignored codes resolve to a silent None return.
                if e.response['Error']['Code'] in ignore_err_codes:
                    return
                elif e.response['Error']['Code'] not in retry_codes:
                    raise
                elif idx == max_attempts - 1:
                    # Budget exhausted; surface the last error.
                    raise
                if log_retries:
                    # log_retries doubles as the logging level.
                    retry_log.log(
                        log_retries,
                        "retrying %s on error:%s attempt:%d last delay:%0.2f",
                        func, e.response['Error']['Code'], idx, delay)
            # Sleep between attempts (jittered exponential backoff).
            time.sleep(delay)
    return _retry
def backoff_delays(start, stop, factor=2.0, jitter=False):
    """Geometric backoff sequence w/ jitter"""
    delay = start
    while delay <= stop:
        if jitter:
            # Jitter subtracts a random fraction, yielding a value in (0, delay].
            yield delay - (delay * random.random())
        else:
            yield delay
        delay = delay * factor
def parse_cidr(value):
    """Process cidr ranges.

    Values with a '/' parse as networks, bare values as host addresses;
    unparseable input yields None.
    """
    klass = ipaddress.ip_address if '/' not in value else IPv4Network
    try:
        return klass(str(value))
    except (ipaddress.AddressValueError, ValueError):
        return None
class IPv4Network(ipaddress.IPv4Network):
    """IPv4Network whose ``in`` operator also accepts networks (supernet test)."""

    # Override for net 2 net containment comparison
    def __contains__(self, other):
        if other is None:
            return False
        if isinstance(other, ipaddress._BaseNetwork):
            return self.supernet_of(other)
        return super(IPv4Network, self).__contains__(other)

    # Backport: supernet_of/_is_subnet_of only exist on Python >= 3.7.
    if (sys.version_info.major == 3 and sys.version_info.minor <= 6):  # pragma: no cover
        @staticmethod
        def _is_subnet_of(a, b):
            try:
                # Always false if one is v4 and the other is v6.
                if a._version != b._version:
                    raise TypeError(f"{a} and {b} are not of the same version")
                return (b.network_address <= a.network_address and
                        b.broadcast_address >= a.broadcast_address)
            except AttributeError:
                raise TypeError(f"Unable to test subnet containment "
                                f"between {a} and {b}")

        def supernet_of(self, other):
            """Return True if this network is a supernet of other."""
            return self._is_subnet_of(other, self)
def reformat_schema(model):
    """Reformat schema to be in a more displayable format."""
    if not hasattr(model, 'schema'):
        return "Model '{}' does not have a schema".format(model)

    if 'properties' not in model.schema:
        return "Schema in unexpected format."

    # Work on a deep copy so the model's schema is never mutated.
    ret = copy.deepcopy(model.schema['properties'])
    ret.pop('type', None)

    # Inline the required markers onto the individual properties.
    for key in model.schema.get('required', []):
        if key in ret:
            ret[key]['required'] = True
    return ret
# from botocore.utils avoiding runtime dependency for botocore for other providers.
# license apache 2.0
def set_value_from_jmespath(source, expression, value, is_first=True):
    """Set *value* into nested dict *source* at dotted-path *expression*.

    Limitations:
      * Only handles dotted lookups
      * No offsets/wildcards/slices/etc.
    """
    current_key, _, remainder = expression.partition('.')
    if not current_key:
        raise ValueError(expression)

    if not remainder:
        # Down to a single key; set it directly.
        source[current_key] = value
        return

    # Create intermediate dictionaries as needed, then recurse.
    if current_key not in source:
        source[current_key] = {}
    return set_value_from_jmespath(
        source[current_key], remainder, value, is_first=False)
def format_string_values(obj, err_fallback=(IndexError, KeyError), *args, **kwargs):
    """
    Format all string values in an object.
    Return the updated object
    """
    if isinstance(obj, dict):
        return {key: format_string_values(val, *args, **kwargs)
                for key, val in obj.items()}
    if isinstance(obj, list):
        return [format_string_values(item, *args, **kwargs) for item in obj]
    if isinstance(obj, str):
        try:
            return obj.format(*args, **kwargs)
        except err_fallback:
            # Leave strings with unresolvable placeholders untouched.
            return obj
    return obj
def parse_url_config(url):
    """Parse *url* into a Bag of scheme/netloc/path plus query parameters."""
    # A bare word like 'sqs' is treated as a scheme-only url.
    if url and '://' not in url:
        url += "://"
    parsed = urlparse.urlparse(url)
    conf = config.Bag()
    for attr in ('scheme', 'netloc', 'path'):
        conf[attr] = getattr(parsed, attr)
    # Query params are flattened to their first value.
    for key, values in urlparse.parse_qs(parsed.query).items():
        conf[key] = values[0]
    conf['url'] = url
    return conf
def get_proxy_url(url):
    """Return the configured proxy url for *url*, or None.

    Consults the environment/system proxies and honors no_proxy bypass
    (hostname entries only -- ip/cidr no_proxy entries are not supported).
    """
    proxies = getproxies()
    parsed = urlparse.urlparse(url)

    # Default the port from the scheme when not given explicitly in the url.
    port = parsed.port
    if port is None:
        if parsed.scheme == 'http':
            port = 80
        elif parsed.scheme == 'https':
            port = 443

    hostname = parsed.hostname or ''
    if proxy_bypass("%s:%s" % (hostname, port)):
        return None

    # Most specific match first: scheme+host, scheme, all+host, all.
    for key in (parsed.scheme + '://' + parsed.netloc,
                parsed.scheme,
                'all://' + parsed.netloc,
                'all'):
        if key in proxies:
            return proxies[key]
    return None
class FormatDate:
    """a datetime wrapper with extended pyformat syntax

    Format strings may embed '+<n>M', '+<n>h' or '+<n>d' tokens that shift
    the wrapped datetime by n minutes/hours/days before formatting.
    """

    date_increment = re.compile(r'\+[0-9]+[Mdh]')

    def __init__(self, d=None):
        self._d = d

    @property
    def datetime(self):
        return self._d

    @classmethod
    def utcnow(cls):
        return cls(datetime.utcnow())

    def __getattr__(self, k):
        # Delegate unknown attributes to the wrapped datetime.
        return getattr(self._d, k)

    def __format__(self, fmt=None):
        d = self._d
        # Apply every increment token, then strip them from the format.
        for token in self.date_increment.findall(fmt):
            amount = float(token[1:-1])
            unit = {'M': 'minutes', 'h': 'hours', 'd': 'days'}[token[-1]]
            d = d + timedelta(**{unit: amount})
        fmt = self.date_increment.sub("", fmt)
        return d.__format__(fmt)
class QueryParser:
    """Validate a list of {'Name': ..., 'Values': ...} query filters.

    Subclasses override QuerySchema (allowed key -> value type or tuple of
    allowed values), type_name (used in error messages), multi_value and
    value_key.
    """

    QuerySchema = {}
    type_name = ''
    multi_value = True
    value_key = 'Values'

    @classmethod
    def parse(cls, data):
        """Validate *data* and return it unchanged; raises PolicyValidationError."""
        filters = []
        if not isinstance(data, (tuple, list)):
            raise PolicyValidationError(
                "%s Query invalid format, must be array of dicts %s" % (
                    cls.type_name,
                    data))
        for d in data:
            if not isinstance(d, dict):
                raise PolicyValidationError(
                    "%s Query Filter Invalid %s" % (cls.type_name, data))
            if "Name" not in d or cls.value_key not in d:
                raise PolicyValidationError(
                    "%s Query Filter Invalid: Missing Key or Values in %s" % (
                        cls.type_name, data))

            key = d['Name']
            values = d[cls.value_key]

            # Single-valued parsers wrap the scalar for uniform checking
            # below; the original dict is what gets appended.
            if not cls.multi_value and isinstance(values, list):
                raise PolicyValidationError(
                    "%s Query Filter Invalid Key: Value:%s Must be single valued" % (
                        cls.type_name, key))
            elif not cls.multi_value:
                values = [values]

            # 'tag:Name' style keys are always permitted.
            if key not in cls.QuerySchema and not key.startswith('tag:'):
                raise PolicyValidationError(
                    "%s Query Filter Invalid Key:%s Valid: %s" % (
                        cls.type_name, key, ", ".join(cls.QuerySchema.keys())))

            vtype = cls.QuerySchema.get(key)
            if vtype is None and key.startswith('tag'):
                vtype = str

            if not isinstance(values, list):
                raise PolicyValidationError(
                    "%s Query Filter Invalid Values, must be array %s" % (
                        cls.type_name, data,))

            for v in values:
                # A tuple vtype enumerates the allowed values; otherwise it
                # is an expected type for isinstance checking.
                if isinstance(vtype, tuple):
                    if v not in vtype:
                        raise PolicyValidationError(
                            "%s Query Filter Invalid Value: %s Valid: %s" % (
                                cls.type_name, v, ", ".join(vtype)))
                elif not isinstance(v, vtype):
                    raise PolicyValidationError(
                        "%s Query Filter Invalid Value Type %s" % (
                            cls.type_name, data,))

            filters.append(d)

        return filters
def get_annotation_prefix(s):
    """Return *s* namespaced under the c7n annotation prefix."""
    return f'c7n:{s}'
def merge_dict_list(dict_iter):
    """take an list of dictionaries and merge them.

    last dict wins/overwrites on keys.
    """
    merged = {}
    for item in dict_iter:
        merged = {**merged, **item}
    return merged
def merge_dict(a, b):
    """Perform a merge of dictionaries a and b.

    Subdictionaries present in both are recursively merged.  For any other
    key present in both, the value from *b* wins.

    (DOC FIX: the old docstring claimed the value from *a* was used for
    leaf conflicts, but the code has always preferred *b* -- conflicting
    non-dict keys fall through the first loop and are filled in from *b*.)
    """
    d = {}
    for k, v in a.items():
        if k not in b:
            d[k] = v
        elif isinstance(v, dict) and isinstance(b[k], dict):
            d[k] = merge_dict(v, b[k])
    # Anything not already settled (keys only in b, plus leaf conflicts)
    # comes from b.
    for k, v in b.items():
        if k not in d:
            d[k] = v
    return d
def select_keys(d, keys):
    """Return a new dict holding only *keys*; missing keys map to None."""
    return {k: d.get(k) for k in keys}
def get_human_size(size, precision=2):
    """Render a byte count as a human readable string, e.g. '2.00 KB'.

    interesting discussion on 1024 vs 1000 as base
    https://en.wikipedia.org/wiki/Binary_prefix
    """
    suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']
    suffixIndex = 0
    # BUG FIX: clamp at the largest suffix instead of raising IndexError
    # for sizes beyond the old table's maximum (> 1024 PB); also extended
    # the table with EB/ZB/YB.
    while size > 1024 and suffixIndex < len(suffixes) - 1:
        suffixIndex += 1
        size = size / 1024.0
    return "%.*f %s" % (precision, size, suffixes[suffixIndex])
| |
from __future__ import unicode_literals
import re
import unicodedata
from gzip import GzipFile
from io import BytesIO
from django.utils.encoding import force_text
from django.utils.functional import allow_lazy, SimpleLazyObject
from django.utils import six
from django.utils.six.moves import html_entities
from django.utils.translation import ugettext_lazy, ugettext as _, pgettext
from django.utils.safestring import mark_safe
if six.PY2:
# Import force_unicode even though this module doesn't use it, because some
# people rely on it being here.
from django.utils.encoding import force_unicode # NOQA
# Capitalizes the first letter of a string.
# Falsy inputs ('' or None) short-circuit and are returned unchanged.
capfirst = lambda x: x and force_text(x)[0].upper() + force_text(x)[1:]
capfirst = allow_lazy(capfirst, six.text_type)
# Set up regular expressions
# Matches an HTML tag, or (group 1) a run of word chars / entities.
re_words = re.compile(r'<.*?>|((?:\w[-\w]*|&.*?;)+)', re.U | re.S)
# Captures closing-slash, tag name, and self-closing marker of a tag.
re_tag = re.compile(r'<(/)?([^ ]+?)(?:(\s*/)| .*?)?>', re.S)
re_newlines = re.compile(r'\r\n|\r')  # Used in normalize_newlines
# Boundaries for splitting camelCase words.
re_camel_case = re.compile(r'(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
def wrap(text, width):
    """
    A word-wrap function that preserves existing line breaks and most spaces in
    the text. Expects that existing line breaks are posix newlines.
    """
    text = force_text(text)

    def _generator():
        it = iter(text.split(' '))
        word = next(it)
        yield word
        # pos tracks the length of the current (possibly partial) line.
        pos = len(word) - word.rfind('\n') - 1
        for word in it:
            if "\n" in word:
                lines = word.split('\n')
            else:
                lines = (word,)
            # Account for the first fragment of the word plus the space.
            pos += len(lines[0]) + 1
            if pos > width:
                # Break the line; position restarts after the word's last
                # embedded newline (if any).
                yield '\n'
                pos = len(lines[-1])
            else:
                yield ' '
                if len(lines) > 1:
                    # The word itself contained newlines; reset position.
                    pos = len(lines[-1])
            yield word
    return ''.join(_generator())
wrap = allow_lazy(wrap, six.text_type)
class Truncator(SimpleLazyObject):
    """
    An object used to truncate text, either by characters or words.

    The wrapped text is resolved lazily via SimpleLazyObject, so the
    force_text() conversion only happens when a truncation method is called.
    """
    def __init__(self, text):
        super(Truncator, self).__init__(lambda: force_text(text))
    def add_truncation_text(self, text, truncate=None):
        # Append the truncation indicator (default: a translatable '...'),
        # honoring a '%(truncated_text)s' placeholder if the indicator has one.
        if truncate is None:
            truncate = pgettext(
                'String to return when truncating text',
                '%(truncated_text)s...')
        truncate = force_text(truncate)
        if '%(truncated_text)s' in truncate:
            return truncate % {'truncated_text': text}
        # The truncation text didn't contain the %(truncated_text)s string
        # replacement argument so just append it to the text.
        if text.endswith(truncate):
            # But don't append the truncation text if the current text already
            # ends in this.
            return text
        return '%s%s' % (text, truncate)
    def chars(self, num, truncate=None):
        """
        Returns the text truncated to be no longer than the specified number
        of characters.
        Takes an optional argument of what should be used to notify that the
        string has been truncated, defaulting to a translatable string of an
        ellipsis (...).
        """
        length = int(num)
        # NFC-normalize so each user-visible character is counted once.
        text = unicodedata.normalize('NFC', self._wrapped)
        # Calculate the length to truncate to (max length - end_text length)
        truncate_len = length
        for char in self.add_truncation_text('', truncate):
            if not unicodedata.combining(char):
                truncate_len -= 1
                if truncate_len == 0:
                    break
        s_len = 0
        end_index = None
        for i, char in enumerate(text):
            if unicodedata.combining(char):
                # Don't consider combining characters
                # as adding to the string length
                continue
            s_len += 1
            if end_index is None and s_len > truncate_len:
                end_index = i
            if s_len > length:
                # Return the truncated string
                return self.add_truncation_text(text[:end_index or 0],
                                                truncate)
        # Return the original string since no truncation was necessary
        return text
    chars = allow_lazy(chars)
    def words(self, num, truncate=None, html=False):
        """
        Truncates a string after a certain number of words. Takes an optional
        argument of what should be used to notify that the string has been
        truncated, defaulting to ellipsis (...).
        """
        length = int(num)
        if html:
            return self._html_words(length, truncate)
        return self._text_words(length, truncate)
    words = allow_lazy(words)
    def _text_words(self, length, truncate):
        """
        Truncates a string after a certain number of words.
        Newlines in the string will be stripped.
        """
        words = self._wrapped.split()
        if len(words) > length:
            words = words[:length]
            return self.add_truncation_text(' '.join(words), truncate)
        return ' '.join(words)
    def _html_words(self, length, truncate):
        """
        Truncates HTML to a certain number of words (not counting tags and
        comments). Closes opened tags if they were correctly closed in the
        given HTML.
        Newlines in the HTML are preserved.
        """
        if length <= 0:
            return ''
        # Void elements that never take a closing tag.
        html4_singlets = (
            'br', 'col', 'link', 'base', 'img',
            'param', 'area', 'hr', 'input'
        )
        # Count non-HTML words and keep note of open tags
        pos = 0
        end_text_pos = 0
        words = 0
        open_tags = []
        while words <= length:
            m = re_words.search(self._wrapped, pos)
            if not m:
                # Checked through whole string
                break
            pos = m.end(0)
            if m.group(1):
                # It's an actual non-HTML word
                words += 1
                if words == length:
                    end_text_pos = pos
                continue
            # Check for tag
            tag = re_tag.match(m.group(0))
            if not tag or end_text_pos:
                # Don't worry about non tags or tags after our truncate point
                continue
            closing_tag, tagname, self_closing = tag.groups()
            # Element names are always case-insensitive
            tagname = tagname.lower()
            if self_closing or tagname in html4_singlets:
                pass
            elif closing_tag:
                # Check for match in open tags list
                try:
                    i = open_tags.index(tagname)
                except ValueError:
                    pass
                else:
                    # SGML: An end tag closes, back to the matching start tag,
                    # all unclosed intervening start tags with omitted end tags
                    open_tags = open_tags[i + 1:]
            else:
                # Add it to the start of the open tags list
                open_tags.insert(0, tagname)
        if words <= length:
            # Don't try to close tags if we don't need to truncate
            return self._wrapped
        out = self._wrapped[:end_text_pos]
        truncate_text = self.add_truncation_text('', truncate)
        if truncate_text:
            out += truncate_text
        # Close any tags still open
        for tag in open_tags:
            out += '</%s>' % tag
        # Return string
        return out
def get_valid_filename(s):
    """
    Return a filename-safe version of the given string: leading/trailing
    whitespace is stripped, internal spaces become underscores, and any
    character that is not a unicode alphanumeric, dash, underscore, or dot
    is dropped.
    >>> get_valid_filename("john's portrait in 2004.jpg")
    'johns_portrait_in_2004.jpg'
    """
    cleaned = force_text(s).strip().replace(' ', '_')
    return re.sub(r'(?u)[^-\w.]', '', cleaned)
get_valid_filename = allow_lazy(get_valid_filename, six.text_type)
def get_text_list(list_, last_word=ugettext_lazy('or')):
    """
    Join a list into a comma-separated string, with ``last_word`` placed
    before the final item.
    >>> get_text_list(['a', 'b', 'c', 'd'])
    'a, b, c or d'
    >>> get_text_list(['a', 'b', 'c'], 'and')
    'a, b and c'
    >>> get_text_list(['a', 'b'], 'and')
    'a and b'
    >>> get_text_list(['a'])
    'a'
    >>> get_text_list([])
    ''
    """
    if not list_:
        return ''
    if len(list_) == 1:
        return force_text(list_[0])
    # Translators: This string is used as a separator between list elements
    initial = _(', ').join(force_text(i) for i in list_[:-1])
    return '%s %s %s' % (initial, force_text(last_word), force_text(list_[-1]))
get_text_list = allow_lazy(get_text_list, six.text_type)
def normalize_newlines(text):
    """Normalize CRLF and lone CR line endings to LF."""
    return re_newlines.sub('\n', force_text(text))
normalize_newlines = allow_lazy(normalize_newlines, six.text_type)
def phone2numeric(phone):
    """Convert letters in a phone number to their keypad digit equivalents."""
    keypad = {
        'a': '2', 'b': '2', 'c': '2',
        'd': '3', 'e': '3', 'f': '3',
        'g': '4', 'h': '4', 'i': '4',
        'j': '5', 'k': '5', 'l': '5',
        'm': '6', 'n': '6', 'o': '6',
        'p': '7', 'q': '7', 'r': '7', 's': '7',
        't': '8', 'u': '8', 'v': '8',
        'w': '9', 'x': '9', 'y': '9', 'z': '9',
    }
    # Characters without a mapping (digits, punctuation) pass through as-is.
    return ''.join(keypad.get(letter, letter) for letter in phone.lower())
phone2numeric = allow_lazy(phone2numeric)
# From http://www.xhaus.com/alan/python/httpcomp.html#gzip
# Used with permission.
def compress_string(s):
    """Gzip-compress the bytestring ``s`` and return the compressed bytes."""
    buf = BytesIO()
    with GzipFile(mode='wb', compresslevel=6, fileobj=buf) as zfile:
        zfile.write(s)
    return buf.getvalue()
class StreamingBuffer(object):
    """A minimal file-like sink that buffers writes until drained via read().

    compress_sequence() hands this to GzipFile as its fileobj and
    periodically drains it to yield compressed chunks.
    """
    def __init__(self):
        self.vals = []

    def write(self, val):
        self.vals.append(val)

    def read(self):
        # Drain: join everything written so far and reset the buffer.
        data = b''.join(self.vals)
        self.vals = []
        return data

    def flush(self):
        # No-op: data is held until read() is called.
        return

    def close(self):
        # No-op: nothing to release.
        return
# Like compress_string, but for iterators of strings.
def compress_sequence(sequence):
    """Lazily gzip an iterable of bytestrings, yielding compressed chunks."""
    sink = StreamingBuffer()
    zfile = GzipFile(mode='wb', compresslevel=6, fileobj=sink)
    # The gzip header is available before any payload has been written.
    yield sink.read()
    for chunk in sequence:
        zfile.write(chunk)
        zfile.flush()
        yield sink.read()
    zfile.close()
    # Final chunk: any remaining compressed data plus the gzip trailer.
    yield sink.read()
# Matches any non-ASCII character, for \uXXXX escaping below.
ustring_re = re.compile("([\u0080-\uffff])")

def javascript_quote(s, quote_double_quotes=False):
    """Escape ``s`` for safe inclusion in a JavaScript string literal.

    Backslashes, CR, LF, TAB and single quotes are backslash-escaped and
    every non-ASCII character is emitted as a ``\\uXXXX`` sequence.  When
    ``quote_double_quotes`` is True, double quotes are replaced with the
    HTML entity ``&quot;`` so the result can also live inside a
    double-quoted HTML attribute.

    Accepts ``bytes`` (decoded as UTF-8) or text; any other type raises
    TypeError.
    """
    def fix(match):
        return "\\u%04x" % ord(match.group(1))

    if type(s) == bytes:
        s = s.decode('utf-8')
    elif type(s) != six.text_type:
        raise TypeError(s)
    s = s.replace('\\', '\\\\')
    s = s.replace('\r', '\\r')
    s = s.replace('\n', '\\n')
    s = s.replace('\t', '\\t')
    s = s.replace("'", "\\'")
    if quote_double_quotes:
        # Bug fix: the replacement here was a no-op ('"' -> '"').  Upstream
        # Django replaces double quotes with the HTML entity &quot; so the
        # escaped string is safe inside a double-quoted HTML attribute.
        s = s.replace('"', '&quot;')
    return str(ustring_re.sub(fix, s))
javascript_quote = allow_lazy(javascript_quote, six.text_type)
# Expression to match some_token and some_token="with spaces" (and similarly
# for single-quoted strings).
smart_split_re = re.compile(r"""
    ((?:
        [^\s'"]*
        (?:
            (?:"(?:[^"\\]|\\.)*" | '(?:[^'\\]|\\.)*')
            [^\s'"]*
        )+
    ) | \S+)
""", re.VERBOSE)

def smart_split(text):
    r"""
    Split ``text`` on whitespace, keeping quoted phrases together.
    Supports both single and double quotes, and supports escaping quotes
    with backslashes. Tokens keep their surrounding quote marks and their
    escapes intact, so the results can be post-processed with
    unescape_string_literal().
    >>> list(smart_split(r'This is "a person\'s" test.'))
    ['This', 'is', '"a person\\\'s"', 'test.']
    >>> list(smart_split(r"Another 'person\'s' test."))
    ['Another', "'person\\'s'", 'test.']
    >>> list(smart_split(r'A "\"funky\" style" test.'))
    ['A', '"\\"funky\\" style"', 'test.']
    """
    for match in smart_split_re.finditer(force_text(text)):
        yield match.group(0)
def _replace_entity(match):
    # Substitution callback for _entity_re: resolve one HTML entity reference.
    text = match.group(1)
    if text[0] == '#':
        # Numeric character reference: decimal, or hex with an x/X prefix.
        text = text[1:]
        try:
            if text[0] in 'xX':
                c = int(text[1:], 16)
            else:
                c = int(text)
            return six.unichr(c)
        except ValueError:
            # Malformed or out-of-range number: leave the entity untouched.
            return match.group(0)
    else:
        try:
            # Named entity, e.g. &amp; -> '&'.
            return six.unichr(html_entities.name2codepoint[text])
        except (ValueError, KeyError):
            # Unknown entity name: leave it untouched.
            return match.group(0)
# Matches &name;, &#123; and &#x1F; style entity references.
_entity_re = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
def unescape_entities(text):
    # Replace every HTML entity reference in text with its character.
    return _entity_re.sub(_replace_entity, text)
unescape_entities = allow_lazy(unescape_entities, six.text_type)
def unescape_string_literal(s):
    r"""
    Strip the surrounding quotes from a string literal and unescape any
    escaped quotes and backslashes inside it::
        >>> unescape_string_literal('"abc"')
        'abc'
        >>> unescape_string_literal("'abc'")
        'abc'
        >>> unescape_string_literal('"a \"bc\""')
        'a "bc"'
        >>> unescape_string_literal("'\'ab\' c'")
        "'ab' c"
    """
    if s[0] not in "\"'" or s[-1] != s[0]:
        raise ValueError("Not a string literal: %r" % s)
    quote_char = s[0]
    body = s[1:-1]
    return body.replace('\\' + quote_char, quote_char).replace('\\\\', '\\')
unescape_string_literal = allow_lazy(unescape_string_literal)
def slugify(value):
    """
    Convert to lowercase, remove characters that aren't alphanumerics,
    underscores, or hyphens, convert runs of spaces/hyphens to a single
    hyphen, and strip leading and trailing whitespace.
    """
    ascii_value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
    cleaned = re.sub(r'[^\w\s-]', '', ascii_value).strip().lower()
    return mark_safe(re.sub(r'[-\s]+', '-', cleaned))
slugify = allow_lazy(slugify, six.text_type)
def camel_case_to_spaces(value):
    """
    Insert spaces between CamelCase word boundaries, lower-case the result,
    and strip leading/trailing whitespace.
    """
    spaced = re_camel_case.sub(r' \1', value)
    return spaced.strip().lower()
| |
"""App Spec Model"""
# pylint: disable=no-self-argument,no-self-use
# standard library
from copy import deepcopy
from typing import List, Optional
# third-party
from pydantic import BaseModel, Field, validator
from semantic_version import Version
# first-party
from tcex.app_config.models.install_json_model import (
FeedsModel,
InstallJsonCommonModel,
InstallJsonOrganizationModel,
OutputVariablesModel,
ParamsModel,
RetryModel,
TypeEnum,
snake_to_camel,
)
from tcex.app_config.models.job_json_model import JobJsonCommonModel
from tcex.pleb.none_model import NoneModel
__all__ = ['AppSpecYmlModel']
class FeedsSpecModel(FeedsModel):
    """Model for app_spec.organization.feeds."""
    # Extends the install.json feeds model with an optional embedded job
    # definition (job.json content) for the feed.
    job: Optional[JobJsonCommonModel] = Field(None, description='')
class NotesPerActionModel(BaseModel):
    """Model for app_spec.notes_per_action."""
    # The action name this note applies to (matched by get_note_per_action).
    action: str = Field(..., description='The action name.')
    note: str = Field(..., description='The note describing the action.')
    class Config:
        """DataModel Config"""
        # serialize/deserialize fields using camelCase aliases
        alias_generator = snake_to_camel
        # re-validate field values on assignment, not just on construction
        validate_assignment = True
class OrganizationModel(InstallJsonOrganizationModel):
    """Model for app_spec.organization."""
    # Overrides the base feeds field with the spec variant that may embed
    # a job definition per feed.
    feeds: Optional[List[FeedsSpecModel]] = Field(None, description='')
    class Config:
        """DataModel Config"""
        alias_generator = snake_to_camel
        validate_assignment = True
class OutputVariablesSpecModel(OutputVariablesModel):
    """Model for app_spec.outputs.output_variables."""
    # app_spec-only flag: disabled outputs are filtered out when generating
    # install.json/layout.json (see AppSpecYmlModel.output_variables).
    disabled: Optional[bool] = Field(
        False,
        description='If True, the output will not be included in ij/lj files.',
    )
    # Defaults to 'String' rather than being required as in the base model.
    type: str = Field(
        'String',
        description='The output variable type (e.g., String, TCEntity, etc).',
    )
    class Config:
        """DataModel Config"""
        alias_generator = snake_to_camel
        validate_assignment = True
class OutputDataModel(BaseModel):
    """Model for app_spec.output_data."""
    display: Optional[str] = Field(
        None,
        description='The display clause that controls visibility of the output.',
    )
    output_variables: List[OutputVariablesSpecModel] = Field(
        ...,
        description='An array of output variables.',
    )
    class Config:
        """DataModel Config"""
        alias_generator = snake_to_camel
        validate_assignment = True
    @validator('display')
    def _display(cls, v: str):
        """Normalize "always True" expression for display clause."""
        # "tc_action not in ('')" always evaluates truthy, so collapse it to
        # the canonical always-on display clause '1'.
        if v is not None and v.lower() == '''tc_action not in ('')''':
            v = '1'
        return v  # pragma: no cover
class ParamsSpecModel(ParamsModel):
    """Model for app_spec.params."""
    # app_spec-only field; stripped before params are written to install.json
    # (see AppSpecYmlModel.params).
    display: Optional[str] = Field(
        None,
        description='The display clause from the layout.json file.',
    )
    # app_spec-only flag: disabled params are filtered out when generating
    # install.json/layout.json.
    disabled: Optional[bool] = Field(
        False,
        description='If True, the parameter will not be included in ij/lj files.',
    )
    # Defaults to String rather than being required as in the base model.
    type: TypeEnum = Field(
        TypeEnum.String,
        description='',
    )
    class Config:
        """DataModel Config"""
        alias_generator = snake_to_camel
        # try all union member types before coercing
        smart_union = True
        # store enum values (strings) instead of enum members
        use_enum_values = True
        validate_assignment = True
class PlaybookSpecModel(BaseModel):
    """Model for app_spec.playbook."""
    # Optional retry settings carried through to install.json's playbook block.
    retry: Optional[RetryModel] = Field(
        None,
        description='',
    )
    class Config:
        """DataModel Config"""
        alias_generator = snake_to_camel
        validate_assignment = True
class ReleaseNoteModel(BaseModel):
    """Model for app_spec.releaseNotes."""
    notes: List[str] = Field(
        ...,
        description='One or more notes for the release.',
    )
    version: str = Field(
        ...,
        description='The version of the release.',
    )
    class Config:
        """DataModel Config"""
        alias_generator = snake_to_camel
        validate_assignment = True
class SectionsModel(BaseModel):
    """Model for app_spec.sections."""
    # Section title shown in the layout.json inputs array.
    section_name: str = Field(
        ...,
        description='The name of the section.',
    )
    params: List[ParamsSpecModel] = Field(
        ...,
        description='A list of input parameter data.',
    )
    class Config:
        """DataModel Config"""
        alias_generator = snake_to_camel
        validate_assignment = True
class AppSpecYmlModel(InstallJsonCommonModel):
    """Model for the app_spec.yml file.

    Aggregates everything needed to generate install.json (params,
    output_variables), layout.json (inputs, outputs, requires_layout) and
    README content (release_notes_formatted, note_per_action_formatted).
    """

    category: str = Field(
        ...,
        description='The category of the App. Also playbook.type for playbook Apps.',
    )
    note_per_action: Optional[List[NotesPerActionModel]] = Field(
        None,
        description='',
    )
    organization: Optional[OrganizationModel] = Field(
        None,
        description='A section for settings related to the organization (job) Apps.',
    )
    playbook: Optional[PlaybookSpecModel] = Field(
        None,
        description='The playbook section of the install.json.',
    )
    release_notes: List[ReleaseNoteModel] = Field(
        ...,
        description='The release notes for the App.',
    )
    schema_version: str = Field(
        '1.0.0',
        description='The version of the App Spec schema.',
    )
    sections: List[SectionsModel] = Field(
        ...,
        description='Layout sections for an App including params.',
    )
    output_data: Optional[List[OutputDataModel]] = Field(
        None,
        description='The outputs data for Playbook and Service Apps.',
    )
    output_prefix: Optional[str] = Field(
        None,
        description=(
            'The prefix for output variables, used for advanced request outputs. This value '
            'should match what is passed to the advanced request method in the playbook App.'
        ),
    )

    @validator('schema_version')
    def _version(cls, v: str):
        """Return a version object for "version" fields."""
        if v is not None:
            return Version(v)
        return v  # pragma: no cover

    @validator('output_prefix', always=True, pre=True)
    def _output_prefix(cls, v: str, values: dict):
        """Validate output_prefix is set when required."""
        if 'advancedRequest' in values.get('features', []):
            if v is None:
                raise ValueError(
                    'The outputPrefix field is required when feature advancedRequest is enabled.'
                )
        else:
            # remove output_prefix if not required
            v = None
        return v

    class Config:
        """DataModel Config"""
        alias_generator = snake_to_camel
        validate_assignment = True

    @property
    def inputs(self) -> list:
        """Return lj.inputs built from each section's enabled params."""
        _inputs = []
        for sequence, section in enumerate(self.sections, start=1):
            # don't include section with no params
            if not [sp for sp in section.params if sp.disabled is False]:
                continue
            # build params
            parameters = []
            for p in section.params:
                if p.disabled is True:
                    continue
                param = {'name': p.name}
                if p.display:
                    param['display'] = p.display
                parameters.append(param)
            # append section
            _inputs.append(
                {
                    'parameters': parameters,
                    'sequence': sequence,
                    'title': section.section_name,
                }
            )
        return _inputs

    def get_note_per_action(self, action: str) -> 'NotesPerActionModel':
        """Return the note_per_action for the provided action."""
        # Bug fix: note_per_action is Optional (default None); guard with
        # `or []` so Apps without per-action notes don't raise TypeError.
        for npa in self.note_per_action or []:
            if npa.action == action:
                return npa
        return NoneModel()

    @property
    def note_per_action_formatted(self) -> List[str]:
        """Return formatted note_per_action."""
        _note_per_action = ['\n\nThe following actions are included:']
        _note_per_action.extend(
            # guard: note_per_action may be None
            [f'- **{npa.action}** - {npa.note}' for npa in self.note_per_action or []]
        )
        return _note_per_action

    @property
    def outputs(self) -> List[OutputVariablesModel]:
        """Return lj.outputs."""
        _outputs = []
        # Bug fix: output_data is Optional (default None); use the same
        # `or []` guard already used by the output_variables property.
        for output_data in self.output_data or []:
            for output_variable in output_data.output_variables:
                if output_variable.disabled is True:
                    continue
                _outputs.append(
                    {
                        'display': output_data.display,
                        'name': output_variable.name,
                    }
                )
        return _outputs

    @property
    def output_variables(self) -> List[OutputVariablesModel]:
        """Return ij.playbook.outputVariables."""
        return [
            ov
            for output in self.output_data or []
            for ov in output.output_variables or []
            if ov.disabled is False
        ]

    @property
    def params(self) -> List[ParamsModel]:
        """Return ij.params."""
        _params = []
        sequence = 1
        # deepcopy so the mutations below can't corrupt the model itself
        for section in deepcopy(self.sections):
            for param in section.params:
                if param.disabled is True:
                    continue
                # set default playbookDataType for String type params
                self._set_default_playbook_data_type(param)
                # set default validValues for String type params
                self._set_default_valid_values(param)
                # remove the disabled field (not supported in install.json)
                param.disabled = None
                # remove the display field (not supported in install.json)
                param.display = None
                # add the sequence number
                param.sequence = sequence
                _params.append(param)
                # increment sequence
                sequence += 1
        return _params

    @property
    def release_notes_formatted(self) -> List[str]:
        """Return readme_md.releaseNotes."""
        _release_notes = ['## Release Notes']
        _release_notes.append('')
        for release_note in self.release_notes:
            _release_notes.append(f'### {release_note.version}')
            _release_notes.append('')
            _release_notes.extend([f'* {rn}' for rn in release_note.notes])
            _release_notes.append('')
        return _release_notes

    @property
    def requires_layout(self):
        """Return True if App requires a layout.json file."""
        if self.runtime_level.lower() == 'organization':
            return False
        for section in self.sections:
            for param in section.params:
                if param.display:
                    return True
        # Bug fix: output_data is Optional (default None); guard with `or []`.
        for output_data in self.output_data or []:
            if output_data.display not in [None, '1', '']:
                return True
        return False

    def _set_default_playbook_data_type(self, param: 'ParamsSpecModel'):
        """Set default playbookDataType for String-like params.

        Playbook Data Types rule:
        * Input type is "String" (or "EditChoice"/"KeyValueList")
        * No "playbookDataType" values are provided
        """
        if self.runtime_level.lower() == 'playbook':
            # by rule any input of type String must have String and playbookDataType
            if (
                param.type in ['EditChoice', 'KeyValueList', 'String']
                and not param.playbook_data_type
            ):
                param.playbook_data_type = ['String']

    def _set_default_valid_values(self, param: 'ParamsSpecModel'):
        """Set default validValues for String-like params.

        Valid Values rule:
        * Input type is "String" or "KeyValueList"
        * No "validValues" values are provided
        * The playbookDataType supports String (or the App is not a playbook App)
        """
        if (
            param.type in ['KeyValueList', 'String']
            and not param.valid_values
            and (
                'String' in param.playbook_data_type
                or self.runtime_level.lower()
                in ['organization', 'triggerservice', 'webhooktriggerservice']
            )
        ):
            param.valid_values = ['${TEXT}']
            # encrypted params get the keychain placeholder instead
            if param.encrypt is True:
                param.valid_values = ['${KEYCHAIN}']
| |
import os
import time
import uuid
from kafka import * # noqa
from kafka.common import * # noqa
from kafka.codec import has_gzip, has_snappy
from fixtures import ZookeeperFixture, KafkaFixture
from testutil import *
class TestKafkaProducerIntegration(KafkaIntegrationTestCase):
    """Integration tests for kafka producers against a live broker.

    setUpClass starts zookeeper/kafka fixtures only when KAFKA_VERSION is
    set in the environment; the @kafka_versions decorator handles skipping
    otherwise.

    NOTE(review): the tests use `async=True` keyword arguments — `async` is
    a reserved word in Python 3.7+, so this module targets Python 2 / early
    Python 3 era kafka-python.
    """
    topic = 'produce_topic'
    @classmethod
    def setUpClass(cls):  # noqa
        if not os.environ.get('KAFKA_VERSION'):
            return
        cls.zk = ZookeeperFixture.instance()
        cls.server = KafkaFixture.instance(0, cls.zk.host, cls.zk.port)
    @classmethod
    def tearDownClass(cls):  # noqa
        if not os.environ.get('KAFKA_VERSION'):
            return
        cls.server.close()
        cls.zk.close()
    @kafka_versions("all")
    def test_produce_many_simple(self):
        # Two consecutive batches of 100 plain messages; offsets must advance
        # by exactly 100 each time.
        start_offset = self.current_offset(self.topic, 0)
        self.assert_produce_request(
            [ create_message("Test message %d" % i) for i in range(100) ],
            start_offset,
            100,
        )
        self.assert_produce_request(
            [ create_message("Test message %d" % i) for i in range(100) ],
            start_offset+100,
            100,
        )
    @kafka_versions("all")
    def test_produce_10k_simple(self):
        start_offset = self.current_offset(self.topic, 0)
        self.assert_produce_request(
            [ create_message("Test message %d" % i) for i in range(10000) ],
            start_offset,
            10000,
        )
    @kafka_versions("all")
    def test_produce_many_gzip(self):
        # Each gzip message wraps 100 inner messages, so the offset advances
        # by 200 for the two compressed messages.
        start_offset = self.current_offset(self.topic, 0)
        message1 = create_gzip_message(["Gzipped 1 %d" % i for i in range(100)])
        message2 = create_gzip_message(["Gzipped 2 %d" % i for i in range(100)])
        self.assert_produce_request(
            [ message1, message2 ],
            start_offset,
            200,
        )
    @kafka_versions("all")
    def test_produce_many_snappy(self):
        self.skipTest("All snappy integration tests fail with nosnappyjava")
        start_offset = self.current_offset(self.topic, 0)
        self.assert_produce_request([
            create_snappy_message(["Snappy 1 %d" % i for i in range(100)]),
            create_snappy_message(["Snappy 2 %d" % i for i in range(100)]),
            ],
            start_offset,
            200,
        )
    @kafka_versions("all")
    def test_produce_mixed(self):
        # One plain message plus one gzip batch of 100.
        start_offset = self.current_offset(self.topic, 0)
        msg_count = 1+100
        messages = [
            create_message("Just a plain message"),
            create_gzip_message(["Gzipped %d" % i for i in range(100)]),
        ]
        # All snappy integration tests fail with nosnappyjava
        if False and has_snappy():
            msg_count += 100
            messages.append(create_snappy_message(["Snappy %d" % i for i in range(100)]))
        self.assert_produce_request(messages, start_offset, msg_count)
    @kafka_versions("all")
    def test_produce_100k_gzipped(self):
        start_offset = self.current_offset(self.topic, 0)
        self.assert_produce_request([
            create_gzip_message(["Gzipped batch 1, message %d" % i for i in range(50000)])
            ],
            start_offset,
            50000,
        )
        self.assert_produce_request([
            create_gzip_message(["Gzipped batch 1, message %d" % i for i in range(50000)])
            ],
            start_offset+50000,
            50000,
        )
    ############################
    #   SimpleProducer Tests   #
    ############################
    @kafka_versions("all")
    def test_simple_producer(self):
        start_offset0 = self.current_offset(self.topic, 0)
        start_offset1 = self.current_offset(self.topic, 1)
        producer = SimpleProducer(self.client)
        # Goes to first partition, randomly.
        resp = producer.send_messages(self.topic, self.msg("one"), self.msg("two"))
        self.assert_produce_response(resp, start_offset0)
        # Goes to the next partition, randomly.
        resp = producer.send_messages(self.topic, self.msg("three"))
        self.assert_produce_response(resp, start_offset1)
        self.assert_fetch_offset(0, start_offset0, [ self.msg("one"), self.msg("two") ])
        self.assert_fetch_offset(1, start_offset1, [ self.msg("three") ])
        # Goes back to the first partition because there's only two partitions
        resp = producer.send_messages(self.topic, self.msg("four"), self.msg("five"))
        self.assert_produce_response(resp, start_offset0+2)
        self.assert_fetch_offset(0, start_offset0, [ self.msg("one"), self.msg("two"), self.msg("four"), self.msg("five") ])
        producer.stop()
    @kafka_versions("all")
    def test_producer_random_order(self):
        # With random_start, consecutive sends still alternate partitions.
        producer = SimpleProducer(self.client, random_start = True)
        resp1 = producer.send_messages(self.topic, self.msg("one"), self.msg("two"))
        resp2 = producer.send_messages(self.topic, self.msg("three"))
        resp3 = producer.send_messages(self.topic, self.msg("four"), self.msg("five"))
        self.assertEqual(resp1[0].partition, resp3[0].partition)
        self.assertNotEqual(resp1[0].partition, resp2[0].partition)
    @kafka_versions("all")
    def test_producer_ordered_start(self):
        # Without random_start, round-robin always begins at partition 0.
        producer = SimpleProducer(self.client, random_start = False)
        resp1 = producer.send_messages(self.topic, self.msg("one"), self.msg("two"))
        resp2 = producer.send_messages(self.topic, self.msg("three"))
        resp3 = producer.send_messages(self.topic, self.msg("four"), self.msg("five"))
        self.assertEqual(resp1[0].partition, 0)
        self.assertEqual(resp2[0].partition, 1)
        self.assertEqual(resp3[0].partition, 0)
    @kafka_versions("all")
    def test_round_robin_partitioner(self):
        start_offset0 = self.current_offset(self.topic, 0)
        start_offset1 = self.current_offset(self.topic, 1)
        producer = KeyedProducer(self.client, partitioner=RoundRobinPartitioner)
        resp1 = producer.send(self.topic, "key1", self.msg("one"))
        resp2 = producer.send(self.topic, "key2", self.msg("two"))
        resp3 = producer.send(self.topic, "key3", self.msg("three"))
        resp4 = producer.send(self.topic, "key4", self.msg("four"))
        self.assert_produce_response(resp1, start_offset0+0)
        self.assert_produce_response(resp2, start_offset1+0)
        self.assert_produce_response(resp3, start_offset0+1)
        self.assert_produce_response(resp4, start_offset1+1)
        self.assert_fetch_offset(0, start_offset0, [ self.msg("one"), self.msg("three") ])
        self.assert_fetch_offset(1, start_offset1, [ self.msg("two"), self.msg("four") ])
        producer.stop()
    @kafka_versions("all")
    def test_hashed_partitioner(self):
        # Expected partition per key reflects HashedPartitioner's hash of the
        # integer keys used below.
        start_offset0 = self.current_offset(self.topic, 0)
        start_offset1 = self.current_offset(self.topic, 1)
        producer = KeyedProducer(self.client, partitioner=HashedPartitioner)
        resp1 = producer.send(self.topic, 1, self.msg("one"))
        resp2 = producer.send(self.topic, 2, self.msg("two"))
        resp3 = producer.send(self.topic, 3, self.msg("three"))
        resp4 = producer.send(self.topic, 3, self.msg("four"))
        resp5 = producer.send(self.topic, 4, self.msg("five"))
        self.assert_produce_response(resp1, start_offset1+0)
        self.assert_produce_response(resp2, start_offset0+0)
        self.assert_produce_response(resp3, start_offset1+1)
        self.assert_produce_response(resp4, start_offset1+2)
        self.assert_produce_response(resp5, start_offset0+1)
        self.assert_fetch_offset(0, start_offset0, [ self.msg("two"), self.msg("five") ])
        self.assert_fetch_offset(1, start_offset1, [ self.msg("one"), self.msg("three"), self.msg("four") ])
        producer.stop()
    @kafka_versions("all")
    def test_acks_none(self):
        # ACK_NOT_REQUIRED: no broker response expected, but the message
        # should still land in the partition.
        start_offset0 = self.current_offset(self.topic, 0)
        start_offset1 = self.current_offset(self.topic, 1)
        producer = SimpleProducer(self.client, req_acks=SimpleProducer.ACK_NOT_REQUIRED)
        resp = producer.send_messages(self.topic, self.msg("one"))
        self.assertEquals(len(resp), 0)
        self.assert_fetch_offset(0, start_offset0, [ self.msg("one") ])
        producer.stop()
    @kafka_versions("all")
    def test_acks_local_write(self):
        start_offset0 = self.current_offset(self.topic, 0)
        start_offset1 = self.current_offset(self.topic, 1)
        producer = SimpleProducer(self.client, req_acks=SimpleProducer.ACK_AFTER_LOCAL_WRITE)
        resp = producer.send_messages(self.topic, self.msg("one"))
        self.assert_produce_response(resp, start_offset0)
        self.assert_fetch_offset(0, start_offset0, [ self.msg("one") ])
        producer.stop()
    @kafka_versions("all")
    def test_acks_cluster_commit(self):
        start_offset0 = self.current_offset(self.topic, 0)
        start_offset1 = self.current_offset(self.topic, 1)
        producer = SimpleProducer(
            self.client,
            req_acks=SimpleProducer.ACK_AFTER_CLUSTER_COMMIT)
        resp = producer.send_messages(self.topic, self.msg("one"))
        self.assert_produce_response(resp, start_offset0)
        self.assert_fetch_offset(0, start_offset0, [ self.msg("one") ])
        producer.stop()
    @kafka_versions("all")
    def test_batched_simple_producer__triggers_by_message(self):
        # Batch flushes once batch_send_every_n (5) messages accumulate.
        start_offset0 = self.current_offset(self.topic, 0)
        start_offset1 = self.current_offset(self.topic, 1)
        producer = SimpleProducer(self.client,
                                  batch_send=True,
                                  batch_send_every_n=5,
                                  batch_send_every_t=20)
        # Send 5 messages and do a fetch
        resp = producer.send_messages(self.topic,
                                      self.msg("one"),
                                      self.msg("two"),
                                      self.msg("three"),
                                      self.msg("four"),
                                      )
        # Batch mode is async. No ack
        self.assertEquals(len(resp), 0)
        # It hasn't sent yet
        self.assert_fetch_offset(0, start_offset0, [])
        self.assert_fetch_offset(1, start_offset1, [])
        resp = producer.send_messages(self.topic,
                                      self.msg("five"),
                                      self.msg("six"),
                                      self.msg("seven"),
                                      )
        # Batch mode is async. No ack
        self.assertEquals(len(resp), 0)
        self.assert_fetch_offset(0, start_offset0, [
            self.msg("one"),
            self.msg("two"),
            self.msg("three"),
            self.msg("four"),
        ])
        self.assert_fetch_offset(1, start_offset1, [
            self.msg("five"),
            # self.msg("six"),
            # self.msg("seven"),
        ])
        producer.stop()
    @kafka_versions("all")
    def test_batched_simple_producer__triggers_by_time(self):
        # Batch flushes after batch_send_every_t (5s) even though the count
        # threshold (100) is never reached.
        start_offset0 = self.current_offset(self.topic, 0)
        start_offset1 = self.current_offset(self.topic, 1)
        producer = SimpleProducer(self.client,
                                  batch_send=True,
                                  batch_send_every_n=100,
                                  batch_send_every_t=5)
        # Send 5 messages and do a fetch
        resp = producer.send_messages(self.topic,
                                      self.msg("one"),
                                      self.msg("two"),
                                      self.msg("three"),
                                      self.msg("four"),
                                      )
        # Batch mode is async. No ack
        self.assertEquals(len(resp), 0)
        # It hasn't sent yet
        self.assert_fetch_offset(0, start_offset0, [])
        self.assert_fetch_offset(1, start_offset1, [])
        resp = producer.send_messages(self.topic,
                                      self.msg("five"),
                                      self.msg("six"),
                                      self.msg("seven"),
                                      )
        # Batch mode is async. No ack
        self.assertEquals(len(resp), 0)
        # Wait the timeout out
        time.sleep(5)
        self.assert_fetch_offset(0, start_offset0, [
            self.msg("one"),
            self.msg("two"),
            self.msg("three"),
            self.msg("four"),
        ])
        self.assert_fetch_offset(1, start_offset1, [
            self.msg("five"),
            self.msg("six"),
            self.msg("seven"),
        ])
        producer.stop()
    @kafka_versions("all")
    def test_async_simple_producer(self):
        start_offset0 = self.current_offset(self.topic, 0)
        start_offset1 = self.current_offset(self.topic, 1)
        producer = SimpleProducer(self.client, async=True)
        resp = producer.send_messages(self.topic, self.msg("one"))
        self.assertEquals(len(resp), 0)
        self.assert_fetch_offset(0, start_offset0, [ self.msg("one") ])
        producer.stop()
    @kafka_versions("all")
    def test_async_keyed_producer(self):
        start_offset0 = self.current_offset(self.topic, 0)
        start_offset1 = self.current_offset(self.topic, 1)
        producer = KeyedProducer(self.client, partitioner = RoundRobinPartitioner, async=True)
        resp = producer.send(self.topic, "key1", self.msg("one"))
        self.assertEquals(len(resp), 0)
        self.assert_fetch_offset(0, start_offset0, [ self.msg("one") ])
        producer.stop()
    def assert_produce_request(self, messages, initial_offset, message_ct):
        # Produce to partition 0 and verify both the response offset and the
        # resulting high-water mark.
        produce = ProduceRequest(self.topic, 0, messages=messages)
        # There should only be one response message from the server.
        # This will throw an exception if there's more than one.
        resp = self.client.send_produce_request([ produce ])
        self.assert_produce_response(resp, initial_offset)
        self.assertEqual(self.current_offset(self.topic, 0), initial_offset + message_ct)
    def assert_produce_response(self, resp, initial_offset):
        # Exactly one error-free response at the expected offset.
        self.assertEqual(len(resp), 1)
        self.assertEqual(resp[0].error, 0)
        self.assertEqual(resp[0].offset, initial_offset)
    def assert_fetch_offset(self, partition, start_offset, expected_messages):
        # Fetch from start_offset and verify message payloads and the
        # advanced high-water mark.
        # There should only be one response message from the server.
        # This will throw an exception if there's more than one.
        resp, = self.client.send_fetch_request([ FetchRequest(self.topic, partition, start_offset, 1024) ])
        self.assertEquals(resp.error, 0)
        self.assertEquals(resp.partition, partition)
        messages = [ x.message.value for x in resp.messages ]
        self.assertEqual(messages, expected_messages)
        self.assertEquals(resp.highwaterMark, start_offset+len(expected_messages))
| |
#!/usr/bin/env python
# Load common imports and system envs to build the core object
import sys, os
# For running inside the docker container use:
#import matplotlib
#matplotlib.use('Agg')
# Load the Environment:
os.environ["ENV_DEPLOYMENT_TYPE"] = "JustRedis"
from src.common.inits_for_python import *
#####################################################################
#
# Start Arg Processing:
#
action = "ML Regressor"
parser = argparse.ArgumentParser(description="Parser for Action: " + str(action))
parser.add_argument('-f', '--csvfile', help='CSV File', dest='csvfile')
parser.add_argument('-n', '--dsname', help='Dataset Name', dest='ds_name')
parser.add_argument('-b', '--s3bucket', help='S3 Bucket (Optional)', dest='s_bucket')
parser.add_argument('-k', '--s3key', help='S3 Key (Optional)', dest='s_key')
parser.add_argument('-u', '--usedate', help='Use Date', dest='usedate')
parser.add_argument("-d", "--debug", help="Debug Flag", dest='debug', action='store_true')
args = parser.parse_args()
if args.debug:
debug = True
core.enable_debug()
ds_name = "iris_regressor"
if args.ds_name:
ds_name = str(args.ds_name).strip().lstrip()
now = datetime.datetime.now()
cur_date = now
cur_date_str = now.strftime("%Y-%m-%d")
if args.usedate:
cur_date_str = str(args.usedate)
send_email = "1" # by default send email
s3_bucket = "demodatasets"
s3_key = "dataset_" + str(str(ds_name).upper().strip().lstrip()) + "_" + str(cur_date_str) + ".csv"
analysis_version = 2
if args.s_bucket:
s3_bucket = str(args.s_bucket)
if args.s_key:
s3_key = str(args.s_key)
dataset_filename = "iris.csv"
ml_csv = str(os.getenv("ENV_DATA_SRC_DIR", "/opt/work/data/src")) + "/" + dataset_filename
if args.csvfile:
ml_csv = str(args.csvfile)
#
# End Arg Processing
#
#####################################################################
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
if os.path.exists(ml_csv) == False:
if os.path.exists("/opt/work/examples/datasets/iris.csv"):
org_path = "/opt/work/examples/datasets/iris.csv"
os.system("cp " + str(org_path) + " " + ml_csv)
elif os.path.exists(os.getenv("ENV_PROJ_REPO_DIR", "/opt/work") + "/examples/datasets/iris.csv"):
org_path = os.getenv("ENV_PROJ_REPO_DIR", "/opt/work") + "/examples/datasets/iris.csv"
os.system("cp " + str(org_path) + " " + ml_csv)
else:
lg("Recreating iris dataset: /opt/work/bins/ml/downloaders/download_iris.py", 6)
os.system("/opt/work/bins/ml/downloaders/download_iris.py")
if os.path.exists(ml_csv) == False:
lg("Failed to recreate iris dataset with: /opt/work/bins/ml/downloaders/download_iris.py", 0)
lg("Stopping", 6)
sys.exit(1)
# end of checking if the csv file is available
lg("Processing ML Predictions for CSV(" + str(ml_csv) + ")", 6)
max_features_to_display = 10
num_estimators = 200
show_importance_plot = True
show_confusion_plot = True
random_state = 0
# For forecasting:
units_ahead_set = []
units_ahead = 0
now = datetime.datetime.now()
title_prefix = ds_name
confusion_plot_title = ds_name + " - Random Forest Confusion Matrix\nThe darker the square on the diagonal the better the predictions\n\n"
featimp_plot_title = ds_name + " - Feature Importance with Estimators(" + str(num_estimators) + ")"
row_names = [ "Actual" ] # CM - Y Axis
col_names = [ "Predictions" ] # CM - X Axis
num_jobs = 8
ranked_features = []
org_ranked_features = []
ml_type = "Predict with Filter"
ml_algo_name = "xgb-regressor"
price_min = 0.10
train_test_ratio = 0.1
# What column has the labeled targets as integers? (added-manually to the dataset)
target_column_name = "ResultLabel"
# possible values in the Target Column
target_column_values = [ "Iris-setosa", "Iris-versicolor", "Iris-virginica" ]
# What columns can the algorithms use for training and learning?
feature_column_names = [ "SepalLength", "SepalWidth", "PetalLength", "PetalWidth", "ResultTargetValue" ]
# What column holds string labels for the Target Column?
label_column_name = "ResultLabel"
ignore_features = [ # Prune non-int/float columns as needed:
target_column_name,
label_column_name
]
algo_nodes = []
forcast_df = None
ml_request = {
"MLType" : ml_type,
"MLAlgo" : {
"Name" : ml_algo_name,
"Version" : 1,
"Meta" : {
"UnitsAhead" : units_ahead,
"DatasetName" : ds_name,
"FilterMask" : None,
"Source" : {
"CSVFile" : ml_csv,
"S3File" : "", # <Bucket Name>:<Key>
"RedisKey" : "" # <App Name>:<Key>
},
},
"Steps" : {
"Train" :{
"LearningRate" : 0.1,
"NumEstimators" : 1000,
"Objective" : "reg:linear",
"MaxDepth" : 6,
"MaxDeltaStep" : 0,
"MinChildWeight" : 1,
"Gamma" : 0,
"SubSample" : 0.8,
"ColSampleByTree" : 0.8,
"ColSampleByLevel" : 1.0,
"RegAlpha" : 0,
"RegLambda" : 1,
"BaseScore" : 0.5,
"NumThreads" : -1, # infinite = -1
"ScaledPositionWeight" : 1,
"Seed" : 27,
"Debug" : True
}
}
},
"FeatureColumnNames": feature_column_names,
"TargetColumnName" : target_column_name,
"TargetColumnValues": target_column_values,
"IgnoreFeatures" : ignore_features,
"UnitsAheadSet" : units_ahead_set,
"UnitsAheadType" : "",
"PredictionType" : "Predict",
"MaxFeatures" : 10,
"Version" : 1,
"TrackingType" : "UseTargetColAndUnits",
"TrackingName" : core.to_upper(ds_name),
"TrackingID" : "ML_" + ds_name + "_" + str(core.build_unique_key()),
"Debug" : False
}
# Load dataset to build
csv_res = core.ml_load_csv_dataset(ml_request, core.get_rds(), core.get_dbs(), debug)
if csv_res["Status"] != "SUCCESS":
lg("ERROR: Failed to Load CSV(" + str(ml_request["MLAlgo"]["Meta"]["Source"]["CSVFile"]) + ")", 0)
sys.exit(1)
ds_df = csv_res["Record"]["SourceDF"]
# Build a Filter for pruning bad records out before creating the train/test sets
samples_filter_mask = (ds_df["SepalLength"] > 0.0) \
& (ds_df["PetalWidth"] > 0.0)
# For patching on the fly you can use the encoder method to replace labels with target dictionary values:
#ready_df = core.ml_encode_target_column(ds_df, "ResultLabel", "Target")
show_pair_plot = False
if show_pair_plot:
lg("Samples(" + str(len(ds_df.index)) + ") in CSV(" + str(ml_request["MLAlgo"]["Meta"]["Source"]["CSVFile"]) + ")", 6)
lg("")
print ds_df.describe()
lg("")
num_per_class = ds_df.groupby("ResultLabel").size()
print num_per_class
lg("")
pair_plot_req = {
"Title" : "Iris Dataset PairPlot",
"SourceDF" : ds_df[samples_filter_mask],
"Style" : "default",
"DiagKind" : "hist", # "kde" or "hist"
"HueColumnName" : ml_request["TargetColumnName"],
"XLabel" : "",
"YLabel" : "",
"CompareColumns": ml_request["FeatureColumnNames"],
"Size" : 3.0,
"ImgFile" : str(os.getenv("ENV_DATA_SRC_DIR", "/opt/work/data/src")) + "/" + "validate_jupyter_iris_classification_pairplot.png",
"ShowPlot" : True
}
core.sb_pair_plot(pair_plot_req)
if os.path.exists(pair_plot_req["ImgFile"]):
lg("Done Plotting Valiation Pair Plot - Saved(" + str(pair_plot_req["ImgFile"]) + ")", 5)
else:
lg("Failed to save Validation Pair Plot(" + str(pair_plot_req["ImgFile"]) + "). Please check the ENV_DATA_SRC_DIR is writeable by this user and exposed to the docker container correctly.", 0)
# end of showing a pairplot for validation
# Create a Prediction Column
ml_request["MLAlgo"]["Meta"]["SamplesFilterMask"] = samples_filter_mask
# Create a Result Column
core.enable_debug()
ml_images = []
train_results = core.ml_train_models_for_predictions(ml_request, core.get_rds(), core.get_dbs(), debug)
if train_results["Status"] != "SUCCESS":
lg("ERROR: Failed to Train Models for Predictions with Error(" + str(train_results["Error"]) + ") StoppedEarly(" + str(train_results["Record"]["StoppedEarly"]) + ")", 0)
sys.exit(1)
algo_nodes = train_results["Record"]["AlgoNodes"]
predict_row = {
"SepalLength" : 5.4,
"SepalWidth" : 3.4,
"PetalLength" : 1.7,
"PetalWidth" : 0.2,
"ResultTargetValue" : 0
}
predict_row_df = pd.DataFrame(predict_row, index=[0])
predict_req = {
"AlgoNodes" : algo_nodes,
"PredictionMask": samples_filter_mask,
"PredictionRow" : predict_row_df
}
predict_results = core.ml_compile_predictions_from_models(predict_req, core.get_rds(), core.get_dbs(), debug)
if predict_results["Status"] != "SUCCESS":
lg("ERROR: Failed to Compile Predictions from Models with Error(" + str(predict_results["Error"]) + ")", 0)
sys.exit(1)
lg("Done with Predictions", 6)
if predict_results["Status"] == "SUCCESS":
al_req = train_results["Record"]
al_req["DSName"] = ml_request["TrackingName"]
al_req["Version"] = 1
al_req["FeatureColumnNames"]= ml_request["FeatureColumnNames"]
al_req["TargetColumnName"] = ml_request["TargetColumnName"]
al_req["TargetColumnValues"]= ml_request["TargetColumnValues"]
al_req["IgnoreFeatures"] = ml_request["IgnoreFeatures"]
al_req["PredictionType"] = ml_request["PredictionType"]
al_req["ConfMatrices"] = predict_results["Record"]["ConfMatrices"]
al_req["PredictionMarkers"] = predict_results["Record"]["PredictionMarkers"]
analysis_dataset = core.ml_compile_analysis_dataset(al_req, core.get_rds(), core.get_dbs(), debug)
lg("Analyzed Models(" + str(len(analysis_dataset["Models"])) + ")", 6)
lg("-----------------------------------------------------", 6)
lg("Caching Models", 6)
cache_req = {
"Name" : "CACHE",
"Key" : "_MODELS_" + str(al_req["Tracking"]["TrackingName"]) + "_LATEST",
"TrackingID": "_MD_" + str(al_req["Tracking"]["TrackingName"]),
"Analysis" : analysis_dataset
}
cache_results = core.ml_cache_analysis_and_models(cache_req, core.get_rds(), core.get_dbs(), debug)
lg("Done Caching Models", 6)
lg("-----------------------------------------------------", 6)
lg("Creating Analysis Visualizations", 6)
# Turn this on to show the images:
analysis_dataset["ShowPlot"] = True
analysis_dataset["SourceDF"] = al_req["SourceDF"]
lg("Plotting Feature Importance", 6)
for midx,model_node in enumerate(analysis_dataset["Models"]):
predict_col = model_node["Target"]
if predict_col == "ResultTargetValue":
plot_req = {
"ImgFile" : analysis_dataset["FeatImpImgFile"],
"Model" : model_node["Model"],
"XLabel" : str(predict_col),
"YLabel" : "Importance Amount",
"Title" : str(predict_col) + " Importance Analysis",
"ShowPlot" : analysis_dataset["ShowPlot"]
}
image_list = core.sb_model_feature_importance(plot_req, debug)
for img in image_list:
ml_images.append(img)
# end of for all models
lg("Plotting PairPlots", 6)
plot_req = {
"DSName" : str(analysis_dataset["DSName"]),
"Title" : str(analysis_dataset["DSName"]) + " - Pair Plot",
"ImgFile" : str(analysis_dataset["PairPlotImgFile"]),
"SourceDF" : al_req["SourceDF"],
"HueColumnName" : target_column_name,
"CompareColumns": feature_column_names,
"Markers" : ["o", "s", "D"],
"Width" : 15.0,
"Height" : 15.0,
"ShowPlot" : analysis_dataset["ShowPlot"]
}
image_list = core.sb_pairplot(plot_req, debug)
for img in image_list:
ml_images.append(img)
lg("Plotting Confusion Matrices", 6)
plot_req = {
"DSName" : str(analysis_dataset["DSName"]),
"Title" : str(analysis_dataset["DSName"]) + " - Confusion Matrix",
"ImgFile" : str(analysis_dataset["CMatrixImgFile"]),
"SourceDF" : al_req["SourceDF"],
"ConfMatrices" : al_req["ConfMatrices"],
"Width" : 15.0,
"Height" : 15.0,
"XLabel" : "Dates",
"YLabel" : "Values",
"ShowPlot" : analysis_dataset["ShowPlot"]
}
image_list = core.sb_confusion_matrix(plot_req, debug)
for img in image_list:
ml_images.append(img)
lg("Plotting Scatters", 6)
plot_req = {
"DSName" : str(analysis_dataset["DSName"]),
"Title" : str(analysis_dataset["DSName"]) + " - Scatter Plot",
"ImgFile" : str(analysis_dataset["ScatterImgFile"]),
"SourceDF" : analysis_dataset["SourceDF"],
"UnitsAheadType" : analysis_dataset["UnitsAheadType"],
"FeatureColumnNames": analysis_dataset["FeatureColumnNames"],
"Hue" : label_column_name,
"Width" : 7.0,
"Height" : 7.0,
"XLabel" : "Dates",
"YLabel" : "Values",
"ShowPlot" : analysis_dataset["ShowPlot"]
}
image_list = core.sb_all_scatterplots(plot_req, debug)
for img in image_list:
ml_images.append(img)
lg("Plotting JointPlots", 6)
plot_req = {
"DSName" : str(analysis_dataset["DSName"]),
"Title" : str(analysis_dataset["DSName"]) + " - Joint Plot",
"ImgFile" : str(analysis_dataset["JointPlotImgFile"]),
"SourceDF" : analysis_dataset["SourceDF"],
"UnitsAheadType" : analysis_dataset["UnitsAheadType"],
"FeatureColumnNames": analysis_dataset["FeatureColumnNames"],
"Hue" : label_column_name,
"Width" : 15.0,
"Height" : 15.0,
"XLabel" : "Dates",
"YLabel" : "Values",
"ShowPlot" : analysis_dataset["ShowPlot"]
}
image_list = core.sb_all_jointplots(plot_req, debug)
for img in image_list:
ml_images.append(img)
lg("Done Creating Analysis Visualizations", 6)
lg("-----------------------------------------------------", 6)
else:
lg("", 6)
lg("ERROR: Failed Processing Predictions for Dataset(" + str(ds_name) + ") with Error:", 6)
lg(ml_results["Error"], 6)
lg("", 6)
sys.exit(2)
# end of if success
lg("", 6)
lg("Analysis Complete Saved Images(" + str(len(ml_images)) + ")", 5)
lg("", 6)
sys.exit(0)
| |
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2010 (ita)
"""
Classes related to the build phase (build, clean, install, step, etc)
The inheritance tree is the following:
"""
import os, sys, errno, re, shutil
try:
import cPickle
except ImportError:
import pickle as cPickle
from waflib import Runner, TaskGen, Utils, ConfigSet, Task, Logs, Options, Context, Errors
import waflib.Node
# Module-level constants used by BuildContext.restore()/store() and the
# cache handling below.
CACHE_DIR = 'c4che'
"""Location of the cache files"""
CACHE_SUFFIX = '_cache.py'
"""Suffix for the cache files"""
INSTALL = 1337
"""Positive value '->' install, see :py:attr:`waflib.Build.BuildContext.is_install`"""
UNINSTALL = -1337
"""Negative value '<-' uninstall, see :py:attr:`waflib.Build.BuildContext.is_install`"""
SAVED_ATTRS = 'root node_deps raw_deps task_sigs'.split()
"""Build class members to save between the runs (root, node_deps, raw_deps, task_sigs)"""
CFG_FILES = 'cfg_files'
"""Files from the build directory to hash before starting the build (``config.h`` written during the configuration)"""
POST_AT_ONCE = 0
"""Post mode: all task generators are posted before the build really starts"""
POST_LAZY = 1
"""Post mode: post the task generators group after group"""
POST_BOTH = 2
"""Post mode: post the task generators at once, then re-check them for each group"""
class BuildContext(Context.Context):
'''executes the build'''
cmd = 'build'
variant = ''
def __init__(self, **kw):
    """Initialize the build context: directories, per-variant environments,
    persistent caches and task group bookkeeping."""
    super(BuildContext, self).__init__(**kw)
    self.is_install = 0
    """Non-zero value when installing or uninstalling file"""
    self.top_dir = kw.get('top_dir', Context.top_dir)
    self.run_dir = kw.get('run_dir', Context.run_dir)
    self.post_mode = POST_AT_ONCE
    """post the task generators at once, group-by-group, or both"""
    # output directory - may be set until the nodes are considered
    self.out_dir = kw.get('out_dir', Context.out_dir)
    self.cache_dir = kw.get('cache_dir', None)
    if not self.cache_dir:
        self.cache_dir = self.out_dir + os.sep + CACHE_DIR
    # map names to environments, the '' must be defined
    self.all_envs = {}
    # ======================================= #
    # cache variables
    self.task_sigs = {}
    """Signatures of the tasks (persists between build executions)"""
    self.node_deps = {}
    """Dict of node dependencies found by :py:meth:`waflib.Task.Task.scan` (persists between build executions)"""
    self.raw_deps = {}
    """Dict of custom data returned by :py:meth:`waflib.Task.Task.scan` (persists between build executions)"""
    # list of folders that are already scanned
    # so that we do not need to stat them one more time
    self.cache_dir_contents = {}
    self.task_gen_cache_names = {}
    self.launch_dir = Context.launch_dir
    # command-line driven settings (parsed by waflib.Options)
    self.jobs = Options.options.jobs
    self.targets = Options.options.targets
    self.keep = Options.options.keep
    self.cache_global = Options.cache_global
    self.nocache = Options.options.nocache
    self.progress_bar = Options.options.progress_bar
    ############ stuff below has not been reviewed
    # Manual dependencies.
    self.deps_man = Utils.defaultdict(list)
    """Manual dependencies set by :py:meth:`waflib.Build.BuildContext.add_manual_dependency`"""
    # just the structure here
    self.current_group = 0
    """
    Current build group
    """
    self.groups = []
    """
    List containing lists of task generators
    """
    self.group_names = {}
    """
    Map group names to the group lists. See :py:meth:`waflib.Build.BuildContext.add_group`
    """
def get_variant_dir(self):
    """Return ``out_dir``, extended by the variant name when one is set."""
    if self.variant:
        return os.path.join(self.out_dir, self.variant)
    return self.out_dir
variant_dir = property(get_variant_dir, None)
def __call__(self, *k, **kw):
    """
    Create a task generator and add it to the current build group. The following forms are equivalent::

        def build(bld):
            tg = bld(a=1, b=2)

        def build(bld):
            tg = bld()
            tg.a = 1
            tg.b = 2

        def build(bld):
            tg = TaskGen.task_gen(a=1, b=2)
            bld.add_to_group(tg, None)

    :param group: group name to add the task generator to
    :type group: string
    :return: the new :py:class:`waflib.TaskGen.task_gen` instance
    """
    kw['bld'] = self
    ret = TaskGen.task_gen(*k, **kw)
    # the name->tgen index is rebuilt lazily by get_tgen_by_name()
    self.task_gen_cache_names = {} # reset the cache, each time
    self.add_to_group(ret, group=kw.get('group', None))
    return ret
def rule(self, *k, **kw):
    """
    Wrapper for creating a task generator using the decorator notation. The following code::

        @bld.rule(
            target = "foo"
        )
        def _(tsk):
            print("bar")

    is equivalent to::

        def bar(tsk):
            print("bar")

        bld(
            target = "foo",
            rule = bar,
        )
    """
    def f(rule):
        # create the task generator immediately; the decorated function
        # becomes its 'rule' attribute
        ret = self(*k, **kw)
        ret.rule = rule
        return ret
    return f
def __copy__(self):
    """Build contexts must not be duplicated; always raises WafError."""
    raise Errors.WafError('build contexts are not supposed to be copied')
def install_files(self, *k, **kw):
    """No-op here; the actual implementation lives in
    :py:meth:`waflib.Build.InstallContext.install_files`."""
    pass
def install_as(self, *k, **kw):
    """No-op here; the actual implementation lives in
    :py:meth:`waflib.Build.InstallContext.install_as`."""
    pass
def symlink_as(self, *k, **kw):
    """No-op here; the actual implementation lives in
    :py:meth:`waflib.Build.InstallContext.symlink_as`."""
    pass
def load_envs(self):
    """
    The configuration command creates files of the form ``build/c4che/NAMEcache.py``. This method
    creates a :py:class:`waflib.ConfigSet.ConfigSet` instance for each ``NAME`` by reading those
    files. The config sets are then stored in the dict :py:attr:`waflib.Build.BuildContext.all_envs`.

    :raises Errors.WafError: when the cache directory is missing or empty
    """
    node = self.root.find_node(self.cache_dir)
    if not node:
        raise Errors.WafError('The project was not configured: run "waf configure" first!')
    lst = node.ant_glob('**/*%s' % CACHE_SUFFIX, quiet=True)
    if not lst:
        raise Errors.WafError('The cache directory is empty: reconfigure the project')
    for x in lst:
        # env name = cache file path relative to c4che, minus the suffix
        name = x.path_from(node).replace(CACHE_SUFFIX, '').replace('\\', '/')
        env = ConfigSet.ConfigSet(x.abspath())
        self.all_envs[name] = env
        # pre-hash the configuration files so their signatures are ready
        # before the build starts
        for f in env[CFG_FILES]:
            newnode = self.root.find_resource(f)
            try:
                h = Utils.h_file(newnode.abspath())
            except (IOError, AttributeError):
                Logs.error('cannot find %r' % f)
                h = Utils.SIG_NIL
            newnode.sig = h
def init_dirs(self):
    """
    Initialize the project directory and the build directory by creating the nodes
    :py:attr:`waflib.Build.BuildContext.srcnode` and :py:attr:`waflib.Build.BuildContext.bldnode`
    corresponding to ``top_dir`` and ``variant_dir`` respectively. The ``bldnode`` directory will be
    created if it does not exist.

    :raises Errors.WafError: when top_dir/out_dir are not absolute (project not configured)
    """
    if not (os.path.isabs(self.top_dir) and os.path.isabs(self.out_dir)):
        raise Errors.WafError('The project was not configured: run "waf configure" first!')
    self.path = self.srcnode = self.root.find_dir(self.top_dir)
    self.bldnode = self.root.make_node(self.variant_dir)
    self.bldnode.mkdir()
def execute(self):
    """
    Restore the data from previous builds and call :py:meth:`waflib.Build.BuildContext.execute_build`. Overrides from :py:func:`waflib.Context.Context.execute`
    """
    self.restore()
    # restore() may have filled all_envs from the pickled cache; otherwise
    # load the per-variant ConfigSets from c4che
    if not self.all_envs:
        self.load_envs()
    self.execute_build()
def execute_build(self):
    """
    Execute the build by:

    * reading the scripts (see :py:meth:`waflib.Context.Context.recurse`)
    * calling :py:meth:`waflib.Build.BuildContext.pre_build` to call user build functions
    * calling :py:meth:`waflib.Build.BuildContext.compile` to process the tasks
    * calling :py:meth:`waflib.Build.BuildContext.post_build` to call user build functions
    """
    Logs.info("Waf: Entering directory `%s'" % self.variant_dir)
    self.recurse([self.run_dir])
    self.pre_build()

    # display the time elapsed in the progress bar
    self.timer = Utils.Timer()

    if self.progress_bar:
        sys.stderr.write(Logs.colors.cursor_off)
    try:
        self.compile()
    finally:
        if self.progress_bar == 1:
            # draw one final 100% progress line, then restore the cursor
            c = len(self.returned_tasks) or 1
            self.to_log(self.progress_line(c, c, Logs.colors.BLUE, Logs.colors.NORMAL))
            print('')
            sys.stdout.flush()
            sys.stderr.write(Logs.colors.cursor_on)
        Logs.info("Waf: Leaving directory `%s'" % self.variant_dir)
    self.post_build()
def restore(self):
    """
    Load the data from a previous run, sets the attributes listed in :py:const:`waflib.Build.SAVED_ATTRS`
    """
    try:
        env = ConfigSet.ConfigSet(os.path.join(self.cache_dir, 'build.config.py'))
    except (IOError, OSError):
        # no build.config.py yet: nothing to restore from the configuration
        pass
    else:
        if env['version'] < Context.HEXVERSION:
            raise Errors.WafError('Version mismatch! reconfigure the project')
        for t in env['tools']:
            self.setup(**t)

    dbfn = os.path.join(self.variant_dir, Context.DBFILE)
    try:
        data = Utils.readf(dbfn, 'rb')
    except (IOError, EOFError):
        # handle missing file/empty file
        Logs.debug('build: Could not load the build cache %s (missing)' % dbfn)
    else:
        try:
            # the lock also guards the temporary Nod3 class swap used by
            # the pickled node graph
            waflib.Node.pickle_lock.acquire()
            waflib.Node.Nod3 = self.node_class
            try:
                data = cPickle.loads(data)
            except Exception as e:
                Logs.debug('build: Could not pickle the build cache %s: %r' % (dbfn, e))
            else:
                for x in SAVED_ATTRS:
                    setattr(self, x, data[x])
        finally:
            waflib.Node.pickle_lock.release()

    self.init_dirs()
def store(self):
    """
    Store the data for next runs, sets the attributes listed in :py:const:`waflib.Build.SAVED_ATTRS`. Uses a temporary
    file to avoid problems on ctrl+c.
    """
    data = {}
    for x in SAVED_ATTRS:
        data[x] = getattr(self, x)
    db = os.path.join(self.variant_dir, Context.DBFILE)

    try:
        # the lock also guards the temporary Nod3 class swap needed for
        # pickling the node graph
        waflib.Node.pickle_lock.acquire()
        waflib.Node.Nod3 = self.node_class
        x = cPickle.dumps(data, -1)
    finally:
        waflib.Node.pickle_lock.release()

    # write to a temporary file first so an interrupt cannot corrupt the db
    Utils.writef(db + '.tmp', x, m='wb')

    try:
        # preserve the ownership of the previous db file when possible
        st = os.stat(db)
        os.remove(db)
        if not Utils.is_win32: # win32 has no chown but we're paranoid
            os.chown(db + '.tmp', st.st_uid, st.st_gid)
    except (AttributeError, OSError):
        pass

    # do not use shutil.move (copy is not thread-safe)
    os.rename(db + '.tmp', db)
def compile(self):
    """
    Run the build by creating an instance of :py:class:`waflib.Runner.Parallel`
    The cache file is not written if the build is up to date (no task executed).

    :raises Errors.BuildError: when a task fails
    """
    Logs.debug('build: compile()')

    # use another object to perform the producer-consumer logic (reduce the complexity)
    self.producer = Runner.Parallel(self, self.jobs)
    self.producer.biter = self.get_build_iterator()
    self.returned_tasks = [] # not part of the API yet
    try:
        self.producer.start()
    except KeyboardInterrupt:
        # persist whatever was built before re-raising
        self.store()
        raise
    else:
        # only rewrite the cache when at least one task actually ran
        if self.producer.dirty:
            self.store()

    if self.producer.error:
        raise Errors.BuildError(self.producer.error)
def setup(self, tool, tooldir=None, funs=None):
    """
    Import waf tools, used to import those accessed during the configuration::

        def configure(conf):
            conf.load('glib2')

        def build(bld):
            pass # glib2 is imported implicitly

    :param tool: tool list
    :type tool: list
    :param tooldir: optional tool directory (sys.path)
    :type tooldir: list of string
    :param funs: unused variable
    """
    # a list of tools is loaded recursively, one by one
    if isinstance(tool, list):
        for i in tool: self.setup(i, tooldir)
        return

    module = Context.load_tool(tool, tooldir)
    if hasattr(module, "setup"): module.setup(self)
def get_env(self):
    """Return the ConfigSet for the current variant, falling back to the
    default (unnamed) environment."""
    envs = self.all_envs
    if self.variant in envs:
        return envs[self.variant]
    return envs['']
def set_env(self, val):
    """Bind *val* as the ConfigSet for the current variant."""
    self.all_envs[self.variant] = val

env = property(get_env, set_env)
def add_manual_dependency(self, path, value):
    """
    Adds a dependency from a node object to a value::

        def build(bld):
            bld.add_manual_dependency(
                bld.path.find_resource('wscript'),
                bld.root.find_resource('/etc/fstab'))

    :param path: file path
    :type path: string or :py:class:`waflib.Node.Node`
    :param value: value to depend on
    :type value: :py:class:`waflib.Node.Node`, string, or function returning a string
    """
    if path is None:
        raise ValueError('Invalid input')

    # resolve the path to a node: already a node, absolute, or relative
    # to the current script directory
    if isinstance(path, waflib.Node.Node):
        node = path
    elif os.path.isabs(path):
        node = self.root.find_resource(path)
    else:
        node = self.path.find_resource(path)

    values = value if isinstance(value, list) else [value]
    self.deps_man[id(node)].extend(values)
def launch_node(self):
    """Return the launch directory as a :py:class:`waflib.Node.Node`,
    cached on the instance after the first call."""
    if not hasattr(self, 'p_ln'):
        # private cache
        self.p_ln = self.root.find_dir(self.launch_dir)
    return self.p_ln
def hash_env_vars(self, env, vars_lst):
    """
    Hash configuration set variables::

        def build(bld):
            bld.hash_env_vars(bld.env, ['CXX', 'CC'])

    :param env: Configuration Set
    :type env: :py:class:`waflib.ConfigSet.ConfigSet`
    :param vars_lst: list of variables
    :type vars_list: list of string
    """
    # an empty table means the values live on the parent ConfigSet
    if not env.table:
        env = env.parent
        if not env:
            return Utils.SIG_NIL

    # memoize per (ConfigSet identity, variable list)
    idx = str(id(env)) + str(vars_lst)
    try:
        cache = self.cache_env
    except AttributeError:
        cache = self.cache_env = {}
    else:
        try:
            return self.cache_env[idx]
        except KeyError:
            pass

    lst = [env[a] for a in vars_lst]
    ret = Utils.h_list(lst)
    Logs.debug('envhash: %s %r', Utils.to_hex(ret), lst)

    cache[idx] = ret

    return ret
def get_tgen_by_name(self, name):
    """
    Retrieves a task generator from its name or its target name
    the name must be unique::

        def build(bld):
            tg = bld(name='foo')
            tg == bld.get_tgen_by_name('foo')

    :raises Errors.WafError: when no task generator matches *name*
    """
    cache = self.task_gen_cache_names
    if not cache:
        # create the index lazily
        for g in self.groups:
            for tg in g:
                try:
                    cache[tg.name] = tg
                except AttributeError:
                    # raised if not a task generator, which should be uncommon
                    pass
    try:
        return cache[name]
    except KeyError:
        raise Errors.WafError('Could not find a task generator for the name %r' % name)
def progress_line(self, state, total, col1, col2):
    """
    Compute the progress bar used by ``waf -p``

    :param state: number of tasks processed so far
    :param total: total task count
    :param col1: color escape for the filled part
    :param col2: color escape to reset after col1
    """
    n = len(str(total))

    # rotating spinner character
    Utils.rot_idx += 1
    ind = Utils.rot_chr[Utils.rot_idx % 4]

    pc = (100.*state)/total
    eta = str(self.timer)

    # "[done/total][ pc%][spinner][" with fields padded to the width of `total`
    fs = "[%%%dd/%%%dd][%%s%%2d%%%%%%s][%s][" % (n, n, ind)
    left = fs % (state, total, col1, pc, col2)
    right = '][%s%s%s]' % (col1, eta, col2)

    # remaining columns for the bar itself; the color escapes take no
    # visible space, hence the +2*len(colX) corrections
    cols = Logs.get_term_cols() - len(left) - len(right) + 2*len(col1) + 2*len(col2)
    if cols < 7: cols = 7

    ratio = ((cols*state)//total) - 1

    bar = ('='*ratio+'>').ljust(cols)
    msg = Utils.indicator % (left, bar, right)

    return msg
def declare_chain(self, *k, **kw):
    """Convenience pass-through to :py:func:`waflib.TaskGen.declare_chain`."""
    return TaskGen.declare_chain(*k, **kw)
def pre_build(self):
    """Run the user callbacks registered through
    :py:meth:`waflib.Build.BuildContext.add_pre_fun` before the build starts."""
    callbacks = getattr(self, 'pre_funs', [])
    for callback in callbacks:
        callback(self)
def post_build(self):
    """Run the user callbacks registered through
    :py:meth:`waflib.Build.BuildContext.add_post_fun` after a successful build."""
    callbacks = getattr(self, 'post_funs', [])
    for callback in callbacks:
        callback(self)
def add_pre_fun(self, meth):
    """
    Bind a method to execute after the scripts are read and before the build starts::

        def mycallback(bld):
            print("Hello, world!")

        def build(bld):
            bld.add_pre_fun(mycallback)
    """
    # EAFP: the list is created on first use
    try:
        self.pre_funs.append(meth)
    except AttributeError:
        self.pre_funs = [meth]
def add_post_fun(self, meth):
    """
    Bind a method to execute immediately after the build is successful::

        def call_ldconfig(bld):
            bld.exec_command('/sbin/ldconfig')

        def build(bld):
            if bld.cmd == 'install':
                bld.add_post_fun(call_ldconfig)
    """
    # EAFP: the list is created on first use
    try:
        self.post_funs.append(meth)
    except AttributeError:
        self.post_funs = [meth]
def get_group(self, x):
    """
    Return the group identified by *x*: by name, by index, or the current
    group when *x* is None. Creates the first group on demand.

    :param x: name or number or None
    :type x: string, int or None
    """
    if not self.groups:
        self.add_group()
    if x is None:
        return self.groups[self.current_group]
    return self.group_names[x] if x in self.group_names else self.groups[x]
def add_to_group(self, tgen, group=None):
    """Append a task or task generator to the group named/indexed by *group*
    (the current group when None)."""
    # paranoid sanity check: only tasks and task generators are allowed
    assert(isinstance(tgen, (TaskGen.task_gen, Task.TaskBase)))
    tgen.bld = self
    self.get_group(group).append(tgen)
def get_group_name(self, g):
    """Return the name of group *g* (a list or an index); '' when unnamed."""
    grp = g if isinstance(g, list) else self.groups[g]
    for name in self.group_names:
        if self.group_names[name] is grp:
            return name
    return ''
def get_group_idx(self, tg):
    """
    Return the index of the group containing the task generator *tg*,
    or None when it belongs to no group::

        def build(bld):
            tg = bld(name='nada')
            0 == bld.get_group_idx(tg)

    :param tg: Task generator object
    :type tg: :py:class:`waflib.TaskGen.task_gen`
    """
    for idx, group in enumerate(self.groups):
        for member in group:
            if member is tg:
                return idx
    return None
def add_group(self, name=None, move=True):
    """
    Add a new group of tasks/task generators. By default the new group becomes the default group for new task generators.

    :param name: name for this group
    :type name: string
    :param move: set the group created as default group (True by default)
    :type move: bool
    """
    #if self.groups and not self.groups[0].tasks:
    #	error('add_group: an empty group is already present')
    # duplicate names are reported but not fatal; the new list replaces
    # the old mapping
    if name and name in self.group_names:
        Logs.error('add_group: name %s already present' % name)
    g = []
    self.group_names[name] = g
    self.groups.append(g)
    if move:
        self.current_group = len(self.groups) - 1
def set_group(self, idx):
    """
    Set the current group to be idx: now new task generators will be added to this group by default::

        def build(bld):
            bld(rule='touch ${TGT}', target='foo.txt')
            bld.add_group() # now the current group is 1
            bld(rule='touch ${TGT}', target='bar.txt')
            bld.set_group(0) # now the current group is 0
            bld(rule='touch ${TGT}', target='truc.txt') # build truc.txt before bar.txt

    :param idx: group name or group index
    :type idx: string or int
    """
    if isinstance(idx, str):
        # resolve the named group back to its index by identity
        # (no break: scans all groups; names are expected to be unique)
        g = self.group_names[idx]
        for i in range(len(self.groups)):
            if id(g) == id(self.groups[i]):
                self.current_group = i
    else:
        self.current_group = idx
def total(self):
    """
    Approximate task count: this value may be inaccurate if task generators
    are posted lazily (see :py:attr:`waflib.Build.BuildContext.post_mode`).
    The value :py:attr:`waflib.Runner.Parallel.total` is updated during the task execution.
    """
    count = 0
    for group in self.groups:
        for tg in group:
            # entries without a 'tasks' attribute count as one pending unit
            if hasattr(tg, 'tasks'):
                count += len(tg.tasks)
            else:
                count += 1
    return count
def get_targets(self):
    """
    Return the task generator corresponding to the 'targets' list, used by :py:meth:`waflib.Build.BuildContext.get_build_iterator`::

        $ waf --targets=myprogram,myshlib

    :return: tuple (min_grp, to_post) where min_grp is the highest group
        index among the requested targets and to_post the generators in it
    """
    to_post = []
    min_grp = 0
    for name in self.targets.split(','):
        tg = self.get_tgen_by_name(name)
        if not tg:
            raise Errors.WafError('target %r does not exist' % name)

        # keep only the targets from the latest (highest-index) group;
        # earlier groups are posted wholesale by post_group()
        m = self.get_group_idx(tg)
        if m > min_grp:
            min_grp = m
            to_post = [tg]
        elif m == min_grp:
            to_post.append(tg)
    return (min_grp, to_post)
def get_all_task_gen(self):
    """
    Utility method, returns a list of all task generators - if you need
    something more complicated, implement your own
    """
    result = []
    for group in self.groups:
        result += group
    return result
def post_group(self):
    """
    Post the task generators from the group indexed by self.cur, used by :py:meth:`waflib.Build.BuildContext.get_build_iterator`
    """
    if self.targets == '*':
        # explicit wildcard: post everything in the group
        for tg in self.groups[self.cur]:
            try:
                f = tg.post
            except AttributeError:
                pass
            else:
                f()
    elif self.targets:
        # specific targets requested: post earlier groups entirely,
        # and only the exact targets once their group is reached
        if self.cur < self._min_grp:
            for tg in self.groups[self.cur]:
                try:
                    f = tg.post
                except AttributeError:
                    pass
            else:
                f()
        else:
            for tg in self._exact_tg:
                tg.post()
    else:
        # no targets given: restrict posting to generators below the
        # launch directory, unless building from the build dir itself
        ln = self.launch_node()
        if ln.is_child_of(self.bldnode):
            Logs.warn('Building from the build directory, forcing --targets=*')
            ln = self.srcnode
        elif not ln.is_child_of(self.srcnode):
            Logs.warn('CWD %s is not under %s, forcing --targets=* (run distclean?)' % (ln.abspath(), self.srcnode.abspath()))
            ln = self.srcnode
        for tg in self.groups[self.cur]:
            try:
                f = tg.post
            except AttributeError:
                pass
            else:
                if tg.path.is_child_of(ln):
                    f()
def get_tasks_group(self, idx):
    """
    Return all the tasks for the group of num idx, used by
    :py:meth:`waflib.Build.BuildContext.get_build_iterator`
    """
    collected = []
    for tg in self.groups[idx]:
        if hasattr(tg, 'tasks'):
            collected.extend(tg.tasks)
        else:
            # not a task generator, can be the case for installation tasks
            collected.append(tg)
    return collected
def get_build_iterator(self):
    """
    Creates a generator object that returns lists of tasks executable in parallel (yield)

    :return: tasks which can be executed immediately
    :rtype: list of :py:class:`waflib.Task.TaskBase`
    """
    self.cur = 0
    if self.targets and self.targets != '*':
        (self._min_grp, self._exact_tg) = self.get_targets()

    # NOTE(review): 'lazy_post' is never defined or assigned here; this
    # 'global' statement looks like dead code left from a refactoring — confirm
    global lazy_post
    if self.post_mode != POST_LAZY:
        # non-lazy mode: post every group up front, then restart from group 0
        while self.cur < len(self.groups):
            self.post_group()
            self.cur += 1
        self.cur = 0

    while self.cur < len(self.groups):
        # first post the task generators for the group
        if self.post_mode != POST_AT_ONCE:
            self.post_group()

        # then extract the tasks
        tasks = self.get_tasks_group(self.cur)
        # if the constraints are set properly (ext_in/ext_out, before/after)
        # the call to set_file_constraints may be removed (can be a 15% penalty on no-op rebuilds)
        # (but leave set_file_constraints for the installation step)
        #
        # if the tasks have only files, set_file_constraints is required but set_precedence_constraints is not necessary
        #
        Task.set_file_constraints(tasks)
        Task.set_precedence_constraints(tasks)

        self.cur_tasks = tasks
        self.cur += 1
        if not tasks: # return something else the build will stop
            continue
        yield tasks
    # keep yielding empty lists forever so the consumer never hits StopIteration
    while 1:
        yield []
#def install_dir(self, path, env=None):
# """
# Create empty folders for the installation (very rarely used) TODO
# """
# return
class inst(Task.Task):
    """
    Special task used for installing files and symlinks, it behaves both like a task
    and like a task generator
    """
    color = 'CYAN'

    def uid(self):
        # identity of the installation: destination + source path + source list
        lst = [self.dest, self.path] + self.source
        return Utils.h_list(repr(lst))

    def post(self):
        """
        Same interface as in :py:meth:`waflib.TaskGen.task_gen.post`
        """
        buf = []
        for x in self.source:
            if isinstance(x, waflib.Node.Node):
                y = x
            else:
                y = self.path.find_resource(x)
                if not y:
                    if Logs.verbose:
                        Logs.warn('Could not find %s immediately (may cause broken builds)' % x)
                    # the file may be produced by another task generator of the
                    # same build group: post them one by one and retry the lookup
                    idx = self.generator.bld.get_group_idx(self)
                    for tg in self.generator.bld.groups[idx]:
                        if not isinstance(tg, inst) and id(tg) != id(self):
                            tg.post()
                        y = self.path.find_resource(x)
                        if y:
                            break
                    else:
                        raise Errors.WafError('Could not find %r in %r' % (x, self.path))
            buf.append(y)
        self.inputs = buf

    def runnable_status(self):
        """
        Installation tasks are always executed, so this method returns either :py:const:`waflib.Task.ASK_LATER` or :py:const:`waflib.Task.RUN_ME`.
        """
        ret = super(inst, self).runnable_status()
        if ret == Task.SKIP_ME:
            # never skip: installation must run even when signatures match
            return Task.RUN_ME
        return ret

    def __str__(self):
        """Return an empty string to disable the display"""
        return ''

    def run(self):
        """The attribute 'exec_task' holds the method to execute"""
        return self.generator.exec_task()

    def get_install_path(self, destdir=True):
        """
        Installation path obtained from ``self.dest`` and prefixed by the destdir.
        The variables such as '${PREFIX}/bin' are substituted.

        :param destdir: prepend ``Options.options.destdir`` when set
        :type destdir: bool
        """
        dest = Utils.subst_vars(self.dest, self.env)
        dest = dest.replace('/', os.sep)
        if destdir and Options.options.destdir:
            # strip any drive letter / leading separator before joining
            dest = os.path.join(Options.options.destdir, os.path.splitdrive(dest)[1].lstrip(os.sep))
        return dest

    def exec_install_files(self):
        """
        Predefined method for installing files
        """
        destpath = self.get_install_path()
        if not destpath:
            raise Errors.WafError('unknown installation path %r' % self.generator)
        for x, y in zip(self.source, self.inputs):
            if self.relative_trick:
                # preserve the folder hierarchy relative to the task path
                destfile = os.path.join(destpath, y.path_from(self.path))
            else:
                destfile = os.path.join(destpath, y.name)
            self.generator.bld.do_install(y.abspath(), destfile, self.chmod)

    def exec_install_as(self):
        """
        Predefined method for installing one file with a given name
        """
        destfile = self.get_install_path()
        self.generator.bld.do_install(self.inputs[0].abspath(), destfile, self.chmod)

    def exec_symlink_as(self):
        """
        Predefined method for installing a symlink
        """
        destfile = self.get_install_path()
        src = self.link
        if self.relative_trick:
            # express the link target relative to the symlink's directory
            src = os.path.relpath(src, os.path.dirname(destfile))
        self.generator.bld.do_link(src, destfile)
class InstallContext(BuildContext):
    '''installs the targets on the system'''
    cmd = 'install'

    def __init__(self, **kw):
        super(InstallContext, self).__init__(**kw)

        # list of targets to uninstall for removing the empty folders after uninstalling
        self.uninstall = []
        self.is_install = INSTALL

    def do_install(self, src, tgt, chmod=Utils.O644):
        """
        Copy a file from src to tgt with given file permissions. The actual copy is not performed
        if the source and target file have the same size and the same timestamps. When the copy occurs,
        the file is first removed and then copied (prevent stale inodes).

        This method is overridden in :py:meth:`waflib.Build.UninstallContext.do_install` to remove the file.

        :param src: file name as absolute path
        :type src: string
        :param tgt: file destination, as absolute path
        :type tgt: string
        :param chmod: installation mode
        :type chmod: int
        """
        d, _ = os.path.split(tgt)
        if not d:
            raise Errors.WafError('Invalid installation given %r->%r' % (src, tgt))
        Utils.check_dir(d)

        srclbl = src.replace(self.srcnode.abspath() + os.sep, '')
        if not Options.options.force:
            # check if the file is already there to avoid a copy
            try:
                st1 = os.stat(tgt)
                st2 = os.stat(src)
            except OSError:
                pass
            else:
                # same size and identical timestamps -> make no copy
                # (the +2s slack absorbs coarse filesystem timestamp resolution)
                if st1.st_mtime + 2 >= st2.st_mtime and st1.st_size == st2.st_size:
                    if not self.progress_bar:
                        Logs.info('- install %s (from %s)' % (tgt, srclbl))
                    return False

        if not self.progress_bar:
            Logs.info('+ install %s (from %s)' % (tgt, srclbl))

        # following is for shared libs and stale inodes (-_-)
        try:
            os.remove(tgt)
        except OSError:
            pass

        try:
            shutil.copy2(src, tgt)
            os.chmod(tgt, chmod)
        except IOError:
            # distinguish "source missing" from other copy failures in the log
            try:
                os.stat(src)
            except (OSError, IOError):
                Logs.error('File %r does not exist' % src)
            raise Errors.WafError('Could not install the file %r' % tgt)

    def do_link(self, src, tgt):
        """
        Create a symlink from tgt to src.

        This method is overridden in :py:meth:`waflib.Build.UninstallContext.do_link` to remove the symlink.

        :param src: file name as absolute path
        :type src: string
        :param tgt: file destination, as absolute path
        :type tgt: string
        """
        d, _ = os.path.split(tgt)
        Utils.check_dir(d)

        link = False
        if not os.path.islink(tgt):
            link = True
        elif os.readlink(tgt) != src:
            # the link exists but points elsewhere: recreate it
            link = True

        if link:
            try: os.remove(tgt)
            except OSError: pass
            if not self.progress_bar:
                Logs.info('+ symlink %s (to %s)' % (tgt, src))
            os.symlink(src, tgt)
        else:
            if not self.progress_bar:
                Logs.info('- symlink %s (to %s)' % (tgt, src))

    def run_task_now(self, tsk, postpone):
        """
        This method is called by :py:meth:`waflib.Build.InstallContext.install_files`,
        :py:meth:`waflib.Build.InstallContext.install_as` and :py:meth:`waflib.Build.InstallContext.symlink_as` immediately
        after the installation task is created. Its role is to force the immediate execution if necessary, that is when
        ``postpone=False`` was given.
        """
        tsk.post()
        if not postpone:
            if tsk.runnable_status() == Task.ASK_LATER:
                # NOTE(review): no 'WafError' attribute is defined on this class or
                # its visible bases; this probably should be Errors.WafError —
                # confirm before relying on this error path
                raise self.WafError('cannot post the task %r' % tsk)
            tsk.run()

    def install_files(self, dest, files, env=None, chmod=Utils.O644, relative_trick=False, cwd=None, add=True, postpone=True):
        """
        Create a task to install files on the system::

            def build(bld):
                bld.install_files('${DATADIR}', self.path.find_resource('wscript'))

        :param dest: absolute path of the destination directory
        :type dest: string
        :param files: input files
        :type files: list of strings or list of nodes
        :param env: configuration set for performing substitutions in dest
        :type env: Configuration set
        :param relative_trick: preserve the folder hierarchy when installing whole folders
        :type relative_trick: bool
        :param cwd: parent node for searching srcfile, when srcfile is not a :py:class:`waflib.Node.Node`
        :type cwd: :py:class:`waflib.Node.Node`
        :param add: add the task created to a build group - set ``False`` only if the installation task is created after the build has started
        :type add: bool
        :param postpone: execute the task immediately to perform the installation
        :type postpone: bool
        """
        tsk = inst(env=env or self.env)
        tsk.bld = self
        tsk.path = cwd or self.path
        tsk.chmod = chmod
        if isinstance(files, waflib.Node.Node):
            tsk.source = [files]
        else:
            tsk.source = Utils.to_list(files)
        tsk.dest = dest
        tsk.exec_task = tsk.exec_install_files
        tsk.relative_trick = relative_trick
        if add: self.add_to_group(tsk)
        self.run_task_now(tsk, postpone)
        return tsk

    def install_as(self, dest, srcfile, env=None, chmod=Utils.O644, cwd=None, add=True, postpone=True):
        """
        Create a task to install a file on the system with a different name::

            def build(bld):
                bld.install_as('${PREFIX}/bin', 'myapp', chmod=Utils.O755)

        :param dest: absolute path of the destination file
        :type dest: string
        :param srcfile: input file
        :type srcfile: string or node
        :param cwd: parent node for searching srcfile, when srcfile is not a :py:class:`waflib.Node.Node`
        :type cwd: :py:class:`waflib.Node.Node`
        :param env: configuration set for performing substitutions in dest
        :type env: Configuration set
        :param add: add the task created to a build group - set ``False`` only if the installation task is created after the build has started
        :type add: bool
        :param postpone: execute the task immediately to perform the installation
        :type postpone: bool
        """
        tsk = inst(env=env or self.env)
        tsk.bld = self
        tsk.path = cwd or self.path
        tsk.chmod = chmod
        tsk.source = [srcfile]
        tsk.dest = dest
        tsk.exec_task = tsk.exec_install_as
        if add: self.add_to_group(tsk)
        self.run_task_now(tsk, postpone)
        return tsk

    def symlink_as(self, dest, src, env=None, cwd=None, add=True, postpone=True, relative_trick=False):
        """
        Create a task to install a symlink::

            def build(bld):
                bld.symlink_as('${PREFIX}/lib/libfoo.so', 'libfoo.so.1.2.3')

        :param dest: absolute path of the symlink
        :type dest: string
        :param src: absolute or relative path of the link
        :type src: string
        :param env: configuration set for performing substitutions in dest
        :type env: Configuration set
        :param add: add the task created to a build group - set ``False`` only if the installation task is created after the build has started
        :type add: bool
        :param postpone: execute the task immediately to perform the installation
        :type postpone: bool
        :param relative_trick: make the symlink relative (default: ``False``)
        :type relative_trick: bool
        """
        if Utils.is_win32:
            # symlinks *cannot* work on that platform
            return

        tsk = inst(env=env or self.env)
        tsk.bld = self
        tsk.dest = dest
        tsk.path = cwd or self.path
        tsk.source = []
        tsk.link = src
        tsk.relative_trick = relative_trick
        tsk.exec_task = tsk.exec_symlink_as
        if add: self.add_to_group(tsk)
        self.run_task_now(tsk, postpone)
        return tsk
class UninstallContext(InstallContext):
    '''removes the targets installed'''
    cmd = 'uninstall'

    def __init__(self, **kw):
        super(UninstallContext, self).__init__(**kw)
        self.is_install = UNINSTALL

    def do_install(self, src, tgt, chmod=Utils.O644):
        """See :py:meth:`waflib.Build.InstallContext.do_install` — here the target file is removed instead."""
        if not self.progress_bar:
            Logs.info('- remove %s' % tgt)

        self.uninstall.append(tgt)
        try:
            os.remove(tgt)
        except OSError as e:
            if e.errno != errno.ENOENT:
                # warn only once globally, list individual failures at -vv
                if not getattr(self, 'uninstall_error', None):
                    self.uninstall_error = True
                    Logs.warn('build: some files could not be uninstalled (retry with -vv to list them)')
                if Logs.verbose > 1:
                    Logs.warn('Could not remove %s (error code %r)' % (e.filename, e.errno))

        # TODO ita refactor this into a post build action to uninstall the folders (optimization)
        # climb the directory tree, removing folders until one is non-empty
        while tgt:
            tgt = os.path.dirname(tgt)
            try:
                os.rmdir(tgt)
            except OSError:
                break

    def do_link(self, src, tgt):
        """See :py:meth:`waflib.Build.InstallContext.do_link` — here the symlink is removed instead."""
        try:
            if not self.progress_bar:
                Logs.info('- remove %s' % tgt)
            os.remove(tgt)
        except OSError:
            pass

        # TODO ita refactor this into a post build action to uninstall the folders (optimization)?
        while tgt:
            tgt = os.path.dirname(tgt)
            try:
                os.rmdir(tgt)
            except OSError:
                break

    def execute(self):
        """
        See :py:func:`waflib.Context.Context.execute`
        """
        try:
            # do not execute any tasks: temporarily monkey-patch
            # Task.Task.runnable_status so every task is skipped
            def runnable_status(self):
                return Task.SKIP_ME
            setattr(Task.Task, 'runnable_status_back', Task.Task.runnable_status)
            setattr(Task.Task, 'runnable_status', runnable_status)

            super(UninstallContext, self).execute()
        finally:
            # always restore the original method
            setattr(Task.Task, 'runnable_status', Task.Task.runnable_status_back)
class CleanContext(BuildContext):
    '''cleans the project'''
    cmd = 'clean'

    def execute(self):
        """
        See :py:func:`waflib.Context.Context.execute`
        """
        self.restore()
        if not self.all_envs:
            self.load_envs()

        self.recurse([self.run_dir])
        try:
            self.clean()
        finally:
            # persist the (now reset) build caches even if cleaning failed
            self.store()

    def clean(self):
        """Remove files from the build directory if possible, and reset the caches"""
        Logs.debug('build: clean called')

        if self.bldnode != self.srcnode:
            # would lead to a disaster if top == out
            # keep the configuration files (CFG_FILES) of every environment
            lst = []
            for e in self.all_envs.values():
                lst.extend(self.root.find_or_declare(f) for f in e[CFG_FILES])
            for n in self.bldnode.ant_glob('**/*', excl='.lock* *conf_check_*/** config.log c4che/*', quiet=True):
                if n in lst:
                    continue
                n.delete()
            self.root.children = {}

        # reset the dependency/signature caches
        for v in 'node_deps task_sigs raw_deps'.split():
            setattr(self, v, {})
class ListContext(BuildContext):
    '''lists the targets to execute'''
    cmd = 'list'

    def execute(self):
        """
        See :py:func:`waflib.Context.Context.execute`.
        """
        self.restore()
        if not self.all_envs:
            self.load_envs()

        self.recurse([self.run_dir])
        self.pre_build()

        # display the time elapsed in the progress bar
        self.timer = Utils.Timer()

        # post every task generator so the name cache can be filled
        for g in self.groups:
            for tg in g:
                try:
                    f = tg.post
                except AttributeError:
                    pass
                else:
                    f()

        try:
            # force the cache initialization
            self.get_tgen_by_name('')
        except Exception:
            pass
        lst = list(self.task_gen_cache_names.keys())
        lst.sort()
        for k in lst:
            Logs.pprint('GREEN', k)
class StepContext(BuildContext):
    '''executes tasks in a step-by-step fashion, for debugging'''
    cmd = 'step'

    def __init__(self, **kw):
        super(StepContext, self).__init__(**kw)
        # comma-separated patterns from the --files option
        self.files = Options.options.files

    def compile(self):
        """
        Compile the tasks matching the input/output files given (regular expression matching). Derived from :py:meth:`waflib.Build.BuildContext.compile`::

            $ waf step --files=foo.c,bar.c,in:truc.c,out:bar.o
            $ waf step --files=in:foo.cpp.1.o # link task only
        """
        if not self.files:
            # no pattern given: fall back to a normal build
            Logs.warn('Add a pattern for the debug build, for example "waf step --files=main.c,app"')
            BuildContext.compile(self)
            return

        targets = None
        if self.targets and self.targets != '*':
            targets = self.targets.split(',')

        for g in self.groups:
            # post the (possibly target-restricted) generators of this group
            for tg in g:
                if targets and tg.name not in targets:
                    continue
                try:
                    f = tg.post
                except AttributeError:
                    pass
                else:
                    f()

            # then run every task whose inputs/outputs match a pattern
            for pat in self.files.split(','):
                matcher = self.get_matcher(pat)
                for tg in g:
                    if isinstance(tg, Task.TaskBase):
                        lst = [tg]
                    else:
                        lst = tg.tasks
                    for tsk in lst:
                        do_exec = False
                        for node in getattr(tsk, 'inputs', []):
                            if matcher(node, output=False):
                                do_exec = True
                                break
                        for node in getattr(tsk, 'outputs', []):
                            if matcher(node, output=True):
                                do_exec = True
                                break
                        if do_exec:
                            ret = tsk.run()
                            Logs.info('%s -> exit %r' % (str(tsk), ret))

    def get_matcher(self, pat):
        """
        Build a predicate ``match(node, output)`` for one ``--files`` pattern.
        A pattern may be prefixed by ``in:`` or ``out:`` to restrict matching
        to task inputs or outputs; it is either an exact file (when the node
        exists) or a regular expression applied to the absolute path.
        """
        # this returns a function
        inn = True
        out = True
        if pat.startswith('in:'):
            out = False
            pat = pat.replace('in:', '')
        elif pat.startswith('out:'):
            inn = False
            pat = pat.replace('out:', '')

        anode = self.root.find_node(pat)
        pattern = None
        if not anode:
            # anchor the regex on both ends unless the user already did
            if not pat.startswith('^'):
                pat = '^.+?%s' % pat
            if not pat.endswith('$'):
                pat = '%s$' % pat
            pattern = re.compile(pat)

        def match(node, output):
            if output == True and not out:
                return False
            if output == False and not inn:
                return False

            if anode:
                return anode == node
            else:
                return pattern.match(node.abspath())
        return match
# Wrap the cache (de)serialization methods with Utils.nogc, which disables
# the garbage collector around the call for speed.
BuildContext.store = Utils.nogc(BuildContext.store)
BuildContext.restore = Utils.nogc(BuildContext.restore)
| |
#
# Copyright (c) 2014, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""Module for working with EOS static routes
The staticroute resource provides configuration management of static
route resources on an EOS node. It provides the following class
implementations:
* StaticRoute - Configure static routes in EOS
StaticRoute Attributes:
ip_dest (string): The ip address of the destination in the
form of A.B.C.D/E
next_hop (string): The next hop interface or ip address
next_hop_ip (string): The next hop address on destination interface
distance (int): Administrative distance for this route
tag (int): Route tag
route_name (string): Route name
Notes:
The 'default' prefix form of the 'ip route' command,
'default ip route ...', is currently equivalent to the
'no ip route ...' command.
"""
import re
from pyeapi.api import EntityCollection
# Define the regex to match ip route lines (by lines in regex):
#   'ip route' header
#   ip_dest
#   next_hop
#   next_hop_ip (optional)
#   distance
#   tag (optional)
#   name (optional)
ROUTES_RE = re.compile(r'(?<=^ip route)'
                       r' (\d+\.\d+\.\d+\.\d+\/\d+)'
                       r' (\d+\.\d+\.\d+\.\d+|\S+)'
                       r'(?: (\d+\.\d+\.\d+\.\d+))?'
                       r' (\d+)'
                       r'(?: tag (\d+))?'
                       r'(?: name (\S+))?', re.M)
class StaticRoute(EntityCollection):
    """The StaticRoute class provides a configuration instance
    for working with static routes
    """

    def __str__(self):
        return 'StaticRoute'

    def get(self, name):
        """Retrieves the ip route information for the destination
        ip address specified.

        Args:
            name (string): The ip address of the destination in the
                form of A.B.C.D/E

        Returns:
            dict: A dict object of static route entries in the form::

                { ip_dest:
                    { next_hop:
                        { next_hop_ip:
                            { distance:
                                { 'tag': tag,
                                  'route_name': route_name
                                }
                            }
                        }
                    }
                }

            If the ip address specified does not have any associated
            static routes, then None is returned.

        Notes:
            The keys ip_dest, next_hop, next_hop_ip, and distance in
            the returned dictionary are the values of those components
            of the ip route specification. If a route does not contain
            a next_hop_ip, then that key value will be set as 'None'.
        """
        # Return the route configurations for the specified ip address,
        # or None if it's not found
        return self.getall().get(name)

    def getall(self):
        """Return all ip routes configured on the switch as a resource dict

        Returns:
            dict: A dict object of static route entries in the form::

                { ip_dest:
                    { next_hop:
                        { next_hop_ip:
                            { distance:
                                { 'tag': tag,
                                  'route_name': route_name
                                }
                            }
                        }
                    }
                }

            If the ip address specified does not have any associated
            static routes, then None is returned.

        Notes:
            The keys ip_dest, next_hop, next_hop_ip, and distance in
            the returned dictionary are the values of those components
            of the ip route specification. If a route does not contain
            a next_hop_ip, then that key value will be set as 'None'.
        """
        # Find all the ip routes in the config
        matches = ROUTES_RE.findall(self.config)

        # Parse the routes and add them to the routes dict
        routes = dict()
        for match in matches:
            # Get the four identifying components
            ip_dest = match[0]
            next_hop = match[1]
            # optional regex groups come back as '' when absent
            next_hop_ip = None if match[2] == '' else match[2]
            distance = int(match[3])

            # Create the data dict with the remaining components
            data = {}
            data['tag'] = None if match[4] == '' else int(match[4])
            data['route_name'] = None if match[5] == '' else match[5]

            # Build the complete dict entry from the four components
            # and the data.
            # temp_dict = parent_dict[key] = parent_dict.get(key, {})
            # This creates the keyed dict in the parent_dict if it doesn't
            # exist, or reuses the existing keyed dict.
            # The temp_dict is used to make things more readable.
            ip_dict = routes[ip_dest] = routes.get(ip_dest, {})
            nh_dict = ip_dict[next_hop] = ip_dict.get(next_hop, {})
            nhip_dict = nh_dict[next_hop_ip] = nh_dict.get(next_hop_ip, {})
            nhip_dict[distance] = data

        return routes

    def create(self, ip_dest, next_hop, **kwargs):
        """Create a static route

        Args:
            ip_dest (string): The ip address of the destination in the
                form of A.B.C.D/E
            next_hop (string): The next hop interface or ip address
            **kwargs['next_hop_ip'] (string): The next hop address on
                destination interface
            **kwargs['distance'] (string): Administrative distance for this
                route
            **kwargs['tag'] (string): Route tag
            **kwargs['route_name'] (string): Route name

        Returns:
            True if the operation succeeds, otherwise False.
        """
        # Call _set_route with delete and default set to False
        return self._set_route(ip_dest, next_hop, **kwargs)

    def delete(self, ip_dest, next_hop, **kwargs):
        """Delete a static route

        Args:
            ip_dest (string): The ip address of the destination in the
                form of A.B.C.D/E
            next_hop (string): The next hop interface or ip address
            **kwargs['next_hop_ip'] (string): The next hop address on
                destination interface
            **kwargs['distance'] (string): Administrative distance for this
                route
            **kwargs['tag'] (string): Route tag
            **kwargs['route_name'] (string): Route name

        Returns:
            True if the operation succeeds, otherwise False.
        """
        # Call _set_route with the delete flag set to True
        kwargs.update({'delete': True})
        return self._set_route(ip_dest, next_hop, **kwargs)

    def default(self, ip_dest, next_hop, **kwargs):
        """Set a static route to default (i.e. delete the matching route)

        Args:
            ip_dest (string): The ip address of the destination in the
                form of A.B.C.D/E
            next_hop (string): The next hop interface or ip address
            **kwargs['next_hop_ip'] (string): The next hop address on
                destination interface
            **kwargs['distance'] (string): Administrative distance for this
                route
            **kwargs['tag'] (string): Route tag
            **kwargs['route_name'] (string): Route name

        Returns:
            True if the operation succeeds, otherwise False.
        """
        # Call _set_route with the default flag set to True
        kwargs.update({'default': True})
        return self._set_route(ip_dest, next_hop, **kwargs)

    def set_tag(self, ip_dest, next_hop, **kwargs):
        """Set the tag value for the specified route

        Args:
            ip_dest (string): The ip address of the destination in the
                form of A.B.C.D/E
            next_hop (string): The next hop interface or ip address
            **kwargs['next_hop_ip'] (string): The next hop address on
                destination interface
            **kwargs['distance'] (string): Administrative distance for this
                route
            **kwargs['tag'] (string): Route tag
            **kwargs['route_name'] (string): Route name

        Returns:
            True if the operation succeeds, otherwise False.

        Notes:
            Any existing route_name value must be included in call to
            set_tag, otherwise the tag will be reset
            by the call to EOS.
        """
        # Call _set_route with the new tag information
        return self._set_route(ip_dest, next_hop, **kwargs)

    def set_route_name(self, ip_dest, next_hop, **kwargs):
        """Set the route_name value for the specified route

        Args:
            ip_dest (string): The ip address of the destination in the
                form of A.B.C.D/E
            next_hop (string): The next hop interface or ip address
            **kwargs['next_hop_ip'] (string): The next hop address on
                destination interface
            **kwargs['distance'] (string): Administrative distance for this
                route
            **kwargs['tag'] (string): Route tag
            **kwargs['route_name'] (string): Route name

        Returns:
            True if the operation succeeds, otherwise False.

        Notes:
            Any existing tag value must be included in call to
            set_route_name, otherwise the tag will be reset
            by the call to EOS.
        """
        # Call _set_route with the new route_name information
        return self._set_route(ip_dest, next_hop, **kwargs)

    def _build_commands(self, ip_dest, next_hop, **kwargs):
        """Build the EOS command string for ip route interactions.

        Args:
            ip_dest (string): The ip address of the destination in the
                form of A.B.C.D/E
            next_hop (string): The next hop interface or ip address
            **kwargs['next_hop_ip'] (string): The next hop address on
                destination interface
            **kwargs['distance'] (string): Administrative distance for this
                route
            **kwargs['tag'] (string): Route tag
            **kwargs['route_name'] (string): Route name

        Returns the ip route command string to be sent to the switch for
        the given set of parameters.
        """
        commands = "ip route %s %s" % (ip_dest, next_hop)

        # append the optional components in the order EOS expects them
        next_hop_ip = kwargs.get('next_hop_ip', None)
        distance = kwargs.get('distance', None)
        tag = kwargs.get('tag', None)
        route_name = kwargs.get('route_name', None)

        if next_hop_ip is not None:
            commands += " %s" % next_hop_ip
        if distance is not None:
            commands += " %s" % distance
        if tag is not None:
            commands += " tag %s" % tag
        if route_name is not None:
            commands += " name %s" % route_name

        return commands

    def _set_route(self, ip_dest, next_hop, **kwargs):
        """Configure a static route

        Args:
            ip_dest (string): The ip address of the destination in the
                form of A.B.C.D/E
            next_hop (string): The next hop interface or ip address
            **kwargs['next_hop_ip'] (string): The next hop address on
                destination interface
            **kwargs['distance'] (string): Administrative distance for this
                route
            **kwargs['tag'] (string): Route tag
            **kwargs['route_name'] (string): Route name
            **kwargs['delete'] (boolean): If true, deletes the specified route
                instead of creating or setting values for the route
            **kwargs['default'] (boolean): If true, defaults the specified
                route instead of creating or setting values for the route

        Returns:
            True if the operation succeeds, otherwise False.
        """
        commands = self._build_commands(ip_dest, next_hop, **kwargs)

        delete = kwargs.get('delete', False)
        default = kwargs.get('default', False)

        # Prefix with 'no' if delete is set
        if delete:
            commands = "no " + commands
        # Or with 'default' if default is setting
        else:
            if default:
                commands = "default " + commands

        return self.configure(commands)
def instance(node):
    """Return a StaticRoute resource bound to *node*.

    This factory creates a StaticRoute object with the given Node, and is
    required for the resource to be autoloaded by the Node object.

    Args:
        node (Node): An instance of Node passed through to the resource
    """
    resource = StaticRoute(node)
    return resource
| |
from main import GuessManager
def test_init_uppercase():
    """An already upper-case word is stored as-is with an all-False mask."""
    gm = GuessManager('SOMEWORD')
    assert gm.word == 'SOMEWORD'
    assert gm.mask == [False] * 8

def test_init_mask():
    """An explicit mask is kept exactly as given."""
    initial_mask = [True, False, True, False, True, False, True, False]
    gm = GuessManager('SOMEWORD', mask=initial_mask)
    assert gm.word == 'SOMEWORD'
    assert gm.mask == initial_mask

def test_init_mword():
    """_mword pairs each letter with its mask flag."""
    gm = GuessManager('ABC', mask=[False, True, False])
    assert gm._mword == [('A', False), ('B', True), ('C', False)]

def test_init_lowercase():
    """Lower-case input is normalized to upper case."""
    gm = GuessManager('someword')
    assert gm.word == 'SOMEWORD'
    assert gm.mask == [False] * 8

def test_init_guessed_letters():
    """No letters have been guessed right after construction."""
    gm = GuessManager('someword')
    assert gm.guessed_letters == set()

def test_init_tried_letters():
    """No letters have been tried right after construction."""
    gm = GuessManager('someword')
    assert gm.tried_letters == set()

def test_init_hidden_letters():
    """hidden_letters holds the distinct letters still to be found."""
    gm = GuessManager('someword')
    assert gm.hidden_letters == {'S', 'O', 'M', 'E', 'W', 'R', 'D'}

def test_init_hidden_letters_checks_mask():
    """Letters already revealed by the mask are not counted as hidden."""
    gm = GuessManager('some', mask=[True, False, False, False])
    assert gm.hidden_letters == {'O', 'M', 'E'}
def test_len():
    """len counts every position of an unmasked word."""
    gm = GuessManager('someword')
    assert gm.len == 8

def test_len_checks_mask():
    """Masked positions are excluded from the length."""
    gm = GuessManager('a b', mask=[False, True, False])
    assert gm.len == 2

def test_guessed():
    """No positions are guessed initially."""
    gm = GuessManager('a b', mask=[False, True, False])
    assert gm.guessed == 0

def test_missing():
    """Every position of an unmasked word is missing initially."""
    gm = GuessManager('someword')
    assert gm.missing == 8

def test_missing_checks_mask():
    """Masked positions are not counted as missing."""
    gm = GuessManager('a b', mask=[False, True, False])
    assert gm.missing == 2

def test_status():
    """status is all-None before any guess."""
    gm = GuessManager('someword')
    assert gm.status == [None] * 8

def test_status_check_mask():
    """Masked positions are already revealed in the status."""
    gm = GuessManager('some', [True, False, True, False])
    assert gm.status == ['S', None, 'M', None]
def test_guess_letter():
    """A correct single-occurrence guess reveals exactly one position."""
    gm = GuessManager('someword')
    hits = gm.guess('m')
    assert gm.guessed_letters == {'M'}
    assert gm.tried_letters == {'M'}
    assert gm.guessed == 1
    assert hits == 1
    assert gm.missing == 7
    assert gm.status == [None, None, 'M', None, None, None, None, None]

def test_guess_letter_with_mask():
    """A correct guess only affects unmasked positions."""
    gm = GuessManager('a bc', mask=[False, True, False, False])
    hits = gm.guess('a')
    assert gm.guessed_letters == {'A'}
    assert gm.tried_letters == {'A'}
    assert gm.guessed == 1
    assert hits == 1
    assert gm.missing == 2
    assert gm.status == ['A', ' ', None, None]

def test_guess_more_than_one_letter():
    """A guess reveals every occurrence of the letter at once."""
    gm = GuessManager('someword')
    hits = gm.guess('o')
    assert gm.guessed_letters == {'O'}
    assert gm.tried_letters == {'O'}
    assert gm.guessed == 2
    assert hits == 2
    assert gm.missing == 6
    assert gm.status == [None, 'O', None, None, None, 'O', None, None]

def test_guess_multiple_calls_same_letter():
    """Repeating an already-correct guess scores zero new hits."""
    gm = GuessManager('someword')
    hits = gm.guess('o')
    hits = gm.guess('o')
    assert gm.guessed_letters == {'O'}
    assert gm.tried_letters == {'O'}
    assert gm.guessed == 2
    assert hits == 0
    assert gm.missing == 6
    assert gm.status == [None, 'O', None, None, None, 'O', None, None]

def test_guess_multiple_calls_different_letters():
    """Successive distinct guesses accumulate in the tracking sets."""
    gm = GuessManager('someword')
    gm.guess('o')
    gm.guess('m')
    assert gm.guessed_letters == {'O', 'M'}
    assert gm.tried_letters == {'O', 'M'}
    assert gm.guessed == 3
    assert gm.missing == 5
    assert gm.status == [None, 'O', 'M', None, None, 'O', None, None]

def test_wrong_guess():
    """A wrong guess is recorded as tried but reveals nothing."""
    gm = GuessManager('someword')
    hits = gm.guess('x')
    assert gm.guessed_letters == set()
    assert gm.tried_letters == {'X'}
    assert gm.guessed == 0
    assert hits == 0
    assert gm.missing == 8
    assert gm.status == [None, None, None, None, None, None, None, None]
def test_guess_word_successful():
    """Guessing the full word reveals every letter in one move."""
    gm = GuessManager('someword')
    hits = gm.guess_word('someword')
    assert gm.guessed_letters == {'S', 'O', 'M', 'E', 'W', 'R', 'D'}
    assert gm.tried_letters == set()
    assert gm.guessed == 8
    assert hits == 8
    assert gm.missing == 0
    assert gm.status == list('someword'.upper())

def test_guess_word_checks_mask():
    """A full-word guess only scores the unmasked positions."""
    gm = GuessManager('a (19)', mask=[False, True, True, True, True, True])
    hits = gm.guess_word('a (19)')
    assert gm.guessed_letters == {'A'}
    assert gm.tried_letters == set()
    assert gm.guessed == 1
    assert hits == 1
    assert gm.missing == 0
    assert gm.status == list('a (19)'.upper())

def test_guess_word_successful_after_guessed_letters():
    """A full-word guess only counts positions not already revealed."""
    gm = GuessManager('someword')
    gm.guess('s')
    gm.guess('o')
    hits = gm.guess_word('someword')
    assert gm.guessed_letters == {'S', 'O', 'M', 'E', 'W', 'R', 'D'}
    assert gm.tried_letters == {'S', 'O'}
    assert gm.guessed == 8
    assert hits == 5
    assert gm.missing == 0
    assert gm.status == list('someword'.upper())

def test_guess_word_unsuccessful():
    """A wrong full-word guess changes nothing."""
    gm = GuessManager('someword')
    hits = gm.guess_word('sameward')
    assert gm.guessed_letters == set()
    assert gm.tried_letters == set()
    assert hits == 0
    assert gm.missing == 8
    assert gm.status == [None, None, None, None, None, None, None, None]

def test_guess_word_unsuccessful_after_guessed_letters():
    """A wrong full-word guess leaves prior letter guesses intact."""
    gm = GuessManager('someword')
    gm.guess('s')
    gm.guess('o')
    hits = gm.guess_word('somelord')
    assert gm.guessed_letters == {'S', 'O'}
    assert gm.tried_letters == {'S', 'O'}
    assert hits == 0
    assert gm.missing == 5
    assert gm.status == ['S', 'O', None, None, None, 'O', None, None]
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for calculating loss, accuracy, and other model metrics.
Metrics:
- Padded loss, accuracy, and negative log perplexity. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/metrics.py
- BLEU approximation. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/bleu_hook.py
- ROUGE score. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/rouge.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
def _pad_tensors_to_same_length(x, y):
  """Zero-pad x and y along dim 1 so both end up with the same length.

  x is rank-3 (e.g. logits), y is rank-2 (e.g. labels); only the second
  dimension of each is padded.
  """
  with tf.name_scope("pad_to_same_length"):
    len_x = tf.shape(x)[1]
    len_y = tf.shape(y)[1]
    target_length = tf.maximum(len_x, len_y)
    padded_x = tf.pad(x, [[0, 0], [0, target_length - len_x], [0, 0]])
    padded_y = tf.pad(y, [[0, 0], [0, target_length - len_y]])
    return padded_x, padded_y
def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):
  """Calculate label-smoothed cross entropy loss while ignoring padding.

  Args:
    logits: Tensor of size [batch_size, length_logits, vocab_size]
    labels: Tensor of size [batch_size, length_labels]; id 0 is padding.
    smoothing: Label smoothing constant, used to determine the on and off
      values of the soft targets.
    vocab_size: int size of the vocabulary
  Returns:
    Tuple of (cross entropy loss, weights): float32 tensors with
    shape [batch_size, max(length_logits, length_labels)]; weights are 1.0
    for non-padding positions and 0.0 for padding.
  """
  with tf.name_scope("loss", values=[logits, labels]):
    logits, labels = _pad_tensors_to_same_length(logits, labels)
    # Calculate smoothing cross entropy
    with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]):
      # Put `confidence` mass on the true label and spread the remainder
      # uniformly over the other vocab_size - 1 entries.
      confidence = 1.0 - smoothing
      low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
      soft_targets = tf.one_hot(
          tf.cast(labels, tf.int32),
          depth=vocab_size,
          on_value=confidence,
          off_value=low_confidence)
      xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
          logits=logits, labels=soft_targets)
      # Calculate the best (lowest) possible value of cross entropy, and
      # subtract from the cross entropy loss, so a perfect prediction
      # yields a loss of 0 even with smoothing enabled.
      normalizing_constant = -(
          confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) *
          low_confidence * tf.log(low_confidence + 1e-20))
      xentropy -= normalizing_constant
    # Zero-out positions whose label is padding (id 0).
    weights = tf.to_float(tf.not_equal(labels, 0))
    return xentropy * weights, weights
def _convert_to_eval_metric(metric_fn):
  """Wrap a (scores, weights) metric fn into a tf.metrics-style eval metric.

  Args:
    metric_fn: function returning per-batch scores and weights for the
      logits and predicted labels it is called with.

  Returns:
    A function with the same signature whose return value aggregates
    metric_fn's per-batch results across all evaluated batches.
  """
  def problem_metric_fn(*args):
    """Aggregate metric_fn's per-batch values over the whole evaluation."""
    per_batch_scores, per_batch_weights = metric_fn(*args)
    # tf.metrics.mean keeps running totals, so the weighted mean is exact
    # over every batch seen during evaluation.
    return tf.metrics.mean(per_batch_scores, per_batch_weights)
  return problem_metric_fn
def get_eval_metrics(logits, labels, params):
  """Build the dictionary of model evaluation metrics."""
  vocab_size = params["vocab_size"]
  metrics = {
      "accuracy": _convert_to_eval_metric(padded_accuracy)(logits, labels),
      "accuracy_top5": _convert_to_eval_metric(padded_accuracy_top5)(
          logits, labels),
      "accuracy_per_sequence": _convert_to_eval_metric(
          padded_sequence_accuracy)(logits, labels),
      "neg_log_perplexity": _convert_to_eval_metric(padded_neg_log_perplexity)(
          logits, labels, vocab_size),
  }
  if not params["use_tpu"]:
    # The text metrics rely on tf.py_func, which TPU does not support, so
    # they are only registered for non-TPU runs.
    metrics["approx_bleu_score"] = _convert_to_eval_metric(bleu_score)(
        logits, labels)
    metrics["rouge_2_fscore"] = _convert_to_eval_metric(rouge_2_fscore)(
        logits, labels)
    metrics["rouge_L_fscore"] = _convert_to_eval_metric(rouge_l_fscore)(
        logits, labels)
  # Prefix every metric name with "metrics/" so TensorBoard groups the
  # graphs under the "metrics" category.
  return {"metrics/%s" % name: value for name, value in six.iteritems(metrics)}
def padded_accuracy(logits, labels):
  """Per-token accuracy, weighted so padding positions (label 0) are ignored."""
  with tf.variable_scope("padded_accuracy", values=[logits, labels]):
    logits, labels = _pad_tensors_to_same_length(logits, labels)
    nonpad_weights = tf.to_float(tf.not_equal(labels, 0))
    # Greedy prediction: the highest-scoring vocab entry per position.
    greedy_predictions = tf.to_int32(tf.argmax(logits, axis=-1))
    int_labels = tf.to_int32(labels)
    return tf.to_float(tf.equal(greedy_predictions, int_labels)), nonpad_weights
def padded_accuracy_topk(logits, labels, k):
  """Percentage of times that top-k predictions matches labels on non-0s.

  Args:
    logits: Tensor of size [batch_size, length_logits, vocab_size]
    labels: Tensor of size [batch_size, length_labels]; id 0 is padding.
    k: int, number of top predictions to consider per position.
  Returns:
    Tuple of (hit indicator per position, non-padding weights).
  """
  with tf.variable_scope("padded_accuracy_topk", values=[logits, labels]):
    logits, labels = _pad_tensors_to_same_length(logits, labels)
    weights = tf.to_float(tf.not_equal(labels, 0))
    # Guard against k exceeding the vocabulary dimension.
    effective_k = tf.minimum(k, tf.shape(logits)[-1])
    _, outputs = tf.nn.top_k(logits, k=effective_k)
    outputs = tf.to_int32(outputs)
    padded_labels = tf.to_int32(labels)
    padded_labels = tf.expand_dims(padded_labels, axis=-1)
    padded_labels += tf.zeros_like(outputs)  # Pad to same shape.
    same = tf.to_float(tf.equal(outputs, padded_labels))
    # A position counts as correct if the label appears anywhere in its
    # top-k set (the sum is 1.0 when it does, 0.0 otherwise).
    same_topk = tf.reduce_sum(same, axis=-1)
    return same_topk, weights
def padded_accuracy_top5(logits, labels):
  """Per-token top-5 accuracy on non-padding positions."""
  return padded_accuracy_topk(logits, labels, k=5)
def padded_sequence_accuracy(logits, labels):
  """Percentage of times that predictions matches labels everywhere (non-0).

  Args:
    logits: Tensor of size [batch_size, length_logits, vocab_size]
    labels: Tensor of size [batch_size, length_labels]; id 0 is padding.
  Returns:
    Tuple of (per-sequence correctness indicator, constant weight 1.0).
  """
  with tf.variable_scope("padded_sequence_accuracy", values=[logits, labels]):
    logits, labels = _pad_tensors_to_same_length(logits, labels)
    weights = tf.to_float(tf.not_equal(labels, 0))
    outputs = tf.to_int32(tf.argmax(logits, axis=-1))
    padded_labels = tf.to_int32(labels)
    # Count mismatches only on non-padding positions.
    not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights
    axis = list(range(1, len(outputs.get_shape())))
    # A sequence is correct (1.0) only if it has zero wrong positions.
    correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
    return correct_seq, tf.constant(1.0)
def padded_neg_log_perplexity(logits, labels, vocab_size):
  """Average negative log-perplexity over non-padding tokens (no smoothing)."""
  xent, token_weights = padded_cross_entropy_loss(logits, labels, 0, vocab_size)
  return -xent, token_weights
def bleu_score(logits, labels):
  """Approximate BLEU score computation between labels and predictions.

  An approximate BLEU scoring method since we do not glue word pieces or
  decode the ids and tokenize the output. By default, we use ngram order of 4
  and use brevity penalty. Also, this does not have beam search.

  Args:
    logits: Tensor of size [batch_size, length_logits, vocab_size]
    labels: Tensor of size [batch-size, length_labels]
  Returns:
    Tuple of (approx BLEU score as a float32 tensor, constant weight 1.0).
  """
  # Greedy decode: take the most likely token id at every position.
  predictions = tf.to_int32(tf.argmax(logits, axis=-1))
  # TODO: Look into removing use of py_func
  bleu = tf.py_func(compute_bleu, (labels, predictions), tf.float32)
  return bleu, tf.constant(1.0)
def _get_ngrams_with_counter(segment, max_order):
  """Extracts all n-grams up to a given maximum order from an input segment.

  Args:
    segment: text segment (sequence of tokens) from which n-grams will be
      extracted.
    max_order: maximum length in tokens of the n-grams returned.

  Returns:
    Counter mapping each n-gram tuple (orders 1..max_order) to the number
    of times it occurs in segment.
  """
  return collections.Counter(
      tuple(segment[start:start + order])
      for order in range(1, max_order + 1)
      for start in range(len(segment) - order + 1))
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
                 use_bp=True):
  """Computes BLEU score of translated segments against one or more references.

  Args:
    reference_corpus: list of references for each translation. Each
      reference should be tokenized into a list of tokens.
    translation_corpus: list of translations to score. Each translation
      should be tokenized into a list of tokens.
    max_order: Maximum n-gram order to use when computing BLEU score.
    use_bp: boolean, whether to apply brevity penalty.
  Returns:
    BLEU score as np.float32.
  """
  reference_length = 0
  translation_length = 0
  bp = 1.0
  geo_mean = 0
  matches_by_order = [0] * max_order
  possible_matches_by_order = [0] * max_order
  for (references, translations) in zip(reference_corpus, translation_corpus):
    reference_length += len(references)
    translation_length += len(translations)
    ref_ngram_counts = _get_ngrams_with_counter(references, max_order)
    translation_ngram_counts = _get_ngrams_with_counter(translations, max_order)
    # Clip each n-gram's match count at its count in the reference.
    overlap = dict((ngram,
                    min(count, translation_ngram_counts[ngram]))
                   for ngram, count in ref_ngram_counts.items())
    for ngram in overlap:
      matches_by_order[len(ngram) - 1] += overlap[ngram]
    for ngram in translation_ngram_counts:
      possible_matches_by_order[len(ngram) - 1] += translation_ngram_counts[
          ngram]
  precisions = [0] * max_order
  smooth = 1.0
  for i in range(0, max_order):
    if possible_matches_by_order[i] > 0:
      if matches_by_order[i] > 0:
        precisions[i] = float(
            matches_by_order[i]) / possible_matches_by_order[i]
      else:
        # Smoothed precision for orders with no matches, so log() below
        # stays finite; halves with every further matchless order.
        smooth *= 2
        precisions[i] = 1.0 / (smooth * possible_matches_by_order[i])
    else:
      precisions[i] = 0.0
  if max(precisions) > 0:
    p_log_sum = sum(math.log(p) for p in precisions if p)
    geo_mean = math.exp(p_log_sum / max_order)
  if use_bp:
    # Guard the divisions: an empty translation gets the limiting brevity
    # penalty of 0; an empty reference (or a translation at least as long
    # as the reference) gets no penalty. The original code raised
    # ZeroDivisionError for either empty side.
    if translation_length == 0:
      bp = 0.0
    elif reference_length == 0 or translation_length >= reference_length:
      bp = 1.0
    else:
      ratio = translation_length / reference_length
      bp = math.exp(1 - 1. / ratio)
  bleu = geo_mean * bp
  return np.float32(bleu)
def rouge_2_fscore(logits, labels):
  """ROUGE-2 F1 score computation between labels and predictions.

  This is an approximate ROUGE scoring method since we do not glue word pieces
  or decode the ids and tokenize the output.

  Args:
    logits: tensor, model predictions (argmax over the last axis is taken
      here to obtain token ids).
    labels: tensor, gold output.
  Returns:
    Tuple of (approx rouge-2 f1 score as float32 tensor, constant weight 1.0).
  """
  predictions = tf.to_int32(tf.argmax(logits, axis=-1))
  # TODO: Look into removing use of py_func
  rouge_2_f_score = tf.py_func(rouge_n, (predictions, labels), tf.float32)
  return rouge_2_f_score, tf.constant(1.0)
def _get_ngrams(n, text):
  """Calculates the set of distinct n-grams in a token sequence.

  Args:
    n: which n-grams to calculate
    text: An array of tokens

  Returns:
    A set of n-gram tuples (empty when len(text) < n).
  """
  return {tuple(text[i:i + n]) for i in range(len(text) - n + 1)}
def rouge_n(eval_sentences, ref_sentences, n=2):
  """Computes ROUGE-N f1 score of two text collections of sentences.

  Source: https://www.microsoft.com/en-us/research/publication/
  rouge-a-package-for-automatic-evaluation-of-summaries/

  Args:
    eval_sentences: Predicted sentences.
    ref_sentences: Sentences from the reference set
    n: Size of ngram. Defaults to 2.

  Returns:
    f1 score for ROUGE-N, averaged over sentence pairs, as np.float32.
  """
  f1_scores = []
  for predicted, reference in zip(eval_sentences, ref_sentences):
    predicted_ngrams = _get_ngrams(n, predicted)
    reference_ngrams = _get_ngrams(n, reference)
    overlap_count = len(predicted_ngrams.intersection(reference_ngrams))
    # Degenerate sentences with no n-grams score 0 rather than dividing
    # by zero; this isn't mathematically exact but is good enough here.
    if len(predicted_ngrams) == 0:
      precision = 0.0
    else:
      precision = float(overlap_count) / len(predicted_ngrams)
    if len(reference_ngrams) == 0:
      recall = 0.0
    else:
      recall = float(overlap_count) / len(reference_ngrams)
    # The 1e-8 term keeps the harmonic mean finite when both are zero.
    f1_scores.append(2.0 * ((precision * recall) / (precision + recall + 1e-8)))
  return np.mean(f1_scores, dtype=np.float32)
def rouge_l_fscore(predictions, labels):
  """ROUGE scores computation between labels and predictions.

  This is an approximate ROUGE scoring method since we do not glue word pieces
  or decode the ids and tokenize the output.

  Args:
    predictions: tensor, model predictions
    labels: tensor, gold output.
  Returns:
    Tuple of (approx rouge-l f1 score as float32 tensor, constant weight 1.0).
  """
  # NOTE(review): despite the name, `predictions` is treated as logits here
  # (argmax over the last axis), mirroring the `logits` parameter of
  # rouge_2_fscore — confirm callers pass logits, not decoded ids.
  outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
  rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels),
                               tf.float32)
  return rouge_l_f_score, tf.constant(1.0)
def rouge_l_sentence_level(eval_sentences, ref_sentences):
  """Computes ROUGE-L (sentence level) of two collections of sentences.

  Source: https://www.microsoft.com/en-us/research/publication/
  rouge-a-package-for-automatic-evaluation-of-summaries/

  Calculated according to:
    R_lcs = LCS(X,Y)/m
    P_lcs = LCS(X,Y)/n
    F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)
  where X = reference summary, Y = candidate summary, m = length of the
  reference and n = length of the candidate.

  Args:
    eval_sentences: The sentences that have been picked by the summarizer
    ref_sentences: The sentences from the reference set

  Returns:
    A float: mean F_lcs over sentence pairs, as np.float32.
  """
  scores = []
  for candidate, reference in zip(eval_sentences, ref_sentences):
    ref_len = float(len(reference))
    cand_len = float(len(candidate))
    lcs_length = _len_lcs(candidate, reference)
    scores.append(_f_lcs(lcs_length, ref_len, cand_len))
  return np.mean(scores, dtype=np.float32)
def _len_lcs(x, y):
  """Returns the length of the Longest Common Subsequence between two seqs.

  Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence

  Args:
    x: sequence of words
    y: sequence of words

  Returns:
    integer: Length of LCS between x and y
  """
  # The full DP table's bottom-right cell holds the LCS length.
  return _lcs(x, y)[len(x), len(y)]
def _lcs(x, y):
  """Computes the LCS dynamic-programming table for two sequences.

  The implementation uses a DP algorithm running in O(nm) time where
  n = len(x) and m = len(y).
  Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence

  Args:
    x: collection of words
    y: collection of words

  Returns:
    Dict mapping (i, j) -> length of the LCS of x[:i] and y[:j].
  """
  table = dict()
  # Base cases: an empty prefix on either side has LCS length 0.
  for i in range(len(x) + 1):
    table[i, 0] = 0
  for j in range(len(y) + 1):
    table[0, j] = 0
  for i, x_item in enumerate(x, start=1):
    for j, y_item in enumerate(y, start=1):
      if x_item == y_item:
        table[i, j] = table[i - 1, j - 1] + 1
      else:
        table[i, j] = max(table[i - 1, j], table[i, j - 1])
  return table
def _f_lcs(llcs, m, n):
  """Computes the LCS-based F-measure score.

  Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/
  rouge-working-note-v1.3.1.pdf

  Args:
    llcs: Length of LCS
    m: number of words in reference summary
    n: number of words in candidate summary

  Returns:
    Float. LCS-based F-measure score
  """
  recall = llcs / m
  precision = llcs / n
  # The 1e-12 terms keep the ratios finite when recall or the denominator
  # would otherwise be zero.
  beta_sq = (precision / (recall + 1e-12)) ** 2
  numerator = (1 + beta_sq) * recall * precision
  denominator = recall + beta_sq * precision
  return numerator / (denominator + 1e-12)
| |
"""Define tests for the Flux LED/Magic Home config flow."""
from __future__ import annotations
from unittest.mock import patch
import pytest
from homeassistant import config_entries
from homeassistant.components import dhcp
from homeassistant.components.flux_led.const import (
CONF_CUSTOM_EFFECT_COLORS,
CONF_CUSTOM_EFFECT_SPEED_PCT,
CONF_CUSTOM_EFFECT_TRANSITION,
DOMAIN,
MODE_RGB,
TRANSITION_JUMP,
TRANSITION_STROBE,
)
from homeassistant.const import (
CONF_DEVICE,
CONF_HOST,
CONF_MAC,
CONF_MODE,
CONF_NAME,
CONF_PROTOCOL,
)
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import RESULT_TYPE_ABORT, RESULT_TYPE_FORM
from . import (
DEFAULT_ENTRY_TITLE,
DHCP_DISCOVERY,
FLUX_DISCOVERY,
FLUX_DISCOVERY_PARTIAL,
IP_ADDRESS,
MAC_ADDRESS,
MODULE,
_patch_discovery,
_patch_wifibulb,
)
from tests.common import MockConfigEntry
MAC_ADDRESS_DIFFERENT = "ff:bb:ff:dd:ee:ff"
async def test_discovery(hass: HomeAssistant):
    """Test the user flow: discover devices, pick one, then ignore it later."""
    with _patch_discovery(), _patch_wifibulb():
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": config_entries.SOURCE_USER}
        )
        await hass.async_block_till_done()
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    assert not result["errors"]
    result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
    await hass.async_block_till_done()
    assert result2["type"] == "form"
    assert result2["step_id"] == "pick_device"
    assert not result2["errors"]
    # test we can try again
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    assert not result["errors"]
    result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
    await hass.async_block_till_done()
    assert result2["type"] == "form"
    assert result2["step_id"] == "pick_device"
    assert not result2["errors"]
    # Selecting the discovered device by MAC creates the config entry.
    with _patch_discovery(), _patch_wifibulb(), patch(
        f"{MODULE}.async_setup", return_value=True
    ) as mock_setup, patch(
        f"{MODULE}.async_setup_entry", return_value=True
    ) as mock_setup_entry:
        result3 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_DEVICE: MAC_ADDRESS},
        )
        await hass.async_block_till_done()
    assert result3["type"] == "create_entry"
    assert result3["title"] == DEFAULT_ENTRY_TITLE
    assert result3["data"] == {CONF_HOST: IP_ADDRESS, CONF_NAME: DEFAULT_ENTRY_TITLE}
    mock_setup.assert_called_once()
    mock_setup_entry.assert_called_once()
    # ignore configured devices: the only discovered device is now set up,
    # so a fresh user flow finds nothing to pick and aborts.
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    assert not result["errors"]
    with _patch_discovery(), _patch_wifibulb():
        result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
        await hass.async_block_till_done()
    assert result2["type"] == "abort"
    assert result2["reason"] == "no_devices_found"
async def test_discovery_with_existing_device_present(hass: HomeAssistant):
    """Test discovery flow while an unrelated device is already configured."""
    # Pre-existing entry for a different device (different host and MAC).
    config_entry = MockConfigEntry(
        domain=DOMAIN, data={CONF_HOST: "127.0.0.2"}, unique_id="dd:dd:dd:dd:dd:dd"
    )
    config_entry.add_to_hass(hass)
    with _patch_discovery(), _patch_wifibulb(no_device=True):
        await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    assert not result["errors"]
    with _patch_discovery(), _patch_wifibulb():
        result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
        await hass.async_block_till_done()
    assert result2["type"] == "form"
    assert result2["step_id"] == "pick_device"
    assert not result2["errors"]
    # Now abort and make sure we can start over
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    assert not result["errors"]
    with _patch_discovery(), _patch_wifibulb():
        result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
        await hass.async_block_till_done()
    assert result2["type"] == "form"
    assert result2["step_id"] == "pick_device"
    assert not result2["errors"]
    # Picking the newly discovered device creates a second entry.
    with _patch_discovery(), _patch_wifibulb(), patch(
        f"{MODULE}.async_setup_entry", return_value=True
    ) as mock_setup_entry:
        result3 = await hass.config_entries.flow.async_configure(
            result["flow_id"], {CONF_DEVICE: MAC_ADDRESS}
        )
        assert result3["type"] == "create_entry"
        assert result3["title"] == DEFAULT_ENTRY_TITLE
        assert result3["data"] == {
            CONF_HOST: IP_ADDRESS,
            CONF_NAME: DEFAULT_ENTRY_TITLE,
        }
        await hass.async_block_till_done()
    mock_setup_entry.assert_called_once()
    # ignore configured devices
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    assert not result["errors"]
    with _patch_discovery(), _patch_wifibulb():
        result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
        await hass.async_block_till_done()
    assert result2["type"] == "abort"
    assert result2["reason"] == "no_devices_found"
async def test_discovery_no_device(hass: HomeAssistant):
    """Test discovery without device."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # With nothing discoverable, submitting the empty user form aborts.
    with _patch_discovery(no_device=True), _patch_wifibulb():
        result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
        await hass.async_block_till_done()
    assert result2["type"] == "abort"
    assert result2["reason"] == "no_devices_found"
async def test_import(hass: HomeAssistant):
    """Test import from yaml."""
    config = {
        CONF_HOST: IP_ADDRESS,
        CONF_MAC: MAC_ADDRESS,
        CONF_NAME: "floor lamp",
        CONF_PROTOCOL: "ledenet",
        CONF_MODE: MODE_RGB,
        CONF_CUSTOM_EFFECT_COLORS: "[255,0,0], [0,0,255]",
        CONF_CUSTOM_EFFECT_SPEED_PCT: 30,
        CONF_CUSTOM_EFFECT_TRANSITION: TRANSITION_STROBE,
    }
    # Success
    with _patch_discovery(), _patch_wifibulb(), patch(
        f"{MODULE}.async_setup", return_value=True
    ) as mock_setup, patch(
        f"{MODULE}.async_setup_entry", return_value=True
    ) as mock_setup_entry:
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data=config
        )
        await hass.async_block_till_done()
    assert result["type"] == "create_entry"
    assert result["title"] == "floor lamp"
    # Connection settings land in entry data ...
    assert result["data"] == {
        CONF_HOST: IP_ADDRESS,
        CONF_NAME: "floor lamp",
        CONF_PROTOCOL: "ledenet",
    }
    # ... while mode/effect tuning is split out into entry options.
    assert result["options"] == {
        CONF_MODE: MODE_RGB,
        CONF_CUSTOM_EFFECT_COLORS: "[255,0,0], [0,0,255]",
        CONF_CUSTOM_EFFECT_SPEED_PCT: 30,
        CONF_CUSTOM_EFFECT_TRANSITION: TRANSITION_STROBE,
    }
    mock_setup.assert_called_once()
    mock_setup_entry.assert_called_once()
    # Duplicate
    with _patch_discovery(), _patch_wifibulb():
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data=config
        )
        await hass.async_block_till_done()
    assert result["type"] == "abort"
    assert result["reason"] == "already_configured"
async def test_manual_working_discovery(hass: HomeAssistant):
    """Test manually setup."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    assert not result["errors"]
    # Cannot connect (timeout)
    with _patch_discovery(no_device=True), _patch_wifibulb(no_device=True):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"], {CONF_HOST: IP_ADDRESS}
        )
        await hass.async_block_till_done()
    # The form is re-shown with an error instead of aborting.
    assert result2["type"] == "form"
    assert result2["step_id"] == "user"
    assert result2["errors"] == {"base": "cannot_connect"}
    # Success
    with _patch_discovery(), _patch_wifibulb(), patch(
        f"{MODULE}.async_setup", return_value=True
    ), patch(f"{MODULE}.async_setup_entry", return_value=True):
        result4 = await hass.config_entries.flow.async_configure(
            result["flow_id"], {CONF_HOST: IP_ADDRESS}
        )
        await hass.async_block_till_done()
    assert result4["type"] == "create_entry"
    assert result4["title"] == DEFAULT_ENTRY_TITLE
    assert result4["data"] == {CONF_HOST: IP_ADDRESS, CONF_NAME: DEFAULT_ENTRY_TITLE}
    # Duplicate
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    with _patch_discovery(no_device=True), _patch_wifibulb(no_device=True):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"], {CONF_HOST: IP_ADDRESS}
        )
        await hass.async_block_till_done()
    assert result2["type"] == "abort"
    assert result2["reason"] == "already_configured"
async def test_manual_no_discovery_data(hass: HomeAssistant):
    """Test manually setup without discovery data."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"
    assert not result["errors"]
    with _patch_discovery(no_device=True), _patch_wifibulb(), patch(
        f"{MODULE}.async_setup", return_value=True
    ), patch(f"{MODULE}.async_setup_entry", return_value=True):
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], {CONF_HOST: IP_ADDRESS}
        )
        await hass.async_block_till_done()
    assert result["type"] == "create_entry"
    # Without discovery data there is no friendly title, so the IP address
    # doubles as the entry name.
    assert result["data"] == {CONF_HOST: IP_ADDRESS, CONF_NAME: IP_ADDRESS}
async def test_discovered_by_discovery_and_dhcp(hass):
    """Test we get the form with discovery and abort for dhcp source when we get both."""
    with _patch_discovery(), _patch_wifibulb():
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_DISCOVERY},
            data=FLUX_DISCOVERY,
        )
        await hass.async_block_till_done()
    assert result["type"] == RESULT_TYPE_FORM
    assert result["errors"] is None
    # A DHCP discovery for the same device must not start a second flow.
    with _patch_discovery(), _patch_wifibulb():
        result2 = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_DHCP},
            data=DHCP_DISCOVERY,
        )
        await hass.async_block_till_done()
    assert result2["type"] == RESULT_TYPE_ABORT
    assert result2["reason"] == "already_in_progress"
    # Even with a different MAC, the matching IP is enough to dedupe.
    with _patch_discovery(), _patch_wifibulb():
        result3 = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_DHCP},
            data=dhcp.DhcpServiceInfo(
                hostname="any",
                ip=IP_ADDRESS,
                macaddress="00:00:00:00:00:00",
            ),
        )
        await hass.async_block_till_done()
    assert result3["type"] == RESULT_TYPE_ABORT
    assert result3["reason"] == "already_in_progress"
async def test_discovered_by_discovery(hass):
    """Test we can setup when discovered from discovery."""
    with _patch_discovery(), _patch_wifibulb():
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_DISCOVERY},
            data=FLUX_DISCOVERY,
        )
        await hass.async_block_till_done()
    assert result["type"] == RESULT_TYPE_FORM
    assert result["errors"] is None
    # Confirming the discovery form creates the entry and sets up the integration.
    with _patch_discovery(), _patch_wifibulb(), patch(
        f"{MODULE}.async_setup", return_value=True
    ) as mock_async_setup, patch(
        f"{MODULE}.async_setup_entry", return_value=True
    ) as mock_async_setup_entry:
        result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
        await hass.async_block_till_done()
    assert result2["type"] == "create_entry"
    assert result2["data"] == {CONF_HOST: IP_ADDRESS, CONF_NAME: DEFAULT_ENTRY_TITLE}
    assert mock_async_setup.called
    assert mock_async_setup_entry.called
async def test_discovered_by_dhcp_udp_responds(hass):
    """Test we can setup when discovered from dhcp but with udp response."""
    with _patch_discovery(), _patch_wifibulb():
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": config_entries.SOURCE_DHCP}, data=DHCP_DISCOVERY
        )
        await hass.async_block_till_done()
    assert result["type"] == RESULT_TYPE_FORM
    assert result["errors"] is None
    # Confirming the form creates the entry with the discovered title.
    with _patch_discovery(), _patch_wifibulb(), patch(
        f"{MODULE}.async_setup", return_value=True
    ) as mock_async_setup, patch(
        f"{MODULE}.async_setup_entry", return_value=True
    ) as mock_async_setup_entry:
        result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
        await hass.async_block_till_done()
    assert result2["type"] == "create_entry"
    assert result2["data"] == {CONF_HOST: IP_ADDRESS, CONF_NAME: DEFAULT_ENTRY_TITLE}
    assert mock_async_setup.called
    assert mock_async_setup_entry.called
async def test_discovered_by_dhcp_no_udp_response(hass):
    """Test we can setup when discovered from dhcp but no udp response."""
    # UDP discovery fails, but the TCP bulb connection still works.
    with _patch_discovery(no_device=True), _patch_wifibulb():
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": config_entries.SOURCE_DHCP}, data=DHCP_DISCOVERY
        )
        await hass.async_block_till_done()
    assert result["type"] == RESULT_TYPE_FORM
    assert result["errors"] is None
    with _patch_discovery(no_device=True), _patch_wifibulb(), patch(
        f"{MODULE}.async_setup", return_value=True
    ) as mock_async_setup, patch(
        f"{MODULE}.async_setup_entry", return_value=True
    ) as mock_async_setup_entry:
        result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
        await hass.async_block_till_done()
    assert result2["type"] == "create_entry"
    assert result2["data"] == {
        CONF_HOST: IP_ADDRESS,
        CONF_NAME: DEFAULT_ENTRY_TITLE,
    }
    assert mock_async_setup.called
    assert mock_async_setup_entry.called
async def test_discovered_by_dhcp_partial_udp_response_fallback_tcp(hass):
    """Test we can setup when discovered from dhcp but part of the udp response is missing."""
    with _patch_discovery(no_device=True), _patch_wifibulb():
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": config_entries.SOURCE_DHCP}, data=DHCP_DISCOVERY
        )
        await hass.async_block_till_done()
    assert result["type"] == RESULT_TYPE_FORM
    assert result["errors"] is None
    # A partial UDP reply forces the flow to fill in the gaps over TCP.
    with _patch_discovery(device=FLUX_DISCOVERY_PARTIAL), _patch_wifibulb(), patch(
        f"{MODULE}.async_setup", return_value=True
    ) as mock_async_setup, patch(
        f"{MODULE}.async_setup_entry", return_value=True
    ) as mock_async_setup_entry:
        result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
        await hass.async_block_till_done()
    assert result2["type"] == "create_entry"
    assert result2["data"] == {
        CONF_HOST: IP_ADDRESS,
        CONF_NAME: DEFAULT_ENTRY_TITLE,
    }
    assert mock_async_setup.called
    assert mock_async_setup_entry.called
async def test_discovered_by_dhcp_no_udp_response_or_tcp_response(hass):
    """Test we can setup when discovered from dhcp but no udp response or tcp response."""
    # With neither transport reachable, the flow aborts immediately.
    with _patch_discovery(no_device=True), _patch_wifibulb(no_device=True):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": config_entries.SOURCE_DHCP}, data=DHCP_DISCOVERY
        )
        await hass.async_block_till_done()
    assert result["type"] == RESULT_TYPE_ABORT
    assert result["reason"] == "cannot_connect"
@pytest.mark.parametrize(
    "source, data",
    [
        (config_entries.SOURCE_DHCP, DHCP_DISCOVERY),
        (config_entries.SOURCE_DISCOVERY, FLUX_DISCOVERY),
    ],
)
async def test_discovered_by_dhcp_or_discovery_adds_missing_unique_id(
    hass, source, data
):
    """Test we can setup when discovered from dhcp or discovery."""
    # Existing entry for this host has no unique_id yet.
    config_entry = MockConfigEntry(domain=DOMAIN, data={CONF_HOST: IP_ADDRESS})
    config_entry.add_to_hass(hass)
    with _patch_discovery(), _patch_wifibulb():
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": source}, data=data
        )
        await hass.async_block_till_done()
    assert result["type"] == RESULT_TYPE_ABORT
    assert result["reason"] == "already_configured"
    # The aborting discovery should backfill the missing unique_id (MAC).
    assert config_entry.unique_id == MAC_ADDRESS
@pytest.mark.parametrize(
    "source, data",
    [
        (config_entries.SOURCE_DHCP, DHCP_DISCOVERY),
        (config_entries.SOURCE_DISCOVERY, FLUX_DISCOVERY),
    ],
)
async def test_discovered_by_dhcp_or_discovery_mac_address_mismatch_host_already_configured(
    hass, source, data
):
    """Test we abort if the host is already configured but the mac does not match."""
    config_entry = MockConfigEntry(
        domain=DOMAIN, data={CONF_HOST: IP_ADDRESS}, unique_id=MAC_ADDRESS_DIFFERENT
    )
    config_entry.add_to_hass(hass)
    with _patch_discovery(), _patch_wifibulb():
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": source}, data=data
        )
        await hass.async_block_till_done()
    assert result["type"] == RESULT_TYPE_ABORT
    assert result["reason"] == "already_configured"
    # The mismatching discovery must NOT overwrite the stored unique_id.
    assert config_entry.unique_id == MAC_ADDRESS_DIFFERENT
async def test_options(hass: HomeAssistant):
    """Test options flow."""
    config_entry = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_HOST: IP_ADDRESS, CONF_NAME: DEFAULT_ENTRY_TITLE},
        options={
            CONF_MODE: MODE_RGB,
            CONF_CUSTOM_EFFECT_COLORS: "[255,0,0], [0,0,255]",
            CONF_CUSTOM_EFFECT_SPEED_PCT: 30,
            CONF_CUSTOM_EFFECT_TRANSITION: TRANSITION_STROBE,
        },
        unique_id=MAC_ADDRESS,
    )
    config_entry.add_to_hass(hass)
    with _patch_discovery(), _patch_wifibulb():
        assert await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()
    result = await hass.config_entries.options.async_init(config_entry.entry_id)
    assert result["type"] == "form"
    assert result["step_id"] == "init"
    user_input = {
        CONF_CUSTOM_EFFECT_COLORS: "[0,0,255], [255,0,0]",
        CONF_CUSTOM_EFFECT_SPEED_PCT: 50,
        CONF_CUSTOM_EFFECT_TRANSITION: TRANSITION_JUMP,
    }
    with _patch_discovery(), _patch_wifibulb():
        result2 = await hass.config_entries.options.async_configure(
            result["flow_id"], user_input
        )
        await hass.async_block_till_done()
    assert result2["type"] == "create_entry"
    # The submitted options fully replace the originals on the entry.
    assert result2["data"] == user_input
    assert result2["data"] == config_entry.options
    # The light entity set up from this entry is still present afterwards.
    assert hass.states.get("light.bulb_rgbcw_ddeeff") is not None
| |
import ast
from pprint import pprint
class Traductor(ast.NodeVisitor):
def __init__(self):
# we store the built prog
self.prog = ""
self.line = []
    def visit_Module(self, node):
        """Translate each top-level statement of a Module node.

        Appends each translated statement to ``self.prog`` (one per line)
        and records its source line number in ``self.line``. Statements
        whose visitor returns a falsy value (e.g. node types without a
        dedicated visitor, for which generic_visit returns None) are
        silently skipped.
        """
        for ele in node.body:
            tmp = self.visit(ele)
            if tmp:
                self.prog += tmp + "\n"
                self.line.append(ele.lineno)
def visit_FunctionDef(self, node):
# first we store the name of func
s = "Func(String(%s), " % (node.name)
s += "[%s], " % (", ".join([ele.arg for ele in node.args.args]))
s += 'Expr_list([%s]))' % (
", ".join([self.visit(ele) for ele in node.body])
)
return s
def visit_AsyncFunctionDef(self, node):
# first we store the name of func
s = "AsyncFunc(String(%s), " % (node.name)
s += "[%s], " % (", ".join([ele.arg for ele in node.args.args]))
s += 'Expr_list([%s]))' % (
", ".join([self.visit(ele) for ele in node.body])
)
return s
def visit_If(self, node):
# first we add the Boolean comparing
s = "Instr.i_if(cond=%s, " % (self.visit(node.test))
s += "b_then=Block("
# then we iterate over the expression in the then
s += ", ".join([self.visit(ele) for ele in node.body])
s += "), b_else=Block("
# then we iterate over the expression in the else
s += ", ".join([self.visit(ele) for ele in node.orelse])
s += "))"
return s
def visit_While(self, node):
# first we add the Booleancomparing
s = "Instr.i_while(cond=%s, " % (self.visit(node.test))
s += "block=Block("
# then we ireate over the expression in the body
s += ", ".join([self.visit(ele) for ele in node.body])
s += "))"
return s
def visit_Return(self, node):
return 'Return(%s)' % (self.visit(node.value))
def visit_Assign(self, node):
"""Give us assign
"""
s = "Instr.assign(varName="
# we get the targets, we suppose that we always have
# only on target
s += self.visit(node.targets[0]) + ", expr="
# we get the value
s += self.visit(node.value) + ")"
return s
# def visit_Dict(self, node):
# # we suppose that the dictionnary is alwasy "string" -> "expr"
# s = 'Context({'
# # we iterate and construct the couple key/value
# for index, _ in enumerate(node.keys):
# if index == len(node.keys) - 1:
# s += '%s:%s' % (
# self.visit(node.keys[index]),
# self.visit(node.values[index])
# )
# else:
# s += '%s:%s, ' % (
# self.visit(node.keys[index]),
# self.visit(node.values[index])
# )
# s += "})"
# return s
# def visit_List(self, node):
# # iterate over the value and store expr in the list
# values = ", ".join([self.visit(ele) for ele in node.elts])
# return 'Expr_list([%s])' % (values)
def visit_Compare(self, node):
# we suppose we have only on comparaison
s = 'Expr.expr_binary(op=%s, ' % (self.visit(node.ops[0]))
s += "expr1=%s, " % (self.visit(node.left))
s += "expr2=%s)" % (self.visit(node.comparators[0]))
return s
def visit_Call(self, node):
s = 'Call(%s, %s)' % (
self.visit(node.func),
", ".join([self.visit(arg) for arg in node.args])
)
return s
def visit_Expr(self, node):
"""Give us the value of an expression
"""
return 'Instr.expr(expr=%s)' % (self.visit(node.value))
def visit_Await(self, node):
return 'Await(%s)' % (self.visit(node.value))
def visit_BinOp(self, node):
# we get the left, right operand, and the operator
s = "Expr.expr_binary(op=%s, " % (self.visit(node.op))
s += "expr1=%s, " % (self.visit(node.left))
s += "expr2=%s)" % (self.visit(node.right))
return s
def visit_BoolOp(self, node):
# we suppose we have only two elements
s = "Expr.expr_binary(op=%s, " % (self.visit(node.op))
s += "expr1=%s, " % (self.visit(node.values[0]))
s += "expr2=%s)" % (self.visit(node.values[1]))
return s
def visit_UnaryOp(self, node):
return 'Expr.expr_unary(op=%s, expr=%s)' % (
self.visit(node.op), self.visit(node.operand)
)
def visit_USub(self, node):
return "Unary_Op.uSub()"
def visit_UAdd(self, node):
return "Unary_Op.uAdd()"
def visit_Gt(self, node):
return "Binary_Op.gt()"
def visit_GtE(self, node):
return "Binary_Op.ge()"
def visit_Lt(self, node):
return "Binary_Op.lt()"
def visit_LtE(self, node):
return "Binary_Op.le()"
def visit_Eq(self, node):
return "Binary_Op.eq()"
def visit_NotEq(self, node):
return "Binary_Op.diff()"
def visit_Mult(self, node):
return "Binary_Op.mult()"
def visit_Div(self, node):
return "Binary_Op.div()"
def visit_Sub(self, node):
return "Binary_Op.sub()"
def visit_Add(self, node):
return "Binary_Op.add()"
def visit_And(self, node):
return "Binary_Op.o_and()"
def visit_Name(self, node):
return 'Expr.expr_variable(%s)' % (str(node.id))
def visit_Str(self, node):
return 'Expr.expr_lit(Literal.lit_string(String(%s)))' % (str(node.s))
def visit_Num(self, node):
return 'Expr.expr_lit(Literal.lit_z(Z(%s)))' % (str(node.n))
def print_prog(program, line):
    """Print each generated program line prefixed with its source line number.

    ``program`` is the newline-joined DSL text; ``line`` holds the original
    source line number for each statement, in the same order.
    """
    print("\n")
    for number, text in zip(line, program.split("\n")):
        print(number, ".\t", text)
    print("\n")
def beautiful_print(program):
    """Pretty-print the generated program, one repr'd statement per line.

    The trailing element produced by splitting on the final newline is
    dropped so no empty statement is printed.
    """
    for statement in program.split("\n")[:-1]:
        pprint(statement)
    print("\n")
if __name__ == '__main__':
    # Demo driver: translate a small sample program covering every supported
    # construct (assignment, unary/binary ops, if/while, (async) functions,
    # await, calls) and print both the raw AST dump and the translation.
    s = """
s = 5
s = s + 5
s = -1
a = 5
if s > a and a < s:
    s = 0 + d
    "Hello World"
else:
    s=1
while s < 10:
    s = s + 1
def my_func(a,b,c):
    res = a + b
    res = res / c
    return res
async def func(a, b, c):
    res = a + b
    await res
    return res
func(1, 2, 3)
"""
    t = ast.parse(s)
    # Show the raw AST first for comparison against the DSL output.
    pprint(ast.dump(t))
    print("\n")
    x = Traductor()
    x.visit(t)
    # print_prog(s, list(range(1, len(s) + 1)))
    # print_prog(x.prog, x.line)
    beautiful_print(x.prog)
| |
# Copyright 2019-present Kensho Technologies, LLC.
import bisect
from dataclasses import dataclass
from typing import Any, Dict, Set, Union
from graphql import GraphQLInterfaceType, GraphQLObjectType
from ..compiler.compiler_frontend import ast_to_ir
from ..compiler.helpers import (
BaseLocation,
FoldScopeLocation,
Location,
get_edge_direction_and_name,
)
from ..compiler.metadata import FilterInfo, QueryMetadataTable
from ..cost_estimation.cardinality_estimator import estimate_query_result_cardinality
from ..cost_estimation.int_value_conversion import (
convert_int_to_field_value,
field_supports_range_reasoning,
)
from ..cost_estimation.interval import Interval
from ..global_utils import (
ASTWithParameters,
PropertyPath,
QueryStringWithParameters,
VertexPath,
cached_property,
)
from ..query_formatting.common import validate_arguments
from ..schema import is_meta_field
from ..schema.schema_info import EdgeConstraint, QueryPlanningSchemaInfo
from .filter_selectivity_utils import (
Selectivity,
adjust_counts_with_selectivity,
filter_uses_only_runtime_parameters,
get_integer_interval_for_filters_on_field,
get_selectivity_of_filters_at_vertex,
)
from .helpers import is_uuid4_type
def _convert_int_interval_to_field_value_interval(
    schema_info: QueryPlanningSchemaInfo, vertex_type: str, field: str, interval: Interval[int]
) -> Interval[Any]:
    """Convert the integer interval endpoints to a type appropriate for the field.

    Args:
        schema_info: QueryPlanningSchemaInfo
        vertex_type: name of a vertex type
        field: name of a field on the vertex_type
        interval: interval to convert

    Returns:
        Interval with endpoints appropriate for the field on the vertex_type.
    """
    def _convert_endpoint(endpoint):
        # None endpoints mean "unbounded" and pass through unchanged.
        if endpoint is None:
            return None
        return convert_int_to_field_value(schema_info, vertex_type, field, endpoint)

    return Interval(
        _convert_endpoint(interval.lower_bound), _convert_endpoint(interval.upper_bound)
    )
def _get_location_vertex_path(location: BaseLocation) -> VertexPath:
    """Get the VertexPath for a BaseLocation pointing at a vertex."""
    if location.field is not None:
        raise AssertionError(
            f"Location {location} represents a field. Expected a location pointing at a vertex."
        )

    if isinstance(location, Location):
        return location.query_path

    if isinstance(location, FoldScopeLocation):
        # Fold steps are appended to the base path as "<direction>_<edge>" strings.
        fold_steps = tuple(
            "{}_{}".format(direction, name) for direction, name in location.fold_path
        )
        return location.base_location.query_path + fold_steps

    raise AssertionError("Unexpected location encountered: {}".format(location))
def get_types(
    query_metadata: QueryMetadataTable,
) -> Dict[VertexPath, Union[GraphQLObjectType, GraphQLInterfaceType]]:
    """Find the type at each VertexPath.

    Fold scopes are not considered.

    Args:
        query_metadata: info on locations, inputs, outputs, and tags in the query

    Returns:
        dict mapping nodes to their type names
    """
    return {
        _get_location_vertex_path(location): location_info.type
        for location, location_info in query_metadata.registered_locations
    }
def get_filters(query_metadata: QueryMetadataTable) -> Dict[VertexPath, Set[FilterInfo]]:
    """Get the filters at each VertexPath."""
    filters: Dict[VertexPath, Set[FilterInfo]] = {}
    for location, _ in query_metadata.registered_locations:
        vertex_path = _get_location_vertex_path(location)
        # Several locations can map to the same vertex path; merge their filters.
        vertex_filters = filters.setdefault(vertex_path, set())
        vertex_filters.update(query_metadata.get_filter_infos(location))
    return filters
def get_fold_scope_roots(query_metadata: QueryMetadataTable) -> Dict[VertexPath, VertexPath]:
    """Map each VertexPath in the query that's inside a fold to the VertexPath of the fold."""
    return {
        _get_location_vertex_path(location): location.base_location.query_path
        for location, _ in query_metadata.registered_locations
        if isinstance(location, FoldScopeLocation)
    }
def get_single_field_filters(
    filters: Dict[VertexPath, Set[FilterInfo]],
) -> Dict[PropertyPath, Set[FilterInfo]]:
    """Find the single field filters for each field.

    Filters that apply to multiple fields, like name_or_alias, are ignored.

    Args:
        filters: the set of filters at each node

    Returns:
        dict mapping fields to their set of filters.
    """
    single_field_filters: Dict[PropertyPath, Set[FilterInfo]] = {}
    for vertex_path, filter_infos in filters.items():
        for filter_info in filter_infos:
            field_count = len(filter_info.fields)
            if field_count == 0:
                raise AssertionError(f"Got filter on 0 fields {filter_info} on {vertex_path}")
            if field_count == 1:
                property_path = PropertyPath(vertex_path, filter_info.fields[0])
                single_field_filters.setdefault(property_path, set()).add(filter_info)
            # Multi-field filters (e.g. name_or_alias) are intentionally skipped.
    return single_field_filters
def get_fields_eligible_for_pagination(
    schema_info: QueryPlanningSchemaInfo,
    types: Dict[VertexPath, Union[GraphQLObjectType, GraphQLInterfaceType]],
    single_field_filters: Dict[PropertyPath, Set[FilterInfo]],
    fold_scope_roots: Dict[VertexPath, VertexPath],
) -> Set[PropertyPath]:
    """Return all the fields we can consider for pagination."""
    eligible_fields: Set[PropertyPath] = set()
    for vertex_path, vertex_type in types.items():
        # Vertices inside a fold scope cannot be used for pagination.
        if vertex_path in fold_scope_roots:
            continue
        for field_name in vertex_type.fields:
            # Meta fields and fields without range reasoning are ruled out.
            if is_meta_field(field_name):
                continue
            if not field_supports_range_reasoning(schema_info, vertex_type.name, field_name):
                continue
            property_path = PropertyPath(vertex_path, field_name)
            field_filters = single_field_filters.get(property_path, set())
            # All existing filters on the field must use only runtime parameters.
            if all(
                filter_uses_only_runtime_parameters(filter_info)
                for filter_info in field_filters
            ):
                eligible_fields.add(property_path)
    return eligible_fields
def get_field_value_intervals(
    schema_info: QueryPlanningSchemaInfo,
    types: Dict[VertexPath, Union[GraphQLObjectType, GraphQLInterfaceType]],
    single_field_filters: Dict[PropertyPath, Set[FilterInfo]],
    parameters: Dict[str, Any],
) -> Dict[PropertyPath, Interval[Any]]:
    """Map the PropertyPath of each supported field with filters to its field value interval.

    This method only considers fields on which we have range reasoning
    (see field_supports_range_reasoning) that are not inside folds.

    Args:
        schema_info: QueryPlanningSchemaInfo
        types: the type at each node
        single_field_filters: the set of single-field filters at each node
        parameters: parameters used for the query

    Returns:
        dict mapping some PropertyPath objects to their interval of allowed values
    """
    field_value_intervals = {}
    for vertex_path, vertex_type in types.items():
        vertex_type_name = vertex_type.name
        for field_name, _ in vertex_type.fields.items():
            property_path = PropertyPath(vertex_path, field_name)
            filters_on_field: Set[FilterInfo] = single_field_filters.get(property_path, set())
            if not filters_on_field:
                # Unfiltered fields impose no value restriction worth recording.
                continue

            if field_supports_range_reasoning(schema_info, vertex_type_name, field_name):
                # Intersect all range filters on the field into one integer
                # interval, then convert its endpoints to the field's own type.
                integer_interval = get_integer_interval_for_filters_on_field(
                    schema_info, filters_on_field, vertex_type_name, field_name, parameters
                )
                # Reuse the property_path computed above instead of
                # constructing an identical PropertyPath a second time.
                field_value_intervals[property_path] = (
                    _convert_int_interval_to_field_value_interval(
                        schema_info, vertex_type_name, field_name, integer_interval
                    )
                )
    return field_value_intervals
def get_selectivities(
    schema_info: QueryPlanningSchemaInfo,
    types: Dict[VertexPath, Union[GraphQLObjectType, GraphQLInterfaceType]],
    filters: Dict[VertexPath, Set[FilterInfo]],
    parameters: Dict[str, Any],
) -> Dict[VertexPath, Selectivity]:
    """Get the combined selectivities of filters at each vertex."""
    # TODO(bojanserafimov) use precomputed field_value_intervals
    # inside this method instead of recomputing it
    return {
        vertex_path: get_selectivity_of_filters_at_vertex(
            schema_info, filters[vertex_path], parameters, vertex_type.name
        )
        for vertex_path, vertex_type in types.items()
    }
def get_distinct_result_set_estimates(
    schema_info: QueryPlanningSchemaInfo,
    types: Dict[VertexPath, Union[GraphQLObjectType, GraphQLInterfaceType]],
    selectivities: Dict[VertexPath, Selectivity],
    parameters: Dict[str, Any],
) -> Dict[VertexPath, float]:
    """Map each VertexPath in the query to its distinct result set estimate.

    VertexPaths that lead into a fold scope are omitted.

    The distinct result set estimate for vertex query node is the expected number of
    different instances of the vertex type that will appear in the result set of the
    query. For instance, suppose a query that included an edge traversal from A to B
    that also included a unique filter on A. In this case, the distinct result estimate
    for A is 1 even though the cardinality of the result set might be quite large.

    Args:
        schema_info: QueryPlanningSchemaInfo
        types: the type at each node
        selectivities: the selectivities at each VertexPath
        parameters: the query parameters

    Returns:
        the distinct result set estimate for each VertexPath
    """
    # Baseline per-vertex estimate: the class count scaled by the combined
    # selectivity of the filters at that vertex.
    distinct_result_set_estimates = {}
    for vertex_path, vertex_type in types.items():
        vertex_type_name = vertex_type.name
        class_count = schema_info.statistics.get_class_count(vertex_type_name)
        distinct_result_set_estimates[vertex_path] = adjust_counts_with_selectivity(
            class_count, selectivities[vertex_path]
        )

    # Collect (source, destination) pairs of traversals that can match at most
    # one destination per source, per the schema's declared edge constraints.
    # A vertex path of length > 1 ends in the name of the edge traversed from
    # its parent vertex.
    single_destination_traversals = set()
    for vertex_path, _ in types.items():
        if len(vertex_path) > 1:
            from_path = vertex_path[:-1]
            to_path = vertex_path
            edge_direction, edge_name = get_edge_direction_and_name(vertex_path[-1])
            no_constraints = EdgeConstraint(0)  # unset all bits of the flag
            edge_constraints = schema_info.edge_constraints.get(edge_name, no_constraints)
            if edge_direction == "in":
                # An "in" traversal walks the edge backwards, so the roles of
                # source and destination are swapped.
                from_path, to_path = to_path, from_path

            if EdgeConstraint.AtMostOneDestination in edge_constraints:
                single_destination_traversals.add((from_path, to_path))
            if EdgeConstraint.AtMostOneSource in edge_constraints:
                single_destination_traversals.add((to_path, from_path))

    # Make sure there's no path of many-to-one traversals leading to a node with higher
    # distinct_result_set_estimate. Relaxing every traversal len(...) times
    # suffices to reach a fixed point: each pass propagates the minima at
    # least one hop further, and no simple propagation chain is longer than
    # the number of traversals.
    max_path_length = len(single_destination_traversals)
    for _ in range(max_path_length):
        for from_path, to_path in single_destination_traversals:
            distinct_result_set_estimates[to_path] = min(
                distinct_result_set_estimates[to_path], distinct_result_set_estimates[from_path]
            )

    return distinct_result_set_estimates
def get_pagination_capacities(
    schema_info: QueryPlanningSchemaInfo,
    types: Dict[VertexPath, Union[GraphQLObjectType, GraphQLInterfaceType]],
    fields_eligible_for_pagination: Set[PropertyPath],
    field_value_intervals: Dict[PropertyPath, Interval[Any]],
    distinct_result_set_estimates: Dict[VertexPath, float],
) -> Dict[PropertyPath, int]:
    """Get the pagination capacity for each eligible pagination field.

    The pagination capacity of a field is defined as the maximum number of pages we can split
    the query results in by adding filters on this field with some confidence that the pages
    will have similar sizes. This reasoning is local: if a filter in a different location is
    correlated with the values on this field, the generated pages might turn out to have
    wildly different sizes. This problem is somewhat unavoidable.

    Args:
        schema_info: QueryPlanningSchemaInfo
        types: the type at each node
        fields_eligible_for_pagination: see get_fields_eligible_for_pagination
        field_value_intervals: see get_field_value_intervals
        distinct_result_set_estimates: see get_distinct_result_set_estimates

    Returns:
        the pagination capacity of each PropertyPath
    """
    pagination_capacities = {}
    for vertex_path, vertex_type in types.items():
        vertex_type_name = vertex_type.name
        for field_name, _ in vertex_type.fields.items():
            property_path = PropertyPath(vertex_path, field_name)

            if property_path not in fields_eligible_for_pagination:
                continue

            if is_uuid4_type(schema_info, vertex_type_name, field_name):
                # Uniformly-random uuid values can be split into as many even
                # pages as there are distinct results at this vertex.
                pagination_capacities[property_path] = int(
                    distinct_result_set_estimates[vertex_path]
                )
            elif field_supports_range_reasoning(schema_info, vertex_type_name, field_name):
                # Absent an interval from the query's filters, assume the
                # field is unbounded on both sides.
                field_value_interval = field_value_intervals.get(
                    property_path, Interval(None, None)
                )
                quantiles = schema_info.statistics.get_field_quantiles(vertex_type_name, field_name)
                if quantiles is not None:
                    # The first and last values of the quantiles are the minimum and maximum
                    # observed values. We call all other values the proper quantiles. We don't
                    # directly use the minimum and maximum values as page boundaries since we
                    # will most likely generate empty pages.
                    proper_quantiles = quantiles[1:-1]

                    # Get the relevant quantiles (ones inside the field_value_interval)
                    min_quantile = 0
                    max_quantile = len(proper_quantiles)
                    if field_value_interval.lower_bound is not None:
                        min_quantile = bisect.bisect_left(
                            proper_quantiles, field_value_interval.lower_bound
                        )
                    if field_value_interval.upper_bound is not None:
                        max_quantile = bisect.bisect_left(
                            proper_quantiles, field_value_interval.upper_bound
                        )
                    relevant_quantiles = proper_quantiles[min_quantile:max_quantile]

                    # TODO(bojanserafimov): If the relevant quantiles contain duplicates, the
                    # pagination capacity would be lower.
                    # N quantile boundaries split the results into N + 1 pages,
                    # capped by the number of distinct results at this vertex.
                    pagination_capacities[property_path] = min(
                        len(relevant_quantiles) + 1,
                        int(distinct_result_set_estimates[vertex_path]),
                    )

    return pagination_capacities
@dataclass
class QueryPlanningAnalysis:
    """A cache for analysis passes over a fixed query and fixed schema_info.

    Each cached_property below runs one analysis pass on first access and
    memoizes the result, so passes that build on each other (for example
    pagination_capacities -> field_value_intervals -> single_field_filters)
    are each computed at most once per instance. The instance therefore
    assumes schema_info and ast_with_parameters are never mutated.
    """

    # The schema, statistics, and constraints the analyses run against.
    schema_info: QueryPlanningSchemaInfo
    # The query AST plus its parameter values.
    ast_with_parameters: ASTWithParameters

    @cached_property
    def query_string_with_parameters(self) -> QueryStringWithParameters:
        """Return the query in string form."""
        return QueryStringWithParameters.from_ast_with_parameters(self.ast_with_parameters)

    @cached_property
    def metadata_table(self) -> QueryMetadataTable:
        """Return the metadata table for this query."""
        ir_and_metadata = ast_to_ir(
            self.schema_info.schema,
            self.ast_with_parameters.query_ast,
            type_equivalence_hints=self.schema_info.type_equivalence_hints,
        )
        # Fail fast on missing/mistyped parameters before any downstream pass.
        validate_arguments(ir_and_metadata.input_metadata, self.ast_with_parameters.parameters)
        return ir_and_metadata.query_metadata_table

    @cached_property
    def types(self) -> Dict[VertexPath, Union[GraphQLObjectType, GraphQLInterfaceType]]:
        """Find the type at each VertexPath."""
        return get_types(self.metadata_table)

    @cached_property
    def classes_with_missing_counts(self) -> Set[str]:
        """Return classes that don't have count statistics."""
        classes_with_missing_counts = set()
        for vertex_path, vertex_type in self.types.items():
            if self.schema_info.statistics.get_class_count(vertex_type.name) is None:
                classes_with_missing_counts.add(vertex_type.name)
            # A vertex path longer than one step ends in the edge traversed
            # from the parent vertex; that edge class needs counts too.
            if len(vertex_path) > 1:
                _, edge_name = get_edge_direction_and_name(vertex_path[-1])
                if self.schema_info.statistics.get_class_count(edge_name) is None:
                    classes_with_missing_counts.add(edge_name)
        return classes_with_missing_counts

    @cached_property
    def cardinality_estimate(self) -> float:
        """Return the cardinality estimate for this query."""
        # TODO use selectivity analysis pass instead of recomputing it
        return estimate_query_result_cardinality(
            self.schema_info, self.metadata_table, self.ast_with_parameters.parameters
        )

    @cached_property
    def filters(self) -> Dict[VertexPath, Set[FilterInfo]]:
        """Get the filters at each VertexPath."""
        return get_filters(self.metadata_table)

    @cached_property
    def fold_scope_roots(self) -> Dict[VertexPath, VertexPath]:
        """Map each VertexPath in the query that's inside a fold to the VertexPath of the fold."""
        return get_fold_scope_roots(self.metadata_table)

    @cached_property
    def single_field_filters(self) -> Dict[PropertyPath, Set[FilterInfo]]:
        """Find the single field filters for each field. Filters like name_or_alias are excluded."""
        return get_single_field_filters(self.filters)

    @cached_property
    def fields_eligible_for_pagination(self) -> Set[PropertyPath]:
        """Return all the fields we can consider for pagination."""
        return get_fields_eligible_for_pagination(
            self.schema_info,
            self.types,
            self.single_field_filters,
            self.fold_scope_roots,
        )

    @cached_property
    def field_value_intervals(self) -> Dict[PropertyPath, Interval[Any]]:
        """Return the field value intervals for this query."""
        return get_field_value_intervals(
            self.schema_info,
            self.types,
            self.single_field_filters,
            self.ast_with_parameters.parameters,
        )

    @cached_property
    def selectivities(self) -> Dict[VertexPath, Selectivity]:
        """Get the combined selectivities of filters at each vertex."""
        return get_selectivities(
            self.schema_info, self.types, self.filters, self.ast_with_parameters.parameters
        )

    @cached_property
    def distinct_result_set_estimates(self) -> Dict[VertexPath, float]:
        """Return the distinct result set estimates for this query."""
        return get_distinct_result_set_estimates(
            self.schema_info, self.types, self.selectivities, self.ast_with_parameters.parameters
        )

    @cached_property
    def pagination_capacities(self) -> Dict[PropertyPath, int]:
        """Return the pagination capacities for this query."""
        return get_pagination_capacities(
            self.schema_info,
            self.types,
            self.fields_eligible_for_pagination,
            self.field_value_intervals,
            self.distinct_result_set_estimates,
        )
def analyze_query_string(
    schema_info: QueryPlanningSchemaInfo, query_with_params: QueryStringWithParameters
) -> QueryPlanningAnalysis:
    """Create a QueryPlanningAnalysis object for the given query string and parameters."""
    # Parse once, then delegate to the AST-based entry point.
    return analyze_query_ast(
        schema_info, ASTWithParameters.from_query_string_with_parameters(query_with_params)
    )
def analyze_query_ast(
    schema_info: QueryPlanningSchemaInfo, ast_with_params: ASTWithParameters
) -> QueryPlanningAnalysis:
    """Create a QueryPlanningAnalysis object for the given query AST and parameters."""
    # Deliberately a thin, discoverable alias for the QueryPlanningAnalysis
    # constructor, mirroring analyze_query_string(): without it, a reader
    # might not realize an analysis can be built straight from an AST and
    # would round-trip through a query string only to parse it again.
    return QueryPlanningAnalysis(schema_info, ast_with_params)
| |
from __future__ import absolute_import, print_function, division
import errno
import os
import sys
from theano.compat import PY3
from theano.gof.compilelock import get_lock, release_lock
from theano import config
from . import cmodule
# TODO These two lines may be removed in the future, when we are 100% sure
# noone has an old cutils_ext.so lying around anymore.
# NOTE(review): presumably a stale single-file build would otherwise be picked
# up on import instead of the package built below — confirm before removing.
if os.path.exists(os.path.join(config.compiledir, 'cutils_ext.so')):
    os.remove(os.path.join(config.compiledir, 'cutils_ext.so'))
def compile_cutils_code():
    """Return the C source for the inplace_increment helpers of cutils_ext.

    For every candidate numpy scalar type this generates an in-place
    addition kernel plus matching entries in two parallel lookup tables
    (``addition_funcs`` and ``type_numbers``), and the
    ``inplace_increment`` entry point that dispatches through those
    tables. Types absent from the local numpy build are compiled out by
    the ``#if defined(...)`` guards.
    """
    # Candidate scalar types; not every one exists on every platform, hence
    # the #if defined(...) guards substituted into the templates below.
    types = ['npy_' + t for t in ['int8', 'int16', 'int32', 'int64', 'int128',
                                  'int256', 'uint8', 'uint16', 'uint32',
                                  'uint64', 'uint128', 'uint256',
                                  'float16', 'float32', 'float64',
                                  'float80', 'float96', 'float128',
                                  'float256']]

    complex_types = ['npy_' + t for t in ['complex32', 'complex64',
                                          'complex128', 'complex160',
                                          'complex192', 'complex512']]

    # Template for one per-type kernel: walks the map iterator and performs
    # %(op)s on each destination element (accumulate when inc_or_set != 0,
    # overwrite when it is 0).
    inplace_map_template = """
    #if defined(%(typen)s)
    static void %(type)s_inplace_add(PyArrayMapIterObject *mit,
                                     PyArrayIterObject *it, int inc_or_set)
    {
        int index = mit->size;
        while (index--) {
            %(op)s
            PyArray_MapIterNext(mit);
            PyArray_ITER_NEXT(it);
        }
    }
    #endif
    """

    # Element update statements substituted for %(op)s above: real types need
    # a single statement, complex types update .real and .imag separately.
    floatadd = ("((%(type)s*)mit->dataptr)[0] = "
                "(inc_or_set ? ((%(type)s*)mit->dataptr)[0] : 0)"
                " + ((%(type)s*)it->dataptr)[0];")
    complexadd = """
    ((%(type)s*)mit->dataptr)[0].real =
        (inc_or_set ? ((%(type)s*)mit->dataptr)[0].real : 0)
        + ((%(type)s*)it->dataptr)[0].real;
    ((%(type)s*)mit->dataptr)[0].imag =
        (inc_or_set ? ((%(type)s*)mit->dataptr)[0].imag : 0)
        + ((%(type)s*)it->dataptr)[0].imag;
    """

    fns = ''.join([inplace_map_template % {'type': t, 'typen': t.upper(),
                                           'op': floatadd % {'type': t}}
                   for t in types] +
                  [inplace_map_template % {'type': t, 'typen': t.upper(),
                                           'op': complexadd % {'type': t}}
                   for t in complex_types])

    def gen_binop(type, typen):
        # One entry of the addition_funcs table, guarded like its kernel so
        # both tables stay index-aligned.
        return """
        #if defined(%(typen)s)
        %(type)s_inplace_add,
        #endif
        """ % dict(type=type, typen=typen)

    fn_array = ("static inplace_map_binop addition_funcs[] = {" +
                ''.join([gen_binop(type=t, typen=t.upper())
                         for t in types + complex_types]) + "NULL};\n")

    def gen_num(typen):
        # Matching entry of the type_numbers table.
        # NOTE(review): 'type=type' here references the *builtin* and is never
        # substituted (the template only uses %(typen)s) — harmless but dead.
        return """
        #if defined(%(typen)s)
        %(typen)s,
        #endif
        """ % dict(type=type, typen=typen)

    type_number_array = ("static int type_numbers[] = {" +
                         ''.join([gen_num(typen=t.upper())
                                  for t in types + complex_types]) + "-1000};")

    # Assemble the kernels, the two sentinel-terminated dispatch tables
    # (NULL / -1000), and the map_increment + inplace_increment entry points.
    code = ("""
        #if NPY_API_VERSION >= 0x00000008
        typedef void (*inplace_map_binop)(PyArrayMapIterObject *,
                                          PyArrayIterObject *, int inc_or_set);
        """ + fns + fn_array + type_number_array + """
        static int
        map_increment(PyArrayMapIterObject *mit, PyObject *op,
                      inplace_map_binop add_inplace, int inc_or_set)
        {
            PyArrayObject *arr = NULL;
            PyArrayIterObject *it;
            PyArray_Descr *descr;
            if (mit->ait == NULL) {
                return -1;
            }
            descr = PyArray_DESCR(mit->ait->ao);
            Py_INCREF(descr);
            arr = (PyArrayObject *)PyArray_FromAny(op, descr,
                                0, 0, NPY_ARRAY_FORCECAST, NULL);
            if (arr == NULL) {
                return -1;
            }
            if ((mit->subspace != NULL) && (mit->consec)) {
                PyArray_MapIterSwapAxes(mit, (PyArrayObject **)&arr, 0);
                if (arr == NULL) {
                    return -1;
                }
            }
            it = (PyArrayIterObject*)
                 PyArray_BroadcastToShape((PyObject*)arr, mit->dimensions, mit->nd);
            if (it == NULL) {
                Py_DECREF(arr);
                return -1;
            }
            (*add_inplace)(mit, it, inc_or_set);
            Py_DECREF(arr);
            Py_DECREF(it);
            return 0;
        }

        static PyObject *
        inplace_increment(PyObject *dummy, PyObject *args)
        {
            PyObject *arg_a = NULL, *index=NULL, *inc=NULL;
            int inc_or_set = 1;
            PyArrayObject *a;
            inplace_map_binop add_inplace = NULL;
            int type_number = -1;
            int i = 0;
            PyArrayMapIterObject * mit;

            if (!PyArg_ParseTuple(args, "OOO|i", &arg_a, &index,
                    &inc, &inc_or_set)) {
                return NULL;
            }
            if (!PyArray_Check(arg_a)) {
                PyErr_SetString(PyExc_ValueError,
                                "needs an ndarray as first argument");
                return NULL;
            }
            a = (PyArrayObject *) arg_a;
            if (PyArray_FailUnlessWriteable(a, "input/output array") < 0) {
                return NULL;
            }
            if (PyArray_NDIM(a) == 0) {
                PyErr_SetString(PyExc_IndexError, "0-d arrays can't be indexed.");
                return NULL;
            }
            type_number = PyArray_TYPE(a);

            while (type_numbers[i] >= 0 && addition_funcs[i] != NULL){
                if (type_number == type_numbers[i]) {
                    add_inplace = addition_funcs[i];
                    break;
                }
                i++ ;
            }

            if (add_inplace == NULL) {
                PyErr_SetString(PyExc_TypeError, "unsupported type for a");
                return NULL;
            }
            mit = (PyArrayMapIterObject *) PyArray_MapIterArray(a, index);
            if (mit == NULL) {
                goto fail;
            }
            if (map_increment(mit, inc, add_inplace, inc_or_set) != 0) {
                goto fail;
            }

            Py_DECREF(mit);

            Py_INCREF(Py_None);
            return Py_None;

        fail:
            Py_XDECREF(mit);

            return NULL;
        }
        #endif
        """)

    return code
def compile_cutils():
    """
    Do just the compilation of cutils_ext.

    Assembles the C source (run_cthunk plus the inplace_increment helpers
    from compile_cutils_code()), rewrites it for Python 3 when needed, and
    compiles it into <compiledir>/cutils_ext with the project's compiler.
    """
    code = ("""
        #include <Python.h>
        #include "numpy/arrayobject.h"
        #include "theano_mod_helper.h"

        extern "C"{
        static PyObject *
        run_cthunk(PyObject *self, PyObject *args)
        {
          PyObject *py_cthunk = NULL;
          if(!PyArg_ParseTuple(args,"O",&py_cthunk))
            return NULL;

          if (!PyCObject_Check(py_cthunk)) {
            PyErr_SetString(PyExc_ValueError,
                           "Argument to run_cthunk must be a PyCObject.");
            return NULL;
          }
          void * ptr_addr = PyCObject_AsVoidPtr(py_cthunk);
          int (*fn)(void*) = (int (*)(void*))(ptr_addr);
          void* it = PyCObject_GetDesc(py_cthunk);
          int failure = fn(it);

          return Py_BuildValue("i", failure);
        }""")
    code += compile_cutils_code()
    code += ("""static PyMethodDef CutilsExtMethods[] = {
        {"run_cthunk", run_cthunk, METH_VARARGS|METH_KEYWORDS,
         "Run a theano cthunk."},
        #if NPY_API_VERSION >= 0x00000008
        {"inplace_increment", inplace_increment,
         METH_VARARGS,
         "increments a numpy array inplace at the passed indexes."},
        #endif
        {NULL, NULL, 0, NULL}        /* Sentinel */
    };""")

    if PY3:
        # This is not the most efficient code, but it is written this way to
        # highlight the changes needed to make 2.x code compile under python 3.
        # PyCObject was removed in Python 3; NpyCapsule (numpy/npy_3kcompat.h)
        # provides a PyCObject-compatible wrapper over PyCapsule.
        code = code.replace("<Python.h>", '"numpy/npy_3kcompat.h"', 1)
        code = code.replace("PyCObject", "NpyCapsule")
        code += """
        static struct PyModuleDef moduledef = {
            PyModuleDef_HEAD_INIT,
            "cutils_ext",
            NULL,
            -1,
            CutilsExtMethods,
        };

        PyMODINIT_FUNC
        PyInit_cutils_ext(void) {
            import_array();
            return PyModule_Create(&moduledef);
        }
        }
        """
    else:
        code += """
        PyMODINIT_FUNC
        initcutils_ext(void)
        {
            import_array();
            (void) Py_InitModule("cutils_ext", CutilsExtMethods);
        }
        } //extern C
        """

    loc = os.path.join(config.compiledir, 'cutils_ext')
    if not os.path.exists(loc):
        try:
            os.mkdir(loc)
        except OSError as e:
            # Tolerate a concurrent mkdir by another process.
            assert e.errno == errno.EEXIST
            assert os.path.exists(loc), loc

    args = cmodule.GCC_compiler.compile_args(march_flags=False)
    cmodule.GCC_compiler.compile_str('cutils_ext', code, location=loc,
                                     preargs=args)
try:
    # See gh issue #728 for why these lines are here. Summary: compiledir
    # must be at the beginning of the path to avoid conflicts with any other
    # cutils_ext modules that might exist. An __init__.py file must be created
    # for the same reason. Note that these 5 lines may seem redundant (they are
    # repeated in compile_str()) but if another cutils_ext does exist then it
    # will be imported and compile_str won't get called at all.
    sys.path.insert(0, config.compiledir)
    location = os.path.join(config.compiledir, 'cutils_ext')
    if not os.path.exists(location):
        try:
            os.mkdir(location)
        except OSError as e:
            # Tolerate a concurrent mkdir by another process.
            assert e.errno == errno.EEXIST
            assert os.path.exists(location), location
    if not os.path.exists(os.path.join(location, '__init__.py')):
        open(os.path.join(location, '__init__.py'), 'w').close()

    try:
        # Fast path: the extension is already built and importable.
        from cutils_ext.cutils_ext import *  # noqa
    except ImportError:
        get_lock()
        # Ensure no-one else is currently modifying the content of the compilation
        # directory. This is important to prevent multiple processes from trying to
        # compile the cutils_ext module simultaneously.
        try:
            try:
                # We must retry to import it as some other process could
                # have been compiling it between the first failed import
                # and when we receive the lock
                from cutils_ext.cutils_ext import *  # noqa
            except ImportError:
                compile_cutils()
                from cutils_ext.cutils_ext import *  # noqa

        finally:
            # Release lock on compilation directory.
            release_lock()
finally:
    # Undo the temporary sys.path manipulation, but only if nothing else
    # changed sys.path[0] in the meantime.
    if sys.path[0] == config.compiledir:
        del sys.path[0]
| |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module that estimates a custom non-streaming statistic.
A Beam transform will compute the custom statistic over multiple samples of the
data to estimate the true value of the statistic over the entire dataset.
"""
import collections
import functools
from typing import Dict, Iterable, Text, Tuple
import apache_beam as beam
import numpy as np
import pyarrow as pa
from tensorflow_data_validation import constants
from tensorflow_data_validation import types
from tensorflow_data_validation.statistics.generators import stats_generator
from tensorflow_data_validation.utils import stats_util
from tfx_bsl.arrow import table_util
from tensorflow_metadata.proto.v0 import statistics_pb2
def _get_partitioned_statistics_summary(
    statistics: Dict[types.FeaturePath, Dict[Text, np.ndarray]]
) -> Dict[types.FeaturePath, Dict[Text, float]]:
  """Computes meta-statistics over the custom stats in the input dict.

  For every (feature, statistic) pair the summary holds the min, max, mean,
  median, standard deviation and the number of partitions the statistic was
  computed over.

  Args:
    statistics: Maps feature path -> stat name -> per-partition values.

  Returns:
    Nested mapping of feature path -> prefixed meta-stat name -> value.
  """
  summary = collections.defaultdict(collections.defaultdict)
  for feature_path, feature_statistics in statistics.items():
    for stat_name, stat_values in feature_statistics.items():
      # One meta-statistic entry per aggregate, keyed by a prefixed name.
      summary[feature_path].update({
          'min_' + stat_name: np.min(stat_values),
          'max_' + stat_name: np.max(stat_values),
          'mean_' + stat_name: np.mean(stat_values),
          'median_' + stat_name: np.median(stat_values),
          'std_dev_' + stat_name: np.std(stat_values),
          'num_partitions_' + stat_name: stat_values.size,
      })
  return summary
def get_valid_statistics(
    statistics: Dict[types.FeaturePath, Dict[Text, np.ndarray]],
    min_partitions_stat_presence: int
) -> Dict[types.FeaturePath, Dict[Text, np.ndarray]]:
  """Filters out statistics that were not computed over all partitions.

  Args:
    statistics: Maps feature path -> stat name -> per-partition values.
    min_partitions_stat_presence: Minimum number of partitions a statistic
      must be present in to be kept.

  Returns:
    The filtered mapping, with the kept values converted to np.ndarray.
  """
  valid_statistics = collections.defaultdict(collections.defaultdict)
  for feature_path, feature_statistics in statistics.items():
    for stat_name, stat_values in feature_statistics.items():
      # Drop statistics reported by too few partitions.
      if len(stat_values) < min_partitions_stat_presence:
        continue
      valid_statistics[feature_path][stat_name] = np.array(stat_values)
  return valid_statistics
def _default_assign_to_partition(
    sliced_record_batch: types.SlicedRecordBatch,
    num_partitions: int) -> Tuple[Tuple[types.SliceKey, int], pa.RecordBatch]:
  """Assigns an example to a partition key.

  The partition index is drawn uniformly at random from [0, num_partitions)
  using numpy's global random generator.

  Args:
    sliced_record_batch: A (slice key, record batch) pair.
    num_partitions: Number of partitions to choose from.

  Returns:
    ((slice key, partition index), record batch).
  """
  slice_key, record_batch = sliced_record_batch
  partition_index = np.random.randint(num_partitions)
  return (slice_key, partition_index), record_batch
@beam.typehints.with_input_types(types.SlicedRecordBatch)
@beam.typehints.with_output_types(Tuple[Tuple[types.SliceKey, int],
                                        pa.RecordBatch])
@beam.ptransform_fn
def _DefaultPartitionTransform(pcol, num_partitions):  # pylint: disable=invalid-name
  """Ptransform wrapping _default_assign_to_partition.

  Args:
    pcol: PCollection of (slice key, record batch) pairs.
    num_partitions: Number of partitions each element may be assigned to.

  Returns:
    PCollection keyed by (slice key, random partition index).
  """
  return pcol | 'DefaultPartition' >> beam.Map(_default_assign_to_partition,
                                               num_partitions)
class PartitionedStatsFn(object):
  """A custom non-streaming statistic.

  A PartitionedStatsFn is a custom statistic that cannot be computed in a
  streaming fashion. A user is required to implement the compute function.

  NonStreamingCustomStatsGenerator are initialized with
  a PartitionedStatsFn to estimate the PartitionedStatsFn over a large dataset.
  Examples in the dataset will be randomly assigned to a partition. Then the
  compute method will be called on each partition. If the examples in the
  partition contain invalid feature values, implementations of
  PartitionedStatsFn also have the option to "gracefully fail" without
  returning a statistic value for any invalid features.
  """

  def compute(self, examples: types.ExampleBatch
             ) -> statistics_pb2.DatasetFeatureStatistics:
    """Computes custom statistics over the batch of examples.

    Must be implemented by subclasses.

    Args:
      examples: The batch of examples.

    Returns:
      DatasetFeatureStatistics containing the custom statistics for
      each feature in the dataset.

      The DatasetFeatureStatistics proto can be constructed using the
      make_dataset_feature_stats_proto method.
    """
    raise NotImplementedError()

  def partitioner(self, num_partitions: int) -> beam.PTransform:
    """Optional PTransform to perform partition assignment.

    This may be overridden by subclasses to customize how examples are
    assigned to partitions; by default examples are assigned uniformly at
    random via _DefaultPartitionTransform.

    Args:
      num_partitions: The number of partitions to use. Overriding subclasses
        are free to use a different number of partitions.

    Returns:
      A PTransform.
    """
    return _DefaultPartitionTransform(num_partitions)  # pylint: disable=no-value-for-parameter
class _PartitionedStatisticsAnalyzerAccumulator(object):
  """Holds the partial state of partitioned statistics summaries."""

  def __init__(self):
    # Maps feature path -> stat name -> list of per-partition values.
    # A partial is used so that the class is pickleable.
    self.statistics = collections.defaultdict(
        functools.partial(collections.defaultdict, list))
class PartitionedStatisticsAnalyzer(beam.CombineFn):
  """Computes meta-statistics for non-streaming partitioned statistics.

  This analyzer computes meta-statistics including the min, max, mean, median
  and std dev of numeric statistics that are calculated over partitions
  of the dataset. Statistics may be missing from some partitions if
  the partition contains invalid feature values causing PartitionedStatsFn to
  "gracefully fail". Meta-statistics for a feature are only calculated if the
  number of partitions in which the statistic is computed passes a
  configurable threshold.
  """

  def __init__(self, min_partitions_stat_presence: int):
    """Initializes the analyzer.

    Args:
      min_partitions_stat_presence: Minimum number of partitions a statistic
        must be present in for its meta-statistics to be emitted.
    """
    # Meta-stats are only computed if a stat is found in at least
    # min_partitions_stat_presence number of partitions.
    self._min_partitions_stat_presence = min_partitions_stat_presence

  def create_accumulator(self) -> _PartitionedStatisticsAnalyzerAccumulator:
    """Creates an accumulator, which stores partial state of meta-statistics."""
    return _PartitionedStatisticsAnalyzerAccumulator()

  def add_input(self, accumulator: _PartitionedStatisticsAnalyzerAccumulator,
                statistic: statistics_pb2.DatasetFeatureStatistics
               ) -> _PartitionedStatisticsAnalyzerAccumulator:
    """Adds the input (DatasetFeatureStatistics) into the accumulator."""
    for feature in statistic.features:
      for stat in feature.custom_stats:
        # Only the numeric value (`num`) of each custom stat is aggregated.
        accumulator.statistics[
            types.FeaturePath.from_proto(feature.path)][stat.name].append(
                stat.num)
    return accumulator

  def merge_accumulators(
      self, accumulators: Iterable[_PartitionedStatisticsAnalyzerAccumulator]
  ) -> _PartitionedStatisticsAnalyzerAccumulator:
    """Merges together a list of PartitionedStatisticsAnalyzerAccumulators."""
    it = iter(accumulators)
    # NOTE(review): assumes `accumulators` is non-empty — next() would raise
    # StopIteration on an empty iterable. Confirm Beam never passes one.
    result = next(it)
    for accumulator in it:
      for feature_path, feature_statistics in accumulator.statistics.items():
        for stat_name, stat_values in feature_statistics.items():
          result.statistics[feature_path][stat_name].extend(stat_values)
    return result

  def extract_output(self,
                     accumulator: _PartitionedStatisticsAnalyzerAccumulator
                    ) -> statistics_pb2.DatasetFeatureStatistics:
    """Returns meta-statistics as a DatasetFeatureStatistics proto."""
    valid_stats_summary = _get_partitioned_statistics_summary(
        get_valid_statistics(accumulator.statistics,
                             self._min_partitions_stat_presence))
    return stats_util.make_dataset_feature_stats_proto(valid_stats_summary)
class _SampleRecordBatchRowsAccumulator(object):
  """Accumulator to keep track of the current (top-k) sample of records."""

  __slots__ = ['record_batches', 'curr_num_rows', 'curr_byte_size',
               'random_ints']

  def __init__(self):
    """Creates an empty accumulator."""
    # Buffered record batches awaiting sampling/compaction.
    self.record_batches = []
    # Total number of rows (examples) across all of `record_batches`.
    self.curr_num_rows = 0
    # Total byte size of all the accumulated record batches.
    self.curr_byte_size = 0
    # List of numpy arrays of random integers, one array per record batch,
    # one integer per row. Each row is assigned its random number exactly
    # once to avoid sampling bias, so the numbers are preserved across
    # multiple `compacts`.
    self.random_ints = []
# TODO(b/192393883): move this to tfx_bsl.
@beam.typehints.with_input_types(pa.RecordBatch)
@beam.typehints.with_output_types(pa.RecordBatch)
class _SampleRecordBatchRows(beam.CombineFn):
  """Samples rows from record batches.

  The record batches in the partition can vary in the number of rows.
  SamplePartition guarantees that the sample returned is always going to be
  <= sample_size.

  The actual sampling occurs in `compact`. It uses np.partition to calculate
  the top-k of record batch's rows. Where the top-k is a random number
  assigned to each row. Given a uniform distribution of the random number, we
  can keep a running sample of the partition of size k. This gives each row
  an equal probability of being selected.
  """

  # Accumulators may buffer up to this multiple of the sample size before a
  # compaction is forced.
  _BUFFER_SIZE_SCALAR = 5

  def __init__(self, sample_size: int):
    """Initializes the analyzer.

    Args:
      sample_size: Maximum number of rows in the emitted sample.
    """
    self._sample_size = sample_size
    # Number of record batches in accumulator when compacting.
    self._combine_num_record_batches = beam.metrics.Metrics.distribution(
        constants.METRICS_NAMESPACE,
        'sample_record_batch_rows_combine_num_record_batches')
    self._combine_num_columns = beam.metrics.Metrics.distribution(
        constants.METRICS_NAMESPACE, 'sample_record_batch_num_columns')
    # Post compress byte size.
    self._combine_byte_size = beam.metrics.Metrics.distribution(
        constants.METRICS_NAMESPACE,
        'sample_record_batch_rows_combine_byte_size')
    # Number of compacts.
    self._num_compacts = beam.metrics.Metrics.counter(
        constants.METRICS_NAMESPACE, 'sample_record_batch_rows_num_compacts')
    # Total number of rows.
    self._num_instances = beam.metrics.Metrics.counter(
        constants.METRICS_NAMESPACE, 'sample_record_batch_rows_num_instances')
    # We allow our accumulators to keep a buffer of _BUFFER_SIZE_SCALAR x
    # sample size. With this threshold, OOM issues are possible, but unlikely.
    self._merge_record_batch_threshold = self._BUFFER_SIZE_SCALAR * sample_size

  def create_accumulator(self) -> _SampleRecordBatchRowsAccumulator:
    """Creates an accumulator."""
    return _SampleRecordBatchRowsAccumulator()

  def add_input(
      self, accumulator: _SampleRecordBatchRowsAccumulator,
      record_batch: pa.RecordBatch) -> _SampleRecordBatchRowsAccumulator:
    """Adds the input into the accumulator."""
    num_rows = record_batch.num_rows
    self._num_instances.inc(num_rows)
    self._combine_num_columns.update(len(record_batch.columns))
    accumulator.record_batches.append(record_batch)
    accumulator.curr_num_rows += num_rows
    accumulator.curr_byte_size += record_batch.nbytes
    # Assign each new row its (permanent) random sampling key.
    curr_random_ints = np.random.randint(
        0,
        np.iinfo(np.int64).max,
        dtype=np.int64,
        size=(num_rows,))
    accumulator.random_ints.append(curr_random_ints)
    # Compact eagerly once the buffer grows past the threshold to bound
    # memory usage.
    if accumulator.curr_num_rows > self._merge_record_batch_threshold:
      accumulator = self._compact_impl(accumulator)
    return accumulator

  def merge_accumulators(
      self, accumulators: Iterable[_SampleRecordBatchRowsAccumulator]
  ) -> _SampleRecordBatchRowsAccumulator:
    """Merges together a list of _SampleRecordBatchRowsAccumulator."""
    result = _SampleRecordBatchRowsAccumulator()
    for acc in accumulators:
      result.record_batches.extend(acc.record_batches)
      result.curr_num_rows += acc.curr_num_rows
      result.curr_byte_size += acc.curr_byte_size
      result.random_ints.extend(acc.random_ints)
      # Compact if we are over the threshold.
      if result.curr_num_rows > self._merge_record_batch_threshold:
        result = self._compact_impl(result)
    result = self._compact_impl(result)
    return result

  def compact(
      self, accumulator: _SampleRecordBatchRowsAccumulator
  ) -> _SampleRecordBatchRowsAccumulator:
    """Compacts the accumulator (the actual sampling step)."""
    return self._compact_impl(accumulator)

  def extract_output(self,
                     accumulator: _SampleRecordBatchRowsAccumulator
                    ) -> pa.RecordBatch:
    """Returns the sample as a record batch."""
    # We force the compact, to comply with the contract of outputting one
    # record batch.
    acc = self._compact_impl(accumulator)
    assert len(acc.record_batches) == 1
    return acc.record_batches[0]

  def _compact_impl(
      self, accumulator: _SampleRecordBatchRowsAccumulator
  ) -> _SampleRecordBatchRowsAccumulator:
    """Compacts the accumulator.

    This compact selects samples rows from the record batch, and merges them
    into one record batch. We can then clear the cache of all record batches
    seen so far. If the accumulator holds too few record batches, then nothing
    will be compacted.

    The sampling is done by assigning each row in the record batch a random
    number. Then we choose the top-k of the random numbers to get a sample of
    size k.

    Args:
      accumulator: The _SampleRecordBatchRowsAccumulator to compact.

    Returns:
      A _SampleRecordBatchRowsAccumulator that contains one or a list of
      record batch.
    """
    self._num_compacts.inc(1)
    self._combine_num_record_batches.update(len(accumulator.record_batches))
    # There is nothing to compact.
    if accumulator.curr_num_rows <= 1:
      return accumulator
    # There is no need to compact yet.
    if (len(accumulator.record_batches) <= 1 and
        accumulator.curr_num_rows <= self._sample_size):
      return accumulator
    k = min(self._sample_size, accumulator.curr_num_rows)
    rand_ints = np.concatenate(accumulator.random_ints)
    # Find the value that is the breakpoint for the top-k.
    kth_value = np.partition(rand_ints, k - 1)[k - 1]
    # This mask will always have >= 1 Trues.
    equals_to_kth = (rand_ints == kth_value)
    # This mask will always have < k Trues.
    less_than_kth = rand_ints < kth_value
    # Since there may be duplicate values, `equals_to_kth + less_than_kth`
    # might be greater than `k`. We need to keep track of how many to add,
    # without surpassing `k`.
    kth_to_add = k - np.sum(less_than_kth)
    # Preserve the random integers that we had assigned to each row.
    sample_random_ints = rand_ints[rand_ints <= kth_value][:k]
    beg = 0
    sample_indices = []
    for rb in accumulator.record_batches:
      size = rb.num_rows
      end = beg + size
      # Row positions (within this batch) whose key is strictly below the
      # breakpoint — always part of the sample.
      less_than_kth_indices = np.nonzero(less_than_kth[beg:end])[0]
      indices = less_than_kth_indices
      # Add indices of any duplicate values that are equal to `k`.
      if kth_to_add > 0:
        equals_to_kth_indices = np.nonzero(equals_to_kth[beg:end])[0]
        if equals_to_kth_indices.size > 0:
          if equals_to_kth_indices.size >= kth_to_add:
            indices = np.concatenate(
                [less_than_kth_indices, equals_to_kth_indices[:kth_to_add]])
            kth_to_add = 0
          else:
            indices = np.concatenate(
                [less_than_kth_indices, equals_to_kth_indices])
            kth_to_add -= equals_to_kth_indices.size
      sample_indices.append(indices)
      beg += size
    result = _SampleRecordBatchRowsAccumulator()
    # Take and merge the record batches, based on the sampled indices.
    rbs = []
    for rb, indices in zip(accumulator.record_batches, sample_indices):
      rbs.append(table_util.RecordBatchTake(rb, pa.array(indices)))
    compressed_rb = table_util.MergeRecordBatches(rbs)
    result.record_batches = [compressed_rb]
    result.curr_num_rows = compressed_rb.num_rows
    result.curr_byte_size = compressed_rb.nbytes
    result.random_ints = [sample_random_ints]
    self._combine_byte_size.update(result.curr_byte_size)
    return result
def _process_partition(
    partition: Tuple[Tuple[types.SliceKey, int], pa.RecordBatch],
    stats_fn: PartitionedStatsFn
) -> Tuple[types.SliceKey, statistics_pb2.DatasetFeatureStatistics]:
  """Process batch in a single partition.

  Drops the partition index and delegates the statistics computation for the
  record batch to `stats_fn.compute`.

  Args:
    partition: ((slice key, partition index), record batch) pair.
    stats_fn: The PartitionedStatsFn to run on the record batch.

  Returns:
    (slice key, computed statistics) pair.
  """
  key_and_index, record_batch = partition
  slice_key = key_and_index[0]
  return slice_key, stats_fn.compute(record_batch)
# Input type check is commented out, as beam python will fail the type check
# when input is an empty dict.
# @beam.typehints.with_input_types(types.SlicedExample)
@beam.typehints.with_output_types(
    Tuple[types.SliceKey, statistics_pb2.DatasetFeatureStatistics])
class _GenerateNonStreamingCustomStats(beam.PTransform):
  """A beam.PTransform that implements NonStreamingCustomStatsGenerator."""

  def __init__(self, stats_fn: PartitionedStatsFn,
               num_partitions: int, min_partitions_stat_presence: int,
               seed: int, max_examples_per_partition: int, batch_size: int,
               name: Text) -> None:
    """Initializes _GenerateNonStreamingCustomStats.

    Args:
      stats_fn: The PartitionedStatsFn run over each partition.
      num_partitions: Number of partitions examples are assigned to.
      min_partitions_stat_presence: Minimum number of partitions a stat must
        be present in for meta-statistics to be emitted.
      seed: Seed for numpy's global random number generator.
      max_examples_per_partition: Maximum number of examples sampled per
        partition.
      batch_size: NOTE(review): accepted but never stored or used by this
        transform — confirm whether it is still needed.
      name: Unique name of the statistics generator.
    """
    self._stats_fn = stats_fn
    self._num_partitions = num_partitions
    self._min_partitions_stat_presence = min_partitions_stat_presence
    self._name = name
    self._seed = seed
    self._max_examples_per_partition = max_examples_per_partition
    # Seeds the random number generator used in the partitioner.
    np.random.seed(self._seed)

  def expand(self, pcoll: beam.pvalue.PCollection) -> beam.pvalue.PCollection:
    """Estimates the user defined statistic.

    Pipeline: assign each batch to a random partition, sample each partition
    down to at most max_examples_per_partition rows, run stats_fn on every
    partition, then combine the per-partition statistics into meta-statistics
    per slice key.
    """
    return (pcoll
            | 'AssignBatchToPartition' >> self._stats_fn.partitioner(
                self._num_partitions)
            | 'GroupPartitionsIntoList' >> beam.CombinePerKey(
                _SampleRecordBatchRows(self._max_examples_per_partition))
            | 'ProcessPartition' >> beam.Map(
                _process_partition, stats_fn=self._stats_fn)
            | 'ComputeMetaStats' >> beam.CombinePerKey(
                PartitionedStatisticsAnalyzer(min_partitions_stat_presence=self
                                              ._min_partitions_stat_presence)))
class NonStreamingCustomStatsGenerator(stats_generator.TransformStatsGenerator):
  """Estimates custom statistics in a non-streaming fashion.

  A TransformStatsGenerator which partitions the input data and calls the
  user specified stats_fn over each partition. Meta-statistics are calculated
  over the statistics returned by stats_fn to estimate the true value of the
  statistic. For invalid feature values, the worker computing
  PartitionedStatsFn over a partition may "gracefully fail" and not report
  that statistic (refer to PartitionedStatsFn for more information).
  Meta-statistics for a feature are only calculated if the number of
  partitions where the statistic is computed exceeds a configurable
  threshold.

  A large number of examples in a partition may result in worker OOM errors.
  This can be prevented by setting max_examples_per_partition.
  """

  def __init__(
      self,
      stats_fn: PartitionedStatsFn,
      num_partitions: int,
      min_partitions_stat_presence: int,
      seed: int,
      max_examples_per_partition: int,
      batch_size: int = 1000,
      name: Text = 'NonStreamingCustomStatsGenerator') -> None:
    """Initializes NonStreamingCustomStatsGenerator.

    Args:
      stats_fn: The PartitionedStatsFn that will be run on each sample.
      num_partitions: The number of partitions the stat will be calculated on.
      min_partitions_stat_presence: The minimum number of partitions a stat
        computation must succeed in for the result to be returned.
      seed: An int used to seed the numpy random number generator.
      max_examples_per_partition: An integer used to specify the maximum
        number of examples per partition to limit memory usage in a worker. If
        the number of examples per partition exceeds this value, the examples
        are randomly selected.
      batch_size: Number of examples per input batch.
      name: An optional unique name associated with the statistics generator.
    """
    super(NonStreamingCustomStatsGenerator, self).__init__(
        name=name,
        ptransform=_GenerateNonStreamingCustomStats(
            stats_fn=stats_fn,
            num_partitions=num_partitions,
            min_partitions_stat_presence=min_partitions_stat_presence,
            seed=seed,
            max_examples_per_partition=max_examples_per_partition,
            batch_size=batch_size,
            name=name
        ))
| |
#!/usr/bin/env python
"""
Copyright (c) 2006-2015 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import time
from lib.core.common import clearConsoleLine
from lib.core.common import dataToStdout
from lib.core.common import filterListValue
from lib.core.common import getFileItems
from lib.core.common import Backend
from lib.core.common import getPageWordSet
from lib.core.common import hashDBWrite
from lib.core.common import randomInt
from lib.core.common import randomStr
from lib.core.common import readInput
from lib.core.common import safeStringFormat
from lib.core.common import safeSQLIdentificatorNaming
from lib.core.common import unsafeSQLIdentificatorNaming
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.enums import DBMS
from lib.core.enums import HASHDB_KEYS
from lib.core.enums import PAYLOAD
from lib.core.exception import SqlmapDataException
from lib.core.exception import SqlmapMissingMandatoryOptionException
from lib.core.settings import METADB_SUFFIX
from lib.core.settings import BRUTE_COLUMN_EXISTS_TEMPLATE
from lib.core.settings import BRUTE_TABLE_EXISTS_TEMPLATE
from lib.core.threads import getCurrentThreadData
from lib.core.threads import runThreads
from lib.request import inject
def _addPageTextWords():
    """Returns candidate identifier words scraped from the original web page.

    Words are lower-cased; words shorter than three characters or starting
    with a digit are skipped. Order of first appearance is preserved and
    duplicates are removed.
    """
    wordsList = []
    # Set mirror of wordsList for O(1) duplicate checks instead of the
    # previous O(n) `word not in wordsList` scan per word.
    seen = set()

    infoMsg = "adding words used on web page to the check list"
    logger.info(infoMsg)

    pageWords = getPageWordSet(kb.originalPage)

    for word in pageWords:
        word = word.lower()

        if len(word) > 2 and not word[0].isdigit() and word not in seen:
            seen.add(word)
            wordsList.append(word)

    return wordsList
def tableExists(tableFile, regex=None):
    """Brute-force checks which table names from a wordlist exist.

    Candidate names are read from `tableFile`, extended with words scraped
    from the original page and optionally filtered by `regex`. Each candidate
    is probed with a boolean existence payload using multiple threads. Found
    tables are cached in kb.data.cachedTables and persisted via hashDBWrite.

    Returns kb.data.cachedTables, or None if the user aborts the check.
    """
    # With only TIME/STACKED techniques available each probe is slow, so ask
    # the user for confirmation once (the choice is remembered in kb).
    if kb.tableExistsChoice is None and not any(_ for _ in kb.injection.data if _ not in (PAYLOAD.TECHNIQUE.TIME, PAYLOAD.TECHNIQUE.STACKED)) and not conf.direct:
        warnMsg = "it's not recommended to use '%s' and/or '%s' " % (PAYLOAD.SQLINJECTION[PAYLOAD.TECHNIQUE.TIME], PAYLOAD.SQLINJECTION[PAYLOAD.TECHNIQUE.STACKED])
        warnMsg += "for common table existence check"
        logger.warn(warnMsg)

        message = "are you sure you want to continue? [y/N] "
        test = readInput(message, default="N")
        kb.tableExistsChoice = test[0] in ("y", "Y")

        if not kb.tableExistsChoice:
            return None

    # Sanity probe with a random table name: a "true" answer means the
    # injection cannot distinguish results and brute-forcing is pointless.
    result = inject.checkBooleanExpression("%s" % safeStringFormat(BRUTE_TABLE_EXISTS_TEMPLATE, (randomInt(1), randomStr())))

    if conf.db and Backend.getIdentifiedDbms() in (DBMS.ORACLE, DBMS.DB2):
        conf.db = conf.db.upper()

    if result:
        # Fixed typo in user-facing message: "errornous" -> "erroneous".
        errMsg = "can't use table existence check because of detected invalid results "
        errMsg += "(most probably caused by inability of the used injection "
        errMsg += "to distinguish erroneous results)"
        raise SqlmapDataException(errMsg)

    tables = getFileItems(tableFile, lowercase=Backend.getIdentifiedDbms() in (DBMS.ACCESS,), unique=True)

    infoMsg = "checking table existence using items from '%s'" % tableFile
    logger.info(infoMsg)

    tables.extend(_addPageTextWords())
    tables = filterListValue(tables, regex)

    threadData = getCurrentThreadData()
    threadData.shared.count = 0
    threadData.shared.limit = len(tables)
    threadData.shared.value = []
    threadData.shared.unique = set()

    def tableExistsThread():
        threadData = getCurrentThreadData()

        while kb.threadContinue:
            # Atomically claim the next candidate index.
            kb.locks.count.acquire()
            if threadData.shared.count < threadData.shared.limit:
                table = safeSQLIdentificatorNaming(tables[threadData.shared.count], True)
                threadData.shared.count += 1
                kb.locks.count.release()
            else:
                kb.locks.count.release()
                break

            # Prefix the table name with the database name where the DBMS
            # supports it (MSSQL/Sybase use 'db..table' shorthand).
            if conf.db and METADB_SUFFIX not in conf.db and Backend.getIdentifiedDbms() not in (DBMS.SQLITE, DBMS.ACCESS, DBMS.FIREBIRD):
                fullTableName = "%s%s%s" % (conf.db, '..' if Backend.getIdentifiedDbms() in (DBMS.MSSQL, DBMS.SYBASE) else '.', table)
            else:
                fullTableName = table

            result = inject.checkBooleanExpression("%s" % safeStringFormat(BRUTE_TABLE_EXISTS_TEMPLATE, (randomInt(1), fullTableName)))

            kb.locks.io.acquire()

            # Record hits, deduplicated case-insensitively across threads.
            if result and table.lower() not in threadData.shared.unique:
                threadData.shared.value.append(table)
                threadData.shared.unique.add(table.lower())

                if conf.verbose in (1, 2) and not hasattr(conf, "api"):
                    clearConsoleLine(True)
                    infoMsg = "[%s] [INFO] retrieved: %s\n" % (time.strftime("%X"), unsafeSQLIdentificatorNaming(table))
                    dataToStdout(infoMsg, True)

            if conf.verbose in (1, 2):
                status = '%d/%d items (%d%%)' % (threadData.shared.count, threadData.shared.limit, round(100.0 * threadData.shared.count / threadData.shared.limit))
                dataToStdout("\r[%s] [INFO] tried %s" % (time.strftime("%X"), status), True)

            kb.locks.io.release()

    try:
        runThreads(conf.threads, tableExistsThread, threadChoice=True)
    except KeyboardInterrupt:
        warnMsg = "user aborted during table existence "
        warnMsg += "check. sqlmap will display partial output"
        logger.warn(warnMsg)

    clearConsoleLine(True)
    dataToStdout("\n")

    if not threadData.shared.value:
        warnMsg = "no table(s) found"
        logger.warn(warnMsg)
    else:
        for item in threadData.shared.value:
            if conf.db not in kb.data.cachedTables:
                kb.data.cachedTables[conf.db] = [item]
            else:
                kb.data.cachedTables[conf.db].append(item)

        # Persist newly found (db, table) pairs to the session database.
        for _ in ((conf.db, item) for item in threadData.shared.value):
            if _ not in kb.brute.tables:
                kb.brute.tables.append(_)

        hashDBWrite(HASHDB_KEYS.KB_BRUTE_TABLES, kb.brute.tables, True)

    return kb.data.cachedTables
def columnExists(columnFile, regex=None):
    """Brute-force checks which column names exist in table conf.tbl.

    Candidate names are read from `columnFile`, extended with words scraped
    from the original page and optionally filtered by `regex`. Found columns
    are classified as numeric/non-numeric, cached in kb.data.cachedColumns
    and persisted via hashDBWrite.

    Returns kb.data.cachedColumns, or None if the user aborts the check.

    Raises:
        SqlmapMissingMandatoryOptionException: if no table (conf.tbl) is set.
        SqlmapDataException: if the injection cannot distinguish results.
    """
    # With only TIME/STACKED techniques available each probe is slow, so ask
    # the user for confirmation once (the choice is remembered in kb).
    if kb.columnExistsChoice is None and not any(_ for _ in kb.injection.data if _ not in (PAYLOAD.TECHNIQUE.TIME, PAYLOAD.TECHNIQUE.STACKED)) and not conf.direct:
        warnMsg = "it's not recommended to use '%s' and/or '%s' " % (PAYLOAD.SQLINJECTION[PAYLOAD.TECHNIQUE.TIME], PAYLOAD.SQLINJECTION[PAYLOAD.TECHNIQUE.STACKED])
        warnMsg += "for common column existence check"
        logger.warn(warnMsg)

        message = "are you sure you want to continue? [y/N] "
        test = readInput(message, default="N")
        kb.columnExistsChoice = test[0] in ("y", "Y")

        if not kb.columnExistsChoice:
            return None

    if not conf.tbl:
        errMsg = "missing table parameter"
        raise SqlmapMissingMandatoryOptionException(errMsg)

    if conf.db and Backend.getIdentifiedDbms() in (DBMS.ORACLE, DBMS.DB2):
        conf.db = conf.db.upper()

    # Sanity probe with a random column/table pair: a "true" answer means
    # the injection cannot distinguish results and brute-forcing is pointless.
    result = inject.checkBooleanExpression(safeStringFormat(BRUTE_COLUMN_EXISTS_TEMPLATE, (randomStr(), randomStr())))

    if result:
        # Fixed typo in user-facing message: "errornous" -> "erroneous".
        errMsg = "can't use column existence check because of detected invalid results "
        errMsg += "(most probably caused by inability of the used injection "
        errMsg += "to distinguish erroneous results)"
        raise SqlmapDataException(errMsg)

    infoMsg = "checking column existence using items from '%s'" % columnFile
    logger.info(infoMsg)

    columns = getFileItems(columnFile, unique=True)
    columns.extend(_addPageTextWords())
    columns = filterListValue(columns, regex)

    table = safeSQLIdentificatorNaming(conf.tbl, True)

    if conf.db and METADB_SUFFIX not in conf.db and Backend.getIdentifiedDbms() not in (DBMS.SQLITE, DBMS.ACCESS, DBMS.FIREBIRD):
        table = "%s.%s" % (safeSQLIdentificatorNaming(conf.db), table)

    # NOTE(review): kb.bruteMode is set here but never reset in this function,
    # and tableExists() does not set it at all — confirm intended.
    kb.threadContinue = True
    kb.bruteMode = True

    threadData = getCurrentThreadData()
    threadData.shared.count = 0
    threadData.shared.limit = len(columns)
    threadData.shared.value = []

    def columnExistsThread():
        threadData = getCurrentThreadData()

        while kb.threadContinue:
            # Atomically claim the next candidate index.
            kb.locks.count.acquire()
            if threadData.shared.count < threadData.shared.limit:
                column = safeSQLIdentificatorNaming(columns[threadData.shared.count])
                threadData.shared.count += 1
                kb.locks.count.release()
            else:
                kb.locks.count.release()
                break

            result = inject.checkBooleanExpression(safeStringFormat(BRUTE_COLUMN_EXISTS_TEMPLATE, (column, table)))

            kb.locks.io.acquire()

            if result:
                threadData.shared.value.append(column)

                if conf.verbose in (1, 2) and not hasattr(conf, "api"):
                    clearConsoleLine(True)
                    infoMsg = "[%s] [INFO] retrieved: %s\n" % (time.strftime("%X"), unsafeSQLIdentificatorNaming(column))
                    dataToStdout(infoMsg, True)

            if conf.verbose in (1, 2):
                status = "%d/%d items (%d%%)" % (threadData.shared.count, threadData.shared.limit, round(100.0 * threadData.shared.count / threadData.shared.limit))
                dataToStdout("\r[%s] [INFO] tried %s" % (time.strftime("%X"), status), True)

            kb.locks.io.release()

    try:
        runThreads(conf.threads, columnExistsThread, threadChoice=True)
    except KeyboardInterrupt:
        warnMsg = "user aborted during column existence "
        warnMsg += "check. sqlmap will display partial output"
        logger.warn(warnMsg)

    clearConsoleLine(True)
    dataToStdout("\n")

    if not threadData.shared.value:
        warnMsg = "no column(s) found"
        logger.warn(warnMsg)
    else:
        columns = {}

        # Classify each found column as numeric/non-numeric with one extra
        # boolean probe per column.
        for column in threadData.shared.value:
            if Backend.getIdentifiedDbms() in (DBMS.MYSQL,):
                result = not inject.checkBooleanExpression("%s" % safeStringFormat("EXISTS(SELECT %s FROM %s WHERE %s REGEXP '[^0-9]')", (column, table, column)))
            else:
                result = inject.checkBooleanExpression("%s" % safeStringFormat("EXISTS(SELECT %s FROM %s WHERE ROUND(%s)=ROUND(%s))", (column, table, column, column)))

            if result:
                columns[column] = "numeric"
            else:
                columns[column] = "non-numeric"

        kb.data.cachedColumns[conf.db] = {conf.tbl: columns}

        # Persist newly found (db, table, column, type) tuples to the session.
        for _ in map(lambda x: (conf.db, conf.tbl, x[0], x[1]), columns.items()):
            if _ not in kb.brute.columns:
                kb.brute.columns.append(_)

        hashDBWrite(HASHDB_KEYS.KB_BRUTE_COLUMNS, kb.brute.columns, True)

    return kb.data.cachedColumns
| |
# This file is part of the MapProxy project.
# Copyright (C) 2011 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import datetime
import socket
import time
import hashlib
from mapproxy.image import ImageSource
from mapproxy.cache.base import (
TileCacheBase,
tile_buffer, CacheBackendError,)
from mapproxy.source import SourceError
from mapproxy.srs import SRS
from mapproxy.compat import string_type, iteritems, BytesIO
from threading import Lock
try:
import requests
except ImportError:
requests = None
try:
import simplejson as json
except ImportError:
try:
import json
except ImportError:
json = None
import logging
log = logging.getLogger(__name__)
class UnexpectedResponse(CacheBackendError):
    """Raised when CouchDB answers a request with an unexpected status code."""
    pass
class CouchDBCache(TileCacheBase):
    """Tile cache backend that stores each tile as a CouchDB document."""

    def __init__(self, url, db_name,
                 file_ext, tile_grid, md_template=None,
                 tile_id_template=None):
        """Initializes the CouchDB tile cache.

        Args:
            url: Base URL of the CouchDB server.
            db_name: Name of the database (always lowercased in the URL).
            file_ext: Tile image file extension (used as image/<ext>
                attachment content type).
            tile_grid: Tile grid the coordinates refer to.
            md_template: Optional template that builds per-tile metadata
                documents.
            tile_id_template: Optional '%'-style template for document ids.
        """
        if requests is None:
            raise ImportError("CouchDB backend requires 'requests' package.")
        if json is None:
            raise ImportError("CouchDB backend requires 'simplejson' package or Python 2.6+.")
        # Identifier for the locking layer, unique per server URL + database.
        # NOTE(review): hashlib.md5() requires bytes on Python 3; this only
        # works with str on Python 2 — confirm targeted Python version.
        self.lock_cache_id = 'couchdb-' + hashlib.md5(url + db_name).hexdigest()
        self.file_ext = file_ext
        self.tile_grid = tile_grid
        self.md_template = md_template
        # The database-name part of the URL is always lowercased.
        self.couch_url = '%s/%s' % (url.rstrip('/'), db_name.lower())
        self.req_session = requests.Session()
        self.req_session.timeout = 5
        # The database is created lazily by init_db() before the first request.
        self.db_initialised = False
        self.app_init_db_lock = Lock()
        self.tile_id_template = tile_id_template
def init_db(self):
with self.app_init_db_lock:
if self.db_initialised:
return
try:
self.req_session.put(self.couch_url)
self.db_initialised = True
except requests.exceptions.RequestException as ex:
log.warn('unable to initialize CouchDB: %s', ex)
def tile_url(self, coord):
return self.document_url(coord) + '/tile'
    def document_url(self, coord, relative=False):
        """Returns the CouchDB document URL (or id) for a tile coordinate.

        The default id format is '<grid>-<z>-<x>-<y>'. A configured
        `tile_id_template` is expanded with the local variables (x, y, z,
        grid_name, couch_url) via `% locals()`, so these variable names are
        part of the template contract and must not be renamed.

        Args:
            coord: Tile coordinate as (x, y, z).
            relative: If True, return only the document id without the
                database URL prefix.
        """
        x, y, z = coord
        grid_name = self.tile_grid.name
        couch_url = self.couch_url
        if relative:
            if self.tile_id_template:
                # Strip the database URL prefix from the template, if present,
                # to turn the absolute template into a relative id.
                if self.tile_id_template.startswith('%(couch_url)s/'):
                    tile_id_template = self.tile_id_template[len('%(couch_url)s/'):]
                else:
                    tile_id_template = self.tile_id_template
                return tile_id_template % locals()
            else:
                return '%(grid_name)s-%(z)s-%(x)s-%(y)s' % locals()
        else:
            if self.tile_id_template:
                return self.tile_id_template % locals()
            else:
                return '%(couch_url)s/%(grid_name)s-%(z)s-%(x)s-%(y)s' % locals()
    def is_cached(self, tile):
        """Checks whether `tile` already exists in CouchDB.

        On a hit (HTTP 200) this also sets tile.timestamp from the stored
        document as a side effect. Network errors are treated as "not
        cached" instead of being raised.
        """
        if tile.coord is None or tile.source:
            return True
        url = self.document_url(tile.coord)
        try:
            self.init_db()
            resp = self.req_session.get(url)
            if resp.status_code == 200:
                doc = json.loads(resp.content)
                # NOTE(review): assumes md_template is configured; with the
                # default md_template=None this raises AttributeError on a
                # 200 response — confirm md_template is always set.
                tile.timestamp = doc.get(self.md_template.timestamp_key)
                return True
        except (requests.exceptions.RequestException, socket.error) as ex:
            # is_cached should not fail (would abort seeding for example),
            # so we catch these errors here and just return False
            log.warn('error while requesting %s: %s', url, ex)
            return False
        if resp.status_code == 404:
            return False
        raise SourceError('%r: %r' % (resp.status_code, resp.content))
    def _tile_doc(self, tile):
        """Builds the CouchDB document for `tile`.

        Returns:
            (document id, document dict) tuple. The tile image is embedded
            as a base64 inline attachment named 'tile'; metadata comes from
            md_template when configured.
        """
        tile_id = self.document_url(tile.coord, relative=True)
        if self.md_template:
            tile_doc = self.md_template.doc(tile, self.tile_grid)
        else:
            tile_doc = {}
        tile_doc['_id'] = tile_id
        with tile_buffer(tile) as buf:
            data = buf.read()
        # NOTE(review): str.encode('base64') exists only on Python 2 —
        # confirm the supported Python versions (Python 3 needs
        # base64.b64encode).
        tile_doc['_attachments'] = {
            'tile': {
                'content_type': 'image/' + self.file_ext,
                'data': data.encode('base64').replace('\n', ''),
            }
        }
        return tile_id, tile_doc
def _store_bulk(self, tiles):
tile_docs = {}
for tile in tiles:
tile_id, tile_doc = self._tile_doc(tile)
tile_docs[tile_id] = tile_doc
duplicate_tiles = self._post_bulk(tile_docs)
if duplicate_tiles:
self._fill_rev_ids(duplicate_tiles)
self._post_bulk(duplicate_tiles, no_conflicts=True)
return True
def _post_bulk(self, tile_docs, no_conflicts=False):
"""
POST multiple tiles, returns all tile docs with conflicts during POST.
"""
doc = {'docs': tile_docs.values()}
data = json.dumps(doc)
self.init_db()
resp = self.req_session.post(self.couch_url + '/_bulk_docs', data=data, headers={'Content-type': 'application/json'})
if resp.status_code != 201:
raise UnexpectedResponse('got unexpected resp (%d) from CouchDB: %s' % (resp.status_code, resp.content))
resp_doc = json.loads(resp.content)
duplicate_tiles = {}
for tile in resp_doc:
if tile.get('error', 'false') == 'conflict':
duplicate_tiles[tile['id']] = tile_docs[tile['id']]
if no_conflicts and duplicate_tiles:
raise UnexpectedResponse('got unexpected resp (%d) from CouchDB: %s' % (resp.status_code, resp.content))
return duplicate_tiles
def _fill_rev_ids(self, tile_docs):
"""
Request all revs for tile_docs and insert it into the tile_docs.
"""
keys_doc = {'keys': tile_docs.keys()}
data = json.dumps(keys_doc)
self.init_db()
resp = self.req_session.post(self.couch_url + '/_all_docs', data=data, headers={'Content-type': 'application/json'})
if resp.status_code != 200:
raise UnexpectedResponse('got unexpected resp (%d) from CouchDB: %s' % (resp.status_code, resp.content))
resp_doc = json.loads(resp.content)
for tile in resp_doc['rows']:
tile_docs[tile['id']]['_rev'] = tile['value']['rev']
def store_tile(self, tile):
    """Write ``tile`` to the cache; no-op (True) when already stored."""
    return True if tile.stored else self._store_bulk([tile])
def store_tiles(self, tiles):
    """Bulk-store every tile from ``tiles`` that is not yet stored."""
    pending = [tile for tile in tiles if not tile.stored]
    return self._store_bulk(pending)
def load_tile_metadata(self, tile):
    """Populate ``tile.timestamp`` unless it is already set."""
    if not tile.timestamp:
        # is_cached fetches the tile document and fills in the metadata
        self.is_cached(tile)
def load_tile(self, tile, with_metadata=False):
    """Load the tile image (and timestamp) from CouchDB into ``tile``.

    Bulk loading with load_tiles is not implemented, because CouchDB's
    ``_all_docs`` does not include attachments.
    """
    import base64
    if tile.source or tile.coord is None:
        return True
    url = self.document_url(tile.coord) + '?attachments=true'
    self.init_db()
    resp = self.req_session.get(url, headers={'Accept': 'application/json'})
    if resp.status_code == 200:
        doc = json.loads(resp.content)
        # NOTE: str.decode('base64') is Python-2 only; base64.b64decode
        # works on Python 2 and 3 (it accepts the str CouchDB returns).
        tile_data = BytesIO(base64.b64decode(doc['_attachments']['tile']['data']))
        tile.source = ImageSource(tile_data)
        tile.timestamp = doc.get(self.md_template.timestamp_key)
        return True
    return False
def remove_tile(self, tile):
    """Delete the document for ``tile``; True when it is gone afterwards."""
    if tile.coord is None:
        return True
    url = self.document_url(tile.coord)
    head = requests.head(url)
    if head.status_code == 404:
        # already removed
        return True
    # CouchDB requires the current revision for a DELETE
    rev_id = head.headers['etag'].strip('"')
    self.init_db()
    resp = self.req_session.delete(url + '?rev=' + rev_id)
    return resp.status_code == 200
def utc_now_isoformat():
    """Current UTC time as ISO 8601 with 'Z' suffix and no sub-seconds."""
    # dropping the microseconds before isoformat() avoids having to strip
    # the fractional part from the string afterwards
    return datetime.datetime.utcnow().replace(microsecond=0).isoformat() + 'Z'
class CouchDBMDTemplate(object):
    """Template for per-tile metadata documents stored in CouchDB.

    ``attributes`` maps document keys to either literal values or
    ``{{placeholder}}`` strings that are expanded for each tile in `doc`.
    """
    def __init__(self, attributes):
        self.attributes = attributes
        # Find the key carrying the {{timestamp}} placeholder; when the
        # caller did not configure one, add a default 'timestamp' entry
        # (note: this mutates the passed-in dict).
        for key, value in iteritems(attributes):
            if value == '{{timestamp}}':
                self.timestamp_key = key
                break
        else:
            attributes['timestamp'] = '{{timestamp}}'
            self.timestamp_key = 'timestamp'

    def doc(self, tile, grid):
        """Return the expanded metadata document for ``tile`` on ``grid``."""
        doc = {}
        x, y, z = tile.coord
        for key, value in iteritems(self.attributes):
            # non-template values (or strings without '{{') are copied verbatim
            if not isinstance(value, string_type) or not value.startswith('{{'):
                doc[key] = value
                continue
            if value == '{{timestamp}}':
                doc[key] = time.time()
            elif value == '{{x}}':
                doc[key] = x
            elif value == '{{y}}':
                doc[key] = y
            elif value in ('{{z}}', '{{level}}'):
                doc[key] = z
            elif value == '{{utc_iso}}':
                doc[key] = utc_now_isoformat()
            elif value == '{{wgs_tile_centroid}}':
                # centroid of the tile bbox, reprojected to WGS84 (EPSG:4326)
                tile_bbox = grid.tile_bbox(tile.coord)
                centroid = (
                    tile_bbox[0] + (tile_bbox[2]-tile_bbox[0])/2,
                    tile_bbox[1] + (tile_bbox[3]-tile_bbox[1])/2
                )
                centroid = grid.srs.transform_to(SRS(4326), centroid)
                doc[key] = centroid
            elif value == '{{tile_centroid}}':
                # centroid of the tile bbox in the grid's native SRS
                tile_bbox = grid.tile_bbox(tile.coord)
                centroid = (
                    tile_bbox[0] + (tile_bbox[2]-tile_bbox[0])/2,
                    tile_bbox[1] + (tile_bbox[3]-tile_bbox[1])/2
                )
                doc[key] = centroid
            else:
                raise ValueError('unknown CouchDB tile_metadata value: %r' % (value, ))
        return doc
| |
import logging.config
import random
import socket
import struct
import threading
import os
from coapthon import defines
from coapthon.layers.blocklayer import BlockLayer
from coapthon.layers.cachelayer import CacheLayer
from coapthon.layers.forwardLayer import ForwardLayer
from coapthon.layers.messagelayer import MessageLayer
from coapthon.layers.observelayer import ObserveLayer
from coapthon.layers.resourcelayer import ResourceLayer
from coapthon.messages.message import Message
from coapthon.messages.request import Request
from coapthon.resources.resource import Resource
from coapthon.serializer import Serializer
from coapthon.utils import Tree, create_logging
__author__ = 'Giacomo Tanganelli'
# Module-level logging setup: make sure a logging.conf exists (create a
# default one when missing), then configure logging from it.  Runs on import.
if not os.path.isfile("logging.conf"):
    create_logging()

logger = logging.getLogger(__name__)
logging.config.fileConfig("logging.conf", disable_existing_loggers=False)
class CoAP(object):
    """
    Implementation of the Forward Proxy
    """

    def __init__(self, server_address, multicast=False, starting_mid=None, cache=False, sock=None):
        """
        Initialize the Forward Proxy.

        :param server_address: Server address for incoming connections
        :param multicast: if the ip is a multicast address
        :param starting_mid: used for testing purposes
        :param cache: if a cache must be used
        :param sock: if a socket has been created externally, it can be used directly
        """
        self.stopped = threading.Event()
        self.stopped.clear()
        # Stop-events of in-flight retransmission tasks; all set() on close().
        self.to_be_stopped = []
        # NOTE(review): this rebinds the attribute ``purge`` from the bound
        # method to the Thread object, shadowing the method on this instance.
        # The thread's target was captured before the rebinding, so it still
        # runs the method, but ``self.purge()`` no longer works afterwards.
        self.purge = threading.Thread(target=self.purge)
        self.purge.start()
        self.cache_enable = cache

        # Protocol layers: message handling, blockwise transfer, observe.
        self._messageLayer = MessageLayer(starting_mid)
        self._blockLayer = BlockLayer()
        self._observeLayer = ObserveLayer()
        if self.cache_enable:
            self._cacheLayer = CacheLayer(defines.FORWARD_PROXY)
        else:
            self._cacheLayer = None
        self._forwardLayer = ForwardLayer(self)
        self.resourceLayer = ResourceLayer(self)

        # Resource directory
        root = Resource('root', self, visible=False, observable=False, allow_children=True)
        root.path = '/'
        self.root = Tree()
        self.root["/"] = root

        self._serializer = None

        self.server_address = server_address
        self.multicast = multicast

        # Resolve the address family (IPv4 vs IPv6) of the bind address.
        addrinfo = socket.getaddrinfo(self.server_address[0], None)[0]

        if sock is not None:
            # Use given socket, could be a DTLS socket
            self._socket = sock
        elif self.multicast:  # pragma: no cover
            # Create a socket
            # self._socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, 255)
            # self._socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_LOOP, 1)

            # Join group
            if addrinfo[0] == socket.AF_INET:  # IPv4
                self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)

                # Allow multiple copies of this program on one machine
                # (not strictly needed)
                self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                self._socket.bind((defines.ALL_COAP_NODES, self.server_address[1]))
                mreq = struct.pack("4sl", socket.inet_aton(defines.ALL_COAP_NODES), socket.INADDR_ANY)
                self._socket.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
                self._unicast_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                self._unicast_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                self._unicast_socket.bind(self.server_address)
            else:
                self._socket = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM, socket.IPPROTO_UDP)

                # Allow multiple copies of this program on one machine
                # (not strictly needed)
                self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                self._socket.bind((defines.ALL_COAP_NODES_IPV6, self.server_address[1]))
                addrinfo_multicast = socket.getaddrinfo(defines.ALL_COAP_NODES_IPV6, 5683)[0]
                group_bin = socket.inet_pton(socket.AF_INET6, addrinfo_multicast[4][0])
                mreq = group_bin + struct.pack('@I', 0)
                self._socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, mreq)
                self._unicast_socket = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
                self._unicast_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                self._unicast_socket.bind(self.server_address)
        else:
            # Plain unicast UDP socket.
            if addrinfo[0] == socket.AF_INET:  # IPv4
                self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            else:
                self._socket = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
                self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

            self._socket.bind(self.server_address)

    def purge(self):
        """
        Clean old transactions
        """
        # Wakes every EXCHANGE_LIFETIME seconds (or immediately on stop)
        # and asks the message layer to drop expired transactions.
        while not self.stopped.isSet():
            self.stopped.wait(timeout=defines.EXCHANGE_LIFETIME)
            self._messageLayer.purge()

    def listen(self, timeout=10):
        """
        Listen for incoming messages. Timeout is used to check if the server must be switched off.

        :param timeout: Socket Timeout in seconds
        """
        self._socket.settimeout(float(timeout))
        while not self.stopped.isSet():
            try:
                data, client_address = self._socket.recvfrom(4096)
            except socket.timeout:
                # periodic wake-up so the stop flag is re-checked
                continue
            try:
                # Start a new thread not to block other requests
                args = ((data, client_address), )
                t = threading.Thread(target=self.receive_datagram, args=args)
                t.daemon = True
                t.start()
            except RuntimeError:
                logging.exception("Exception with Executor")
        logging.debug("closing socket")
        # NOTE(review): close() also closes this socket; closing an already
        # closed socket is harmless here.
        self._socket.close()

    def close(self):
        """
        Stop the server.
        """
        logger.info("Stop server")
        self.stopped.set()
        # wake up every pending retransmission task so its thread can exit
        for event in self.to_be_stopped:
            event.set()
        self._socket.close()

    def receive_datagram(self, args):
        """
        Handle messages coming from the udp socket.

        :param args: (data, client_address)
        """
        data, client_address = args

        logging.debug("receiving datagram")
        try:
            # IPv4 addresses are (host, port); IPv6 adds flowinfo/scopeid
            host, port = client_address
        except ValueError:
            host, port, tmp1, tmp2 = client_address
        client_address = (host, port)

        serializer = Serializer()
        message = serializer.deserialize(data, client_address)
        if isinstance(message, int):
            # deserialize returned an error code: reply with a RST
            logger.error("receive_datagram - BAD REQUEST")

            rst = Message()
            rst.destination = client_address
            rst.type = defines.Types["RST"]
            rst.code = message
            self.send_datagram(rst)
            return

        logger.debug("receive_datagram - " + str(message))
        if isinstance(message, Request):
            transaction = self._messageLayer.receive_request(message)
            if transaction.request.duplicated and transaction.completed:
                # duplicate of a finished exchange: resend the stored response
                logger.debug("message duplicated,transaction completed")
                transaction = self._observeLayer.send_response(transaction)
                transaction = self._blockLayer.send_response(transaction)
                transaction = self._messageLayer.send_response(transaction)
                self.send_datagram(transaction.response)
                return
            elif transaction.request.duplicated and not transaction.completed:
                # still processing the original: just acknowledge it
                logger.debug("message duplicated,transaction NOT completed")
                self._send_ack(transaction)
                return

            # send a separate ACK if processing outlasts ACK_TIMEOUT
            transaction.separate_timer = self._start_separate_timer(transaction)

            transaction = self._blockLayer.receive_request(transaction)
            if transaction.block_transfer:
                # intermediate block: answer immediately, no forwarding yet
                self._stop_separate_timer(transaction.separate_timer)
                transaction = self._messageLayer.send_response(transaction)
                self.send_datagram(transaction.response)
                return

            transaction = self._observeLayer.receive_request(transaction)

            """
            call to the cache layer to check if there's a cached response for the request
            if not, call the forward layer
            """
            if self._cacheLayer is not None:
                transaction = self._cacheLayer.receive_request(transaction)
                if transaction.cacheHit is False:
                    logging.debug(transaction.request)
                    transaction = self._forwardLayer.receive_request(transaction)
                    logging.debug(transaction.response)
                transaction = self._observeLayer.send_response(transaction)
                transaction = self._blockLayer.send_response(transaction)
                transaction = self._cacheLayer.send_response(transaction)
            else:
                transaction = self._forwardLayer.receive_request(transaction)
                transaction = self._observeLayer.send_response(transaction)
                transaction = self._blockLayer.send_response(transaction)

            self._stop_separate_timer(transaction.separate_timer)
            transaction = self._messageLayer.send_response(transaction)
            if transaction.response is not None:
                if transaction.response.type == defines.Types["CON"]:
                    # confirmable responses need a retransmission task
                    self._start_retransmission(transaction, transaction.response)
                self.send_datagram(transaction.response)
        elif isinstance(message, Message):
            # empty message (ACK/RST) for one of our confirmable messages
            transaction = self._messageLayer.receive_empty(message)
            if transaction is not None:
                transaction = self._blockLayer.receive_empty(message, transaction)
                self._observeLayer.receive_empty(message, transaction)
        else:  # is Response
            logger.error("Received response from %s", message.source)

    def send_datagram(self, message):
        """
        Send a message through the udp socket.

        :type message: Message
        :param message: the message to send
        """
        if not self.stopped.isSet():
            host, port = message.destination
            logger.debug("send_datagram - " + str(message))
            serializer = Serializer()
            message = serializer.serialize(message)

            self._socket.sendto(message, (host, port))

    def _start_retransmission(self, transaction, message):
        """
        Start the retransmission task.

        :type transaction: Transaction
        :param transaction: the transaction that owns the message that needs retransmission
        :type message: Message
        :param message: the message that needs the retransmission task
        """
        with transaction:
            if message.type == defines.Types['CON']:
                # initial timeout randomized per RFC 7252 (ACK_TIMEOUT..ACK_TIMEOUT*ACK_RANDOM_FACTOR)
                future_time = random.uniform(defines.ACK_TIMEOUT, (defines.ACK_TIMEOUT * defines.ACK_RANDOM_FACTOR))
                transaction.retransmit_thread = threading.Thread(target=self._retransmit,
                                                                 args=(transaction, message, future_time, 0))
                transaction.retransmit_stop = threading.Event()
                self.to_be_stopped.append(transaction.retransmit_stop)
                transaction.retransmit_thread.start()

    def _retransmit(self, transaction, message, future_time, retransmit_count):
        """
        Thread function to retransmit the message in the future

        :param transaction: the transaction that owns the message that needs retransmission
        :param message: the message that needs the retransmission task
        :param future_time: the amount of time to wait before a new attempt
        :param retransmit_count: the number of retransmissions
        """
        with transaction:
            while retransmit_count < defines.MAX_RETRANSMIT and (not message.acknowledged and not message.rejected) \
                    and not self.stopped.isSet():
                # wait() returns early when the stop event is set on close()
                transaction.retransmit_stop.wait(timeout=future_time)
                if not message.acknowledged and not message.rejected and not self.stopped.isSet():
                    retransmit_count += 1
                    # exponential back-off between attempts
                    future_time *= 2
                    self.send_datagram(message)

            if message.acknowledged or message.rejected:
                message.timeouted = False
            else:
                logger.warning("Give up on message {message}".format(message=message.line_print))
                message.timeouted = True
                if message.observe is not None:
                    # unreachable observer: drop its subscription
                    self._observeLayer.remove_subscriber(message)

            try:
                self.to_be_stopped.remove(transaction.retransmit_stop)
            except ValueError:
                # already removed (e.g. concurrent close())
                pass
            transaction.retransmit_stop = None
            transaction.retransmit_thread = None

    def _start_separate_timer(self, transaction):
        """
        Start a thread to handle separate mode.

        :type transaction: Transaction
        :param transaction: the transaction that is in processing
        :rtype : the Timer object
        """
        t = threading.Timer(defines.ACK_TIMEOUT, self._send_ack, (transaction,))
        t.start()
        return t

    @staticmethod
    def _stop_separate_timer(timer):
        """
        Stop the separate Thread if an answer has been already provided to the client.

        :param timer: The Timer object
        """
        timer.cancel()

    def _send_ack(self, transaction):
        """
        Sends an ACK message for the request.

        :param transaction: the transaction that owns the request
        """
        ack = Message()
        ack.type = defines.Types['ACK']

        # only ACK once; the message layer marks the request acknowledged
        if not transaction.request.acknowledged:
            ack = self._messageLayer.send_empty(transaction, transaction.request, ack)
            self.send_datagram(ack)
| |
# Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Clusters DELF features using the K-means algorithm.
All DELF local feature descriptors for a given dataset's index images are loaded
as the input.
Note that:
- we only use features extracted from whole images (no features from boxes are
used).
- the codebook should be trained on Paris images for Oxford retrieval
experiments, and vice-versa.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import time
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import app
from delf import feature_io
from delf.python.detect_to_retrieve import dataset
# Parsed command-line arguments; populated in __main__ before app.run().
cmd_args = None

# Extensions.
_DELF_EXTENSION = '.delf'

# Default DELF dimensionality.
_DELF_DIM = 128

# Pace to report log when collecting features.
_STATUS_CHECK_ITERATIONS = 100
class _IteratorInitHook(tf.train.SessionRunHook):
  """Hook to initialize data iterator after session is created."""

  def __init__(self):
    super(_IteratorInitHook, self).__init__()
    # Callable taking a tf.Session; assigned by the input_fn builder
    # before the session is created.
    self.iterator_initializer_fn = None

  def after_create_session(self, session, coord):
    """Initialize the iterator after the session has been created."""
    del coord
    self.iterator_initializer_fn(session)
def main(argv):
  """Loads DELF features of all index images and runs K-means clustering."""
  if len(argv) > 1:
    raise RuntimeError('Too many command-line arguments.')

  # Process output directory.
  if os.path.exists(cmd_args.output_cluster_dir):
    raise RuntimeError(
        'output_cluster_dir = %s already exists. This may indicate that a '
        'previous run already wrote checkpoints in this directory, which would '
        'lead to incorrect training. Please re-run this script by specifying an'
        ' inexisting directory.' % cmd_args.output_cluster_dir)
  else:
    os.makedirs(cmd_args.output_cluster_dir)

  # Read list of index images from dataset file.
  print('Reading list of index images from dataset file...')
  _, index_list, _ = dataset.ReadDatasetFile(cmd_args.dataset_file_path)
  num_images = len(index_list)
  print('done! Found %d images' % num_images)

  # Loop over list of index images and collect DELF features.
  features_for_clustering = []
  # NOTE: time.clock() was removed in Python 3.8 (and measured CPU time on
  # Unix); time.time() gives the wall-clock elapsed time intended here.
  start = time.time()
  print('Starting to collect features from index images...')
  for i in range(num_images):
    if i > 0 and i % _STATUS_CHECK_ITERATIONS == 0:
      elapsed = (time.time() - start)
      print('Processing index image %d out of %d, last %d '
            'images took %f seconds' %
            (i, num_images, _STATUS_CHECK_ITERATIONS, elapsed))
      start = time.time()

    features_filename = index_list[i] + _DELF_EXTENSION
    features_fullpath = os.path.join(cmd_args.features_dir, features_filename)
    _, _, features, _, _ = feature_io.ReadFromFile(features_fullpath)
    if features.size != 0:
      assert features.shape[1] == _DELF_DIM
      for feature in features:
        features_for_clustering.append(feature)

  features_for_clustering = np.array(features_for_clustering, dtype=np.float32)
  print('All features were loaded! There are %d features, each with %d '
        'dimensions' %
        (features_for_clustering.shape[0], features_for_clustering.shape[1]))

  # Run K-means clustering.
  def _get_input_fn():
    """Helper function to create input function and hook for training.

    Returns:
      input_fn: Input function for k-means Estimator training.
      init_hook: Hook used to load data during training.
    """
    init_hook = _IteratorInitHook()

    def _input_fn():
      """Produces tf.data.Dataset object for k-means training.

      Returns:
        Tensor with the data for training.
      """
      features_placeholder = tf.placeholder(tf.float32,
                                            features_for_clustering.shape)
      delf_dataset = tf.data.Dataset.from_tensor_slices((features_placeholder))
      delf_dataset = delf_dataset.shuffle(1000).batch(
          features_for_clustering.shape[0])
      iterator = delf_dataset.make_initializable_iterator()

      def _initializer_fn(sess):
        """Initialize dataset iterator, feed in the data."""
        sess.run(
            iterator.initializer,
            feed_dict={features_placeholder: features_for_clustering})

      init_hook.iterator_initializer_fn = _initializer_fn
      return iterator.get_next()

    return _input_fn, init_hook

  input_fn, init_hook = _get_input_fn()

  kmeans = tf.estimator.experimental.KMeans(
      num_clusters=cmd_args.num_clusters,
      model_dir=cmd_args.output_cluster_dir,
      use_mini_batch=False,
  )

  print('Starting K-means clustering...')
  start = time.time()
  for i in range(cmd_args.num_iterations):
    kmeans.train(input_fn, hooks=[init_hook])
    average_sum_squared_error = kmeans.evaluate(
        input_fn, hooks=[init_hook])['score'] / features_for_clustering.shape[0]
    elapsed = (time.time() - start)
    print('K-means iteration %d (out of %d) took %f seconds, '
          'average-sum-of-squares: %f' %
          (i, cmd_args.num_iterations, elapsed, average_sum_squared_error))
    start = time.time()
  print('K-means clustering finished!')
if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  # Register a 'bool' type so boolean flags can be passed as true/false.
  parser.register('type', 'bool', lambda v: v.lower() == 'true')
  parser.add_argument(
      '--dataset_file_path',
      type=str,
      default='/tmp/gnd_roxford5k.mat',
      help="""
      Dataset file for Revisited Oxford or Paris dataset, in .mat format. The
      list of index images loaded from this file is used to collect local
      features, which are assumed to be in <image_name>.delf file format.
      """)
  parser.add_argument(
      '--features_dir',
      type=str,
      default='/tmp/features',
      help="""
      Directory where DELF feature files are to be found.
      """)
  parser.add_argument(
      '--num_clusters',
      type=int,
      default=1024,
      help="""
      Number of clusters to use.
      """)
  parser.add_argument(
      '--num_iterations',
      type=int,
      default=50,
      help="""
      Number of iterations to use.
      """)
  parser.add_argument(
      '--output_cluster_dir',
      type=str,
      default='/tmp/cluster',
      help="""
      Directory where clustering outputs are written to. This directory should
      not exist before running this script; it will be created during
      clustering.
      """)
  # Known flags go to the module-level cmd_args; unknown ones are forwarded
  # to app.run via argv.
  cmd_args, unparsed = parser.parse_known_args()
  app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.