# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import json
import unittest
import os
from contextlib import contextmanager
from hashlib import md5
import time
import pickle
import mock
from six.moves import urllib
from swift.common import direct_client
from swift.common.direct_client import DirectClientException
from swift.common.exceptions import ClientException
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.utils import Timestamp, quote
from swift.common.swob import RESPONSE_REASONS
from swift.common.storage_policy import POLICIES
from six.moves.http_client import HTTPException
from test.unit import patch_policies, debug_logger
class FakeConn(object):
    """Minimal stand-in for a bufferedhttp connection.

    Instances double as both the "connection" and the "response":
    ``getresponse()`` returns ``self``.  The requested status, headers and
    body are supplied at construction time; the raw call arguments of the
    patched ``http_connect_raw`` are captured via
    :meth:`_update_raw_call_args` so tests can assert on them.
    """

    def __init__(self, status, headers=None, body='', **kwargs):
        self.status = status
        try:
            # Map the numeric status to its canonical reason phrase.
            self.reason = RESPONSE_REASONS[self.status][0]
        except Exception:
            # status may be an Exception instance (see getresponse) or an
            # unknown code; fall back to a placeholder reason.
            self.reason = 'Fake'
        self.body = body
        self.resp_headers = HeaderKeyDict()
        if headers:
            self.resp_headers.update(headers)
        # md5 accumulator of everything sent; None until send() is called.
        self.etag = None

    def _update_raw_call_args(self, *args, **kwargs):
        """Record the positional args of http_connect_raw on self."""
        capture_attrs = ('host', 'port', 'method', 'path', 'req_headers',
                         'query_string')
        for attr, value in zip(capture_attrs, args[:len(capture_attrs)]):
            setattr(self, attr, value)
        return self

    def getresponse(self):
        """Return self as the response, or raise if status is an Exception."""
        if self.etag:
            self.resp_headers['etag'] = str(self.etag.hexdigest())
        if isinstance(self.status, Exception):
            raise self.status
        return self

    def getheader(self, header, default=None):
        return self.resp_headers.get(header, default)

    def getheaders(self):
        return self.resp_headers.items()

    def read(self, amt=None):
        """Read the canned body; chunked reads need a BytesIO body."""
        if isinstance(self.body, io.BytesIO):
            return self.body.read(amt)
        elif amt is None:
            return self.body
        else:
            # Bug fix: the error was previously *returned* instead of
            # raised, handing callers an Exception object as body data.
            raise Exception('Not a StringIO entry')

    def send(self, data):
        # Fold all sent bytes into an md5 so tests can check what was
        # written on the wire (exposed as the response etag).
        if not self.etag:
            self.etag = md5()
        self.etag.update(data)
@contextmanager
def mocked_http_conn(*args, **kwargs):
    """Patch ``bufferedhttp.http_connect_raw`` with a single FakeConn.

    All constructor arguments are forwarded to :class:`FakeConn`; every
    connection attempt inside the ``with`` block records its raw call
    arguments on that one instance, which is yielded for assertions.
    """
    fake_conn = FakeConn(*args, **kwargs)

    # PEP 8 (E731): use a def rather than assigning a lambda to a name.
    def mock_http_conn(*call_args, **call_kwargs):
        return fake_conn._update_raw_call_args(*call_args, **call_kwargs)

    with mock.patch('swift.common.bufferedhttp.http_connect_raw',
                    new=mock_http_conn):
        yield fake_conn
@patch_policies
class TestDirectClient(unittest.TestCase):
def setUp(self):
self.node = json.loads(json.dumps({ # json roundtrip to ring-like
'ip': '1.2.3.4', 'port': '6200', 'device': 'sda',
'replication_ip': '1.2.3.5', 'replication_port': '7000'}))
self.part = '0'
self.account = u'\u062a account'
self.container = u'\u062a container'
self.obj = u'\u062a obj/name'
self.account_path = '/sda/0/%s' % urllib.parse.quote(
self.account.encode('utf-8'))
self.container_path = '/sda/0/%s/%s' % tuple(
urllib.parse.quote(p.encode('utf-8')) for p in (
self.account, self.container))
self.obj_path = '/sda/0/%s/%s/%s' % tuple(
urllib.parse.quote(p.encode('utf-8')) for p in (
self.account, self.container, self.obj))
self.user_agent = 'direct-client %s' % os.getpid()
class FakeTimeout(BaseException):
def __enter__(self):
return self
def __exit__(self, typ, value, tb):
pass
patcher = mock.patch.object(direct_client, 'Timeout', FakeTimeout)
patcher.start()
self.addCleanup(patcher.stop)
def test_gen_headers(self):
stub_user_agent = 'direct-client %s' % os.getpid()
headers = direct_client.gen_headers(add_ts=False)
self.assertEqual(dict(headers), {
'User-Agent': stub_user_agent,
'X-Backend-Allow-Reserved-Names': 'true',
})
with mock.patch('swift.common.utils.Timestamp.now',
return_value=Timestamp('123.45')):
headers = direct_client.gen_headers()
self.assertEqual(dict(headers), {
'User-Agent': stub_user_agent,
'X-Backend-Allow-Reserved-Names': 'true',
'X-Timestamp': '0000000123.45000',
})
headers = direct_client.gen_headers(hdrs_in={'x-timestamp': '15'})
self.assertEqual(dict(headers), {
'User-Agent': stub_user_agent,
'X-Backend-Allow-Reserved-Names': 'true',
'X-Timestamp': '15',
})
with mock.patch('swift.common.utils.Timestamp.now',
return_value=Timestamp('12345.6789')):
headers = direct_client.gen_headers(hdrs_in={'foo-bar': '63'})
self.assertEqual(dict(headers), {
'User-Agent': stub_user_agent,
'Foo-Bar': '63',
'X-Backend-Allow-Reserved-Names': 'true',
'X-Timestamp': '0000012345.67890',
})
hdrs_in = {'foo-bar': '55'}
headers = direct_client.gen_headers(hdrs_in, add_ts=False)
self.assertEqual(dict(headers), {
'User-Agent': stub_user_agent,
'Foo-Bar': '55',
'X-Backend-Allow-Reserved-Names': 'true',
})
with mock.patch('swift.common.utils.Timestamp.now',
return_value=Timestamp('12345')):
headers = direct_client.gen_headers(hdrs_in={'user-agent': '32'})
self.assertEqual(dict(headers), {
'User-Agent': '32',
'X-Backend-Allow-Reserved-Names': 'true',
'X-Timestamp': '0000012345.00000',
})
hdrs_in = {'user-agent': '47'}
headers = direct_client.gen_headers(hdrs_in, add_ts=False)
self.assertEqual(dict(headers), {
'User-Agent': '47',
'X-Backend-Allow-Reserved-Names': 'true',
})
for policy in POLICIES:
for add_ts in (True, False):
with mock.patch('swift.common.utils.Timestamp.now',
return_value=Timestamp('123456789')):
headers = direct_client.gen_headers(
{'X-Backend-Storage-Policy-Index': policy.idx},
add_ts=add_ts)
expected = {
'User-Agent': stub_user_agent,
'X-Backend-Storage-Policy-Index': str(policy.idx),
'X-Backend-Allow-Reserved-Names': 'true',
}
if add_ts:
expected['X-Timestamp'] = '0123456789.00000'
self.assertEqual(dict(headers), expected)
def test_direct_get_account(self):
def do_test(req_params):
stub_headers = HeaderKeyDict({
'X-Account-Container-Count': '1',
'X-Account-Object-Count': '1',
'X-Account-Bytes-Used': '1',
'X-Timestamp': '1234567890',
'X-PUT-Timestamp': '1234567890'})
body = b'[{"count": 1, "bytes": 20971520, "name": "c1"}]'
with mocked_http_conn(200, stub_headers, body) as conn:
resp_headers, resp = direct_client.direct_get_account(
self.node, self.part, self.account, **req_params)
try:
self.assertEqual(conn.method, 'GET')
self.assertEqual(conn.path, self.account_path)
self.assertEqual(conn.req_headers['user-agent'],
self.user_agent)
self.assertEqual(resp_headers, stub_headers)
self.assertEqual(json.loads(body), resp)
self.assertIn('format=json', conn.query_string)
for k, v in req_params.items():
if v is None:
self.assertNotIn('&%s' % k, conn.query_string)
else:
self.assertIn('&%s=%s' % (k, v), conn.query_string)
except AssertionError as err:
self.fail('Failed with params %s: %s' % (req_params, err))
test_params = (dict(marker=marker, prefix=prefix, delimiter=delimiter,
limit=limit, end_marker=end_marker, reverse=reverse)
for marker in (None, 'my-marker')
for prefix in (None, 'my-prefix')
for delimiter in (None, 'my-delimiter')
for limit in (None, 1000)
for end_marker in (None, 'my-endmarker')
for reverse in (None, 'on'))
for params in test_params:
do_test(params)
def test_direct_client_exception(self):
stub_headers = {'X-Trans-Id': 'txb5f59485c578460f8be9e-0053478d09'}
body = 'a server error has occurred'
with mocked_http_conn(500, stub_headers, body):
with self.assertRaises(ClientException) as raised:
direct_client.direct_get_account(self.node, self.part,
self.account)
self.assertEqual(raised.exception.http_status, 500)
expected_err_msg_parts = (
'Account server %s:%s' % (self.node['ip'], self.node['port']),
'GET %r' % self.account_path,
'status 500',
)
for item in expected_err_msg_parts:
self.assertIn(item, str(raised.exception))
self.assertEqual(raised.exception.http_host, self.node['ip'])
self.assertEqual(raised.exception.http_port, self.node['port'])
self.assertEqual(raised.exception.http_device, self.node['device'])
self.assertEqual(raised.exception.http_status, 500)
self.assertEqual(raised.exception.http_reason, 'Internal Error')
self.assertEqual(raised.exception.http_headers, stub_headers)
def test_direct_get_account_no_content_does_not_parse_body(self):
headers = {
'X-Account-Container-Count': '1',
'X-Account-Object-Count': '1',
'X-Account-Bytes-Used': '1',
'X-Timestamp': '1234567890',
'X-Put-Timestamp': '1234567890'}
with mocked_http_conn(204, headers) as conn:
resp_headers, resp = direct_client.direct_get_account(
self.node, self.part, self.account)
self.assertEqual(conn.method, 'GET')
self.assertEqual(conn.path, self.account_path)
self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
self.assertDictEqual(resp_headers, headers)
self.assertEqual([], resp)
def test_direct_get_account_error(self):
with mocked_http_conn(500) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_get_account(
self.node, self.part, self.account)
self.assertEqual(conn.method, 'GET')
self.assertEqual(conn.path, self.account_path)
self.assertEqual(raised.exception.http_status, 500)
self.assertTrue('GET' in str(raised.exception))
def test_direct_delete_account(self):
part = '0'
account = 'a'
mock_path = 'swift.common.bufferedhttp.http_connect_raw'
with mock.patch(mock_path) as fake_connect:
fake_connect.return_value.getresponse.return_value.status = 200
direct_client.direct_delete_account(self.node, part, account)
args, kwargs = fake_connect.call_args
ip = args[0]
self.assertEqual(self.node['ip'], ip)
port = args[1]
self.assertEqual(self.node['port'], port)
method = args[2]
self.assertEqual('DELETE', method)
path = args[3]
self.assertEqual('/sda/0/a', path)
headers = args[4]
self.assertIn('X-Timestamp', headers)
self.assertIn('User-Agent', headers)
def test_direct_delete_account_replication_net(self):
part = '0'
account = 'a'
mock_path = 'swift.common.bufferedhttp.http_connect_raw'
with mock.patch(mock_path) as fake_connect:
fake_connect.return_value.getresponse.return_value.status = 200
direct_client.direct_delete_account(
self.node, part, account,
headers={'X-Backend-Use-Replication-Network': 't'})
args, kwargs = fake_connect.call_args
ip = args[0]
self.assertEqual(self.node['replication_ip'], ip)
self.assertNotEqual(self.node['ip'], ip)
port = args[1]
self.assertEqual(self.node['replication_port'], port)
self.assertNotEqual(self.node['port'], port)
method = args[2]
self.assertEqual('DELETE', method)
path = args[3]
self.assertEqual('/sda/0/a', path)
headers = args[4]
self.assertIn('X-Timestamp', headers)
self.assertIn('User-Agent', headers)
def test_direct_delete_account_failure(self):
part = '0'
account = 'a'
with mocked_http_conn(500) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_delete_account(self.node, part, account)
self.assertEqual(self.node['ip'], conn.host)
self.assertEqual(self.node['port'], conn.port)
self.assertEqual('DELETE', conn.method)
self.assertEqual('/sda/0/a', conn.path)
self.assertIn('X-Timestamp', conn.req_headers)
self.assertIn('User-Agent', conn.req_headers)
self.assertEqual(raised.exception.http_status, 500)
def test_direct_head_container(self):
headers = HeaderKeyDict(key='value')
with mocked_http_conn(200, headers) as conn:
resp = direct_client.direct_head_container(
self.node, self.part, self.account, self.container)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'HEAD')
self.assertEqual(conn.path, self.container_path)
self.assertEqual(conn.req_headers['user-agent'],
self.user_agent)
self.assertEqual(headers, resp)
def test_direct_head_container_replication_net(self):
headers = HeaderKeyDict(key='value')
with mocked_http_conn(200, headers) as conn:
resp = direct_client.direct_head_container(
self.node, self.part, self.account, self.container,
headers={'X-Backend-Use-Replication-Network': 'on'})
self.assertEqual(conn.host, self.node['replication_ip'])
self.assertEqual(conn.port, self.node['replication_port'])
self.assertNotEqual(conn.host, self.node['ip'])
self.assertNotEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'HEAD')
self.assertEqual(conn.path, self.container_path)
self.assertEqual(conn.req_headers['user-agent'],
self.user_agent)
self.assertEqual(headers, resp)
def test_direct_head_container_error(self):
headers = HeaderKeyDict(key='value')
with mocked_http_conn(503, headers) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_head_container(
self.node, self.part, self.account, self.container)
# check request
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'HEAD')
self.assertEqual(conn.path, self.container_path)
self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
self.assertEqual(raised.exception.http_status, 503)
self.assertEqual(raised.exception.http_headers, headers)
self.assertTrue('HEAD' in str(raised.exception))
def test_direct_head_container_deleted(self):
important_timestamp = Timestamp.now().internal
headers = HeaderKeyDict({'X-Backend-Important-Timestamp':
important_timestamp})
with mocked_http_conn(404, headers) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_head_container(
self.node, self.part, self.account, self.container)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'HEAD')
self.assertEqual(conn.path, self.container_path)
self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
self.assertEqual(raised.exception.http_status, 404)
self.assertEqual(raised.exception.http_headers, headers)
def test_direct_get_container(self):
def do_test(req_params):
headers = HeaderKeyDict({'key': 'value'})
body = (b'[{"hash": "8f4e3", "last_modified": "317260", '
b'"bytes": 209}]')
with mocked_http_conn(200, headers, body) as conn:
resp_headers, resp = direct_client.direct_get_container(
self.node, self.part, self.account, self.container,
**req_params)
try:
self.assertEqual(conn.method, 'GET')
self.assertEqual(conn.path, self.container_path)
self.assertEqual(conn.req_headers['user-agent'],
self.user_agent)
self.assertEqual(headers, resp_headers)
self.assertEqual(json.loads(body), resp)
self.assertIn('format=json', conn.query_string)
for k, v in req_params.items():
if v is None:
self.assertNotIn('&%s' % k, conn.query_string)
else:
self.assertIn('&%s=%s' % (k, v), conn.query_string)
except AssertionError as err:
self.fail('Failed with params %s: %s' % (req_params, err))
test_params = (dict(marker=marker, prefix=prefix, delimiter=delimiter,
limit=limit, end_marker=end_marker, reverse=reverse)
for marker in (None, 'my-marker')
for prefix in (None, 'my-prefix')
for delimiter in (None, 'my-delimiter')
for limit in (None, 1000)
for end_marker in (None, 'my-endmarker')
for reverse in (None, 'on'))
for params in test_params:
do_test(params)
def test_direct_get_container_no_content_does_not_decode_body(self):
headers = {}
body = ''
with mocked_http_conn(204, headers, body) as conn:
resp_headers, resp = direct_client.direct_get_container(
self.node, self.part, self.account, self.container)
self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
self.assertEqual(headers, resp_headers)
self.assertEqual([], resp)
def test_direct_delete_container(self):
with mocked_http_conn(200) as conn:
direct_client.direct_delete_container(
self.node, self.part, self.account, self.container)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'DELETE')
self.assertEqual(conn.path, self.container_path)
def test_direct_delete_container_replication_net(self):
with mocked_http_conn(200) as conn:
direct_client.direct_delete_container(
self.node, self.part, self.account, self.container,
headers={'X-Backend-Use-Replication-Network': '1'})
self.assertEqual(conn.host, self.node['replication_ip'])
self.assertEqual(conn.port, self.node['replication_port'])
self.assertNotEqual(conn.host, self.node['ip'])
self.assertNotEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'DELETE')
self.assertEqual(conn.path, self.container_path)
def test_direct_delete_container_with_timestamp(self):
# ensure timestamp is different from any that might be auto-generated
timestamp = Timestamp(time.time() - 100)
headers = {'X-Timestamp': timestamp.internal}
with mocked_http_conn(200) as conn:
direct_client.direct_delete_container(
self.node, self.part, self.account, self.container,
headers=headers)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'DELETE')
self.assertEqual(conn.path, self.container_path)
self.assertTrue('X-Timestamp' in conn.req_headers)
self.assertEqual(timestamp, conn.req_headers['X-Timestamp'])
def test_direct_delete_container_error(self):
with mocked_http_conn(500) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_delete_container(
self.node, self.part, self.account, self.container)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'DELETE')
self.assertEqual(conn.path, self.container_path)
self.assertEqual(raised.exception.http_status, 500)
self.assertTrue('DELETE' in str(raised.exception))
def test_direct_put_container(self):
body = b'Let us begin with a quick introduction'
headers = {'x-foo': 'bar', 'Content-Length': str(len(body)),
'Content-Type': 'application/json',
'User-Agent': 'my UA'}
with mocked_http_conn(204) as conn:
rv = direct_client.direct_put_container(
self.node, self.part, self.account, self.container,
contents=body, headers=headers)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'PUT')
self.assertEqual(conn.path, self.container_path)
self.assertEqual(conn.req_headers['Content-Length'],
str(len(body)))
self.assertEqual(conn.req_headers['Content-Type'],
'application/json')
self.assertEqual(conn.req_headers['User-Agent'], 'my UA')
self.assertTrue('x-timestamp' in conn.req_headers)
self.assertEqual('bar', conn.req_headers.get('x-foo'))
self.assertEqual(md5(body).hexdigest(), conn.etag.hexdigest())
self.assertIsNone(rv)
def test_direct_put_container_chunked(self):
body = b'Let us begin with a quick introduction'
headers = {'x-foo': 'bar', 'Content-Type': 'application/json'}
with mocked_http_conn(204) as conn:
rv = direct_client.direct_put_container(
self.node, self.part, self.account, self.container,
contents=body, headers=headers)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'PUT')
self.assertEqual(conn.path, self.container_path)
self.assertEqual(conn.req_headers['Transfer-Encoding'], 'chunked')
self.assertEqual(conn.req_headers['Content-Type'],
'application/json')
self.assertTrue('x-timestamp' in conn.req_headers)
self.assertEqual('bar', conn.req_headers.get('x-foo'))
self.assertNotIn('Content-Length', conn.req_headers)
expected_sent = b'%0x\r\n%s\r\n0\r\n\r\n' % (len(body), body)
self.assertEqual(md5(expected_sent).hexdigest(),
conn.etag.hexdigest())
self.assertIsNone(rv)
def test_direct_put_container_fail(self):
with mock.patch('swift.common.bufferedhttp.http_connect_raw',
side_effect=Exception('conn failed')):
with self.assertRaises(Exception) as cm:
direct_client.direct_put_container(
self.node, self.part, self.account, self.container)
self.assertEqual('conn failed', str(cm.exception))
with mocked_http_conn(Exception('resp failed')):
with self.assertRaises(Exception) as cm:
direct_client.direct_put_container(
self.node, self.part, self.account, self.container)
self.assertEqual('resp failed', str(cm.exception))
def test_direct_put_container_object(self):
headers = {'x-foo': 'bar'}
with mocked_http_conn(204) as conn:
rv = direct_client.direct_put_container_object(
self.node, self.part, self.account, self.container, self.obj,
headers=headers)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'PUT')
self.assertEqual(conn.path, self.obj_path)
self.assertTrue('x-timestamp' in conn.req_headers)
self.assertEqual('bar', conn.req_headers.get('x-foo'))
self.assertIsNone(rv)
def test_direct_put_container_object_error(self):
with mocked_http_conn(500) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_put_container_object(
self.node, self.part, self.account, self.container,
self.obj)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'PUT')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(raised.exception.http_status, 500)
self.assertTrue('PUT' in str(raised.exception))
def test_direct_delete_container_object(self):
with mocked_http_conn(204) as conn:
rv = direct_client.direct_delete_container_object(
self.node, self.part, self.account, self.container, self.obj)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'DELETE')
self.assertEqual(conn.path, self.obj_path)
self.assertIsNone(rv)
def test_direct_delete_container_obj_error(self):
with mocked_http_conn(500) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_delete_container_object(
self.node, self.part, self.account, self.container,
self.obj)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'DELETE')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(raised.exception.http_status, 500)
self.assertTrue('DELETE' in str(raised.exception))
def test_direct_head_object(self):
headers = HeaderKeyDict({'x-foo': 'bar'})
with mocked_http_conn(200, headers) as conn:
resp = direct_client.direct_head_object(
self.node, self.part, self.account, self.container,
self.obj, headers=headers)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'HEAD')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
self.assertEqual('bar', conn.req_headers.get('x-foo'))
self.assertIn('x-timestamp', conn.req_headers)
self.assertEqual(headers, resp)
def test_direct_head_object_error(self):
with mocked_http_conn(500) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_head_object(
self.node, self.part, self.account, self.container,
self.obj)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'HEAD')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(raised.exception.http_status, 500)
self.assertTrue('HEAD' in str(raised.exception))
def test_direct_head_object_not_found(self):
important_timestamp = Timestamp.now().internal
stub_headers = {'X-Backend-Important-Timestamp': important_timestamp}
with mocked_http_conn(404, headers=stub_headers) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_head_object(
self.node, self.part, self.account, self.container,
self.obj)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'HEAD')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(raised.exception.http_status, 404)
self.assertEqual(
raised.exception.http_headers['x-backend-important-timestamp'],
important_timestamp)
def test_direct_get_object(self):
contents = io.BytesIO(b'123456')
with mocked_http_conn(200, body=contents) as conn:
resp_header, obj_body = direct_client.direct_get_object(
self.node, self.part, self.account, self.container, self.obj)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'GET')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(obj_body, contents.getvalue())
def test_direct_get_object_error(self):
with mocked_http_conn(500) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_get_object(
self.node, self.part,
self.account, self.container, self.obj)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'GET')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(raised.exception.http_status, 500)
self.assertTrue('GET' in str(raised.exception))
def test_direct_get_object_chunks(self):
contents = io.BytesIO(b'123456')
with mocked_http_conn(200, body=contents) as conn:
resp_header, obj_body = direct_client.direct_get_object(
self.node, self.part, self.account, self.container, self.obj,
resp_chunk_size=2)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual('GET', conn.method)
self.assertEqual(self.obj_path, conn.path)
self.assertEqual([b'12', b'34', b'56'], list(obj_body))
def test_direct_post_object(self):
headers = {'Key': 'value'}
resp_headers = []
with mocked_http_conn(200, resp_headers) as conn:
direct_client.direct_post_object(
self.node, self.part, self.account, self.container, self.obj,
headers)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'POST')
self.assertEqual(conn.path, self.obj_path)
for header in headers:
self.assertEqual(conn.req_headers[header], headers[header])
def test_direct_post_object_error(self):
headers = {'Key': 'value'}
with mocked_http_conn(500) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_post_object(
self.node, self.part, self.account, self.container,
self.obj, headers)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'POST')
self.assertEqual(conn.path, self.obj_path)
for header in headers:
self.assertEqual(conn.req_headers[header], headers[header])
self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
self.assertTrue('x-timestamp' in conn.req_headers)
self.assertEqual(raised.exception.http_status, 500)
self.assertTrue('POST' in str(raised.exception))
def test_direct_delete_object(self):
with mocked_http_conn(200) as conn:
resp = direct_client.direct_delete_object(
self.node, self.part, self.account, self.container, self.obj)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'DELETE')
self.assertEqual(conn.path, self.obj_path)
self.assertIsNone(resp)
def test_direct_delete_object_with_timestamp(self):
# ensure timestamp is different from any that might be auto-generated
timestamp = Timestamp(time.time() - 100)
headers = {'X-Timestamp': timestamp.internal}
with mocked_http_conn(200) as conn:
direct_client.direct_delete_object(
self.node, self.part, self.account, self.container, self.obj,
headers=headers)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'DELETE')
self.assertEqual(conn.path, self.obj_path)
self.assertTrue('X-Timestamp' in conn.req_headers)
self.assertEqual(timestamp, conn.req_headers['X-Timestamp'])
def test_direct_delete_object_error(self):
with mocked_http_conn(503) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_delete_object(
self.node, self.part, self.account, self.container,
self.obj)
self.assertEqual(conn.method, 'DELETE')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(raised.exception.http_status, 503)
self.assertTrue('DELETE' in str(raised.exception))
def test_direct_get_suffix_hashes(self):
data = {'a83': 'c130a2c17ed45102aada0f4eee69494ff'}
body = pickle.dumps(data)
with mocked_http_conn(200, {}, body) as conn:
resp = direct_client.direct_get_suffix_hashes(self.node,
self.part, ['a83'])
self.assertEqual(conn.method, 'REPLICATE')
self.assertEqual(conn.path, '/sda/0/a83')
self.assertEqual(conn.host, self.node['replication_ip'])
self.assertEqual(conn.port, self.node['replication_port'])
self.assertEqual(data, resp)
def _test_direct_get_suffix_hashes_fail(self, status_code):
with mocked_http_conn(status_code):
with self.assertRaises(DirectClientException) as cm:
direct_client.direct_get_suffix_hashes(
self.node, self.part, ['a83', 'b52'])
self.assertIn('REPLICATE', cm.exception.args[0])
self.assertIn(quote('/%s/%s/a83-b52'
% (self.node['device'], self.part)),
cm.exception.args[0])
self.assertIn(self.node['replication_ip'], cm.exception.args[0])
self.assertIn(self.node['replication_port'], cm.exception.args[0])
self.assertEqual(self.node['replication_ip'], cm.exception.http_host)
self.assertEqual(self.node['replication_port'], cm.exception.http_port)
self.assertEqual(self.node['device'], cm.exception.http_device)
self.assertEqual(status_code, cm.exception.http_status)
def test_direct_get_suffix_hashes_503(self):
self._test_direct_get_suffix_hashes_fail(503)
def test_direct_get_suffix_hashes_507(self):
self._test_direct_get_suffix_hashes_fail(507)
def test_direct_put_object_with_content_length(self):
contents = io.BytesIO(b'123456')
with mocked_http_conn(200) as conn:
resp = direct_client.direct_put_object(
self.node, self.part, self.account, self.container, self.obj,
contents, 6)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'PUT')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(md5(b'123456').hexdigest(), resp)
def test_direct_put_object_fail(self):
contents = io.BytesIO(b'123456')
with mocked_http_conn(500) as conn:
with self.assertRaises(ClientException) as raised:
direct_client.direct_put_object(
self.node, self.part, self.account, self.container,
self.obj, contents)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'PUT')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(raised.exception.http_status, 500)
def test_direct_put_object_chunked(self):
contents = io.BytesIO(b'123456')
with mocked_http_conn(200) as conn:
resp = direct_client.direct_put_object(
self.node, self.part, self.account, self.container, self.obj,
contents)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual(conn.method, 'PUT')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(md5(b'6\r\n123456\r\n0\r\n\r\n').hexdigest(), resp)
def test_direct_put_object_args(self):
# One test to cover all missing checks
contents = ""
with mocked_http_conn(200) as conn:
resp = direct_client.direct_put_object(
self.node, self.part, self.account, self.container, self.obj,
contents, etag="testing-etag", content_type='Text')
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual('PUT', conn.method)
self.assertEqual(self.obj_path, conn.path)
self.assertEqual(conn.req_headers['Content-Length'], '0')
self.assertEqual(conn.req_headers['Content-Type'], 'Text')
self.assertEqual(md5(b'0\r\n\r\n').hexdigest(), resp)
def test_direct_put_object_header_content_length(self):
contents = io.BytesIO(b'123456')
stub_headers = HeaderKeyDict({
'Content-Length': '6'})
with mocked_http_conn(200) as conn:
resp = direct_client.direct_put_object(
self.node, self.part, self.account, self.container, self.obj,
contents, headers=stub_headers)
self.assertEqual(conn.host, self.node['ip'])
self.assertEqual(conn.port, self.node['port'])
self.assertEqual('PUT', conn.method)
self.assertEqual(conn.req_headers['Content-length'], '6')
self.assertEqual(md5(b'123456').hexdigest(), resp)
def test_retry(self):
headers = HeaderKeyDict({'key': 'value'})
with mocked_http_conn(200, headers) as conn:
attempts, resp = direct_client.retry(
direct_client.direct_head_object, self.node, self.part,
self.account, self.container, self.obj)
self.assertEqual(conn.method, 'HEAD')
self.assertEqual(conn.path, self.obj_path)
self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
self.assertEqual(headers, resp)
self.assertEqual(attempts, 1)
def test_retry_client_exception(self):
    """Persistent 500s are retried, logged, then raised as ClientException."""
    logger = debug_logger('direct-client-test')

    with mock.patch('swift.common.direct_client.sleep') as mock_sleep, \
            mocked_http_conn(500) as conn:
        with self.assertRaises(direct_client.ClientException) as caught:
            direct_client.retry(direct_client.direct_delete_object,
                                self.node, self.part,
                                self.account, self.container, self.obj,
                                retries=2, error_log=logger.error)
    self.assertEqual('DELETE', conn.method)

    err = caught.exception
    self.assertEqual(err.http_status, 500)
    # The exception message identifies the request and the target node.
    self.assertIn('DELETE', err.args[0])
    self.assertIn(self.obj_path,
                  err.args[0])
    self.assertIn(self.node['ip'], err.args[0])
    self.assertIn(self.node['port'], err.args[0])
    # Node details are also exposed as structured attributes.
    self.assertEqual(self.node['ip'], err.http_host)
    self.assertEqual(self.node['port'], err.http_port)
    self.assertEqual(self.node['device'], err.http_device)
    self.assertEqual(500, err.http_status)
    # Two retries -> two sleeps, with linear backoff (1s then 2s).
    self.assertEqual([mock.call(1), mock.call(2)],
                     mock_sleep.call_args_list)
    error_lines = logger.get_lines_for_level('error')
    # The initial attempt plus both retries were each logged.
    self.assertEqual(3, len(error_lines))
    for line in error_lines:
        self.assertIn('500 Internal Error', line)
def test_retry_http_exception(self):
    """Low-level HTTPExceptions are retried, logged, and re-raised as-is."""
    logger = debug_logger('direct-client-test')

    with mock.patch('swift.common.direct_client.sleep') as mock_sleep, \
            mocked_http_conn(HTTPException('Kaboom!')) as conn:
        with self.assertRaises(HTTPException) as caught:
            direct_client.retry(direct_client.direct_delete_object,
                                self.node, self.part,
                                self.account, self.container, self.obj,
                                retries=2, error_log=logger.error)
    self.assertEqual('DELETE', conn.method)
    self.assertEqual('Kaboom!', str(caught.exception))
    # Two retries -> two sleeps, with linear backoff (1s then 2s).
    self.assertEqual([mock.call(1), mock.call(2)],
                     mock_sleep.call_args_list)
    error_lines = logger.get_lines_for_level('error')
    # The initial attempt plus both retries were each logged.
    self.assertEqual(3, len(error_lines))
    for line in error_lines:
        self.assertIn('Kaboom!', line)
class TestUTF8DirectClient(TestDirectClient):
    """Re-run every TestDirectClient case with UTF-8 byte-string names."""

    def setUp(self):
        super(TestUTF8DirectClient, self).setUp()
        # Swap the text account/container/object names for their
        # UTF-8-encoded byte-string equivalents.
        for attr in ('account', 'container', 'obj'):
            setattr(self, attr, getattr(self, attr).encode('utf-8'))
# Allow running this test module directly with the stdlib runner.
if __name__ == '__main__':
    unittest.main()
|
|
import sys
import logging
from urllib2 import Request, urlopen, HTTPError
from urlparse import urlparse
from email.utils import formatdate
import PyV8, w3c
import BeautifulSoup
class Navigator(PyV8.JSClass):
    """Base JS ``navigator`` object exposed to scripts run under PyV8.

    Subclasses emulate a concrete browser by implementing the abstract
    properties below (appName, userAgent, ...).
    """

    log = logging.getLogger("navigator.base")

    def __init__(self, win=None):
        self._win = win

    @property
    def window(self):
        """the window this navigator belongs to"""
        return self._win

    @property
    def appCodeName(self):
        """the code name of the browser"""
        raise NotImplementedError()

    @property
    def appName(self):
        """the name of the browser"""
        raise NotImplementedError()

    @property
    def appVersion(self):
        """the version information of the browser"""
        raise NotImplementedError()

    @property
    def cookieEnabled(self):
        """whether cookies are enabled in the browser"""
        raise NotImplementedError()

    @property
    def platform(self):
        """which platform the browser is compiled"""
        raise NotImplementedError()

    @property
    def userAgent(self):
        """the user-agent header sent by the browser to the server"""
        raise NotImplementedError()

    def javaEnabled(self):
        """whether or not the browser has Java enabled"""
        raise NotImplementedError()

    def taintEnabled(self):
        """whether or not the browser has data tainting enabled"""
        raise NotImplementedError()

    def fetch(self, url):
        """Fetch *url* on behalf of the owning window.

        Returns a ``(html, kwds)`` tuple where *kwds* carries the
        referer, cookie and last-modified metadata for the new document.
        Raises HTTPError on a non-200 response.
        """
        self.log.debug("fetching HTML from %s", url)

        request = Request(url)
        request.add_header('User-Agent', self.userAgent)
        request.add_header('Referer', self._win.url)

        if self._win.doc.cookie:
            request.add_header('Cookie', self._win.doc.cookie)

        response = urlopen(request)

        if response.code != 200:
            # log.warning replaces the deprecated log.warn alias.
            self.log.warning("fail to fetch HTML from %s, code=%d, msg=%s",
                             url, response.code, response.msg)
            raise HTTPError(url, response.code, "fail to fetch HTML",
                            response.info(), 0)

        headers = response.info()
        kwds = {'referer': self._win.url}

        # 'key in headers' replaces the deprecated has_key() (removed in
        # Python 3); rfc822.Message supports the containment protocol.
        if 'set-cookie' in headers:
            kwds['cookie'] = headers['set-cookie']
        if 'last-modified' in headers:
            kwds['lastModified'] = headers['last-modified']

        return response.read(), kwds
class InternetExplorer(Navigator):
    """Navigator emulating Internet Explorer 6 on Windows XP."""

    @property
    def appCodeName(self):
        return "Mozilla"

    @property
    def appName(self):
        return "Microsoft Internet Explorer"

    @property
    def appVersion(self):
        return "4.0 (compatible; MSIE 6.0; Windows NT 5.1)"

    @property
    def cookieEnabled(self):
        """whether cookies are enabled in the browser"""
        # BUG FIX: the original did ``raise True`` (raising instead of
        # returning, a TypeError in practice); IE is reported as always
        # having cookies enabled.
        return True

    @property
    def platform(self):
        return "Win32"

    @property
    def userAgent(self):
        return "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)"

    def javaEnabled(self):
        return False

    def taintEnabled(self):
        return False

    @property
    def userLanguage(self):
        """the user's locale, e.g. ``en_US``"""
        import locale
        return locale.getdefaultlocale()[0]
class Location(PyV8.JSClass):
    """JS ``location`` object: a parsed, navigable view of the window URL."""

    def __init__(self, win):
        self.win = win

    @property
    def parts(self):
        """the current URL split into its URL components"""
        return urlparse(self.win.url)

    @property
    def href(self):
        """the entire URL"""
        return self.win.url

    @href.setter
    def href(self, url):
        # Assigning to location.href navigates the window.
        self.win.open(url)

    @property
    def protocol(self):
        """the scheme, e.g. ``http``"""
        return self.parts.scheme

    @property
    def host(self):
        """hostname plus port, e.g. ``localhost:8080``"""
        return self.parts.netloc

    @property
    def hostname(self):
        """the bare hostname"""
        return self.parts.hostname

    @property
    def port(self):
        """the port number, or None when absent"""
        return self.parts.port

    @property
    def pathname(self):
        """the path component"""
        return self.parts.path

    @property
    def search(self):
        """the query string (without the leading '?')"""
        return self.parts.query

    @property
    def hash(self):
        """the fragment (without the leading '#')"""
        return self.parts.fragment

    def assign(self, url):
        """Loads a new HTML document."""
        self.win.open(url)

    def reload(self):
        """Reloads the current page."""
        self.win.open(self.win.url)

    def replace(self, url):
        """Replaces the current document by loading another document at the specified URL."""
        self.win.open(url)
class Screen(PyV8.JSClass):
    """JS ``screen`` object with a fixed geometry and color depth."""

    def __init__(self, width, height, depth=32):
        # Geometry kept as a (width, height) pair; depth in bits/pixel.
        self._size = (width, height)
        self._depth = depth

    @property
    def availWidth(self):
        return self._size[0]

    @property
    def availHeight(self):
        return self._size[1]

    @property
    def width(self):
        return self._size[0]

    @property
    def height(self):
        return self._size[1]

    @property
    def colorDepth(self):
        return self._depth

    @property
    def pixelDepth(self):
        return self._depth
class History(PyV8.JSClass):
    """Per-window navigation history (JS ``history`` object)."""

    def __init__(self, win):
        self._win = win
        self.urls = []
        # Index of the current entry; None until the first update().
        self.pos = None

    @property
    def window(self):
        return self._win

    @property
    def length(self):
        """the number of URLs in the history list"""
        return len(self.urls)

    def back(self):
        """Loads the previous URL in the history list"""
        return self.go(-1)

    def forward(self):
        """Loads the next URL in the history list"""
        return self.go(1)

    def go(self, num_or_url):
        """Loads a specific URL from the history list.

        *num_or_url* is either an integer offset into the history or a
        URL string to open directly.
        """
        try:
            offset = int(num_or_url)
            # Clamp the new position into the valid range before opening.
            self.pos = min(max(0, self.pos + offset), len(self.urls) - 1)
            self._win.open(self.urls[self.pos])
        except ValueError:
            # Not a number: treat the argument as a URL.
            self._win.open(num_or_url)

    def update(self, url, replace=False):
        """Record *url* as the current history entry."""
        if self.pos is None:
            # Very first entry.
            self.urls.append(url)
            self.pos = 0
        elif replace:
            self.urls[self.pos] = url
        elif self.urls[self.pos] != url:
            # Navigating somewhere new discards any "forward" entries.
            del self.urls[self.pos + 1:]
            self.urls.append(url)
            self.pos += 1
class HtmlWindow(PyV8.JSClass):
    """JS ``window`` object bound to a parsed HTML document.

    NOTE: this module is Python 2 code (print statement, raw_input,
    unicode, dict.has_key) and depends on PyV8/w3c/BeautifulSoup.
    """

    log = logging.getLogger("html.window")

    class Timer(object):
        # Record of a setTimeout/setInterval registration; ``repeat``
        # distinguishes intervals from one-shot timeouts.
        def __init__(self, code, repeat, lang='JavaScript'):
            self.code = code
            self.repeat = repeat
            self.lang = lang

    # NOTE(review): class-level list, so registered timers are shared by
    # every HtmlWindow instance -- confirm this is intended.
    timers = []

    def __init__(self, url, dom_or_doc, navigator_or_class=InternetExplorer, name="", target='_blank',
                 parent=None, opener=None, replace=False, screen=None, width=800, height=600, left=0, top=0, **kwds):
        self.url = url
        # Accept either a ready DOM document or raw BeautifulSoup markup.
        self.doc = w3c.getDOMImplementation(dom_or_doc, **kwds) if isinstance(dom_or_doc, BeautifulSoup.BeautifulSoup) else dom_or_doc
        self.doc.window = self
        # Accept either a Navigator class (instantiated here) or an instance.
        self._navigator = navigator_or_class(self) if type(navigator_or_class) == type else navigator_or_class
        self._location = Location(self)
        self._history = History(self)
        self._history.update(url, replace)
        self._target = target
        self._parent = parent
        self._opener = opener
        self._screen = screen or Screen(width, height, 32)
        self._closed = False
        self.name = name
        self.defaultStatus = ""
        self.status = ""
        self._left = left
        self._top = top
        self.innerWidth = width
        self.innerHeight = height
        self.outerWidth = width
        self.outerHeight = height

    @property
    def closed(self):
        """whether a window has been closed or not"""
        return self._closed

    def close(self):
        """Closes the current window"""
        self._closed = True

    @property
    def window(self):
        return self

    @property
    def document(self):
        return self.doc

    def _findAll(self, tags):
        # Search the underlying BeautifulSoup tree for the given tag names.
        return self.doc.doc.findAll(tags, recursive=True)

    @property
    def frames(self):
        """an array of all the frames (including iframes) in the current window"""
        return w3c.HTMLCollection(self.doc, [self.doc.createHTMLElement(self.doc, f) for f in self._findAll(['frame', 'iframe'])])

    @property
    def length(self):
        """the number of frames (including iframes) in a window"""
        return len(self._findAll(['frame', 'iframe']))

    @property
    def history(self):
        """the History object for the window"""
        return self._history

    @property
    def location(self):
        """the Location object for the window"""
        return self._location

    @property
    def navigator(self):
        """the Navigator object for the window"""
        return self._navigator

    @property
    def opener(self):
        """a reference to the window that created the window"""
        return self._opener

    @property
    def pageXOffset(self):
        return 0

    @property
    def pageYOffset(self):
        return 0

    @property
    def parent(self):
        return self._parent

    @property
    def screen(self):
        return self._screen

    @property
    def screenLeft(self):
        return self._left

    @property
    def screenTop(self):
        return self._top

    @property
    def screenX(self):
        return self._left

    @property
    def screenY(self):
        return self._top

    @property
    def self(self):
        return self

    @property
    def top(self):
        return self

    def alert(self, msg):
        """Displays an alert box with a message and an OK button"""
        print "ALERT: ", msg

    def confirm(self, msg):
        """Displays a dialog box with a message and an OK and a Cancel button"""
        ret = raw_input("CONFIRM: %s [Y/n] " % msg)
        return ret in ['', 'y', 'Y', 't', 'T']

    def focus(self):
        """Sets focus to the current window"""
        pass

    def blur(self):
        """Removes focus from the current window"""
        pass

    def moveBy(self, x, y):
        """Moves a window relative to its current position"""
        pass

    def moveTo(self, x, y):
        """Moves a window to the specified position"""
        pass

    def resizeBy(self, w, h):
        """Resizes the window by the specified pixels"""
        pass

    def resizeTo(self, w, h):
        """Resizes the window to the specified width and height"""
        pass

    def scrollBy(self, xnum, ynum):
        """Scrolls the content by the specified number of pixels"""
        pass

    def scrollTo(self, xpos, ypos):
        """Scrolls the content to the specified coordinates"""
        pass

    def setTimeout(self, code, interval, lang="JavaScript"):
        # One-shot timer; returns its index as the timer id.
        timer = HtmlWindow.Timer(code, False, lang)
        self.timers.append((interval, timer))

        return len(self.timers)-1

    def clearTimeout(self, idx):
        # Slot is nulled rather than removed so later ids stay valid.
        self.timers[idx] = None

    def setInterval(self, code, interval, lang="JavaScript"):
        # Repeating timer; returns its index as the timer id.
        timer = HtmlWindow.Timer(code, True, lang)
        self.timers.append((interval, timer))

        return len(self.timers)-1

    def clearInterval(self, idx):
        # Slot is nulled rather than removed so later ids stay valid.
        self.timers[idx] = None

    def createPopup(self):
        raise NotImplementedError()

    def open(self, url=None, name='_blank', specs='', replace=False):
        """Open a new HtmlWindow, optionally fetching *url* via the navigator.

        *specs* is the JS feature string, e.g. ``"width=640, height=480"``;
        only width/height/left/top are honoured.
        """
        self.log.info("window.open(url='%s', name='%s', specs='%s')", url, name, specs)

        if url:
            html, kwds = self._navigator.fetch(url)
        else:
            url = 'about:blank'
            html = ''
            kwds = {}

        dom = BeautifulSoup.BeautifulSoup(html)

        # Parse the comma-separated "key=value" feature specs.
        for spec in specs.split(','):
            spec = [s.strip() for s in spec.split('=')]

            if len(spec) == 2:
                if spec[0] in ['width', 'height', 'left', 'top']:
                    kwds[spec[0]] = int(spec[1])

        if name in ['_blank', '_parent', '_self', '_top']:
            kwds['target'] = name
            name = ''
        else:
            kwds['target'] = '_blank'

        return HtmlWindow(url, dom, self._navigator, name, parent=self, opener=self, replace=replace, **kwds)

    @property
    def context(self):
        # Lazily create the V8 context with this window as global object.
        if not hasattr(self, "_context"):
            self._context = PyV8.JSContext(self)

        return self._context

    def evalScript(self, script, tag=None):
        """Evaluate *script* with the document cursor positioned at *tag*
        (or at the end of the body/document when no tag is given)."""
        if isinstance(script, unicode):
            script = script.encode('utf-8')

        if tag:
            self.doc.current = tag
        else:
            body = self.doc.body
            self.doc.current = body.tag.contents[-1] if body else self.doc.doc.contents[-1]

        self.log.debug("executing script: %s", script)

        with self.context as ctxt:
            ctxt.eval(script)

    def fireOnloadEvents(self):
        """Run every <script> block, then the body onload handler(s)."""
        for tag in self._findAll('script'):
            self.evalScript(tag.string, tag=tag)

        body = self.doc.body
        if body and body.tag.has_key('onload'):
            self.evalScript(body.tag['onload'], tag=body.tag.contents[-1])

        if hasattr(self, 'onload'):
            self.evalScript(self.onload)

    def fireExpiredTimer(self):
        # Timer expiry is not implemented.
        pass

    def Image(self):
        # JS ``new Image()`` constructor.
        return self.doc.createElement('img')
import unittest
# Fixture URL and markup exercised by HtmlWindowTest below: two frames,
# a script that defines an onload handler and writes a <p id='hello'>.
TEST_URL = 'http://localhost:8080/path?query=key#frag'
TEST_HTML = """
<html>
<head>
<title></title>
</head>
<body onload='load()'>
<frame src="#"/>
<iframe src="#"/>
<script>
function load()
{
alert('onload');
}
document.write("<p id='hello'>world</p>");
</script>
</body>
</html>
"""
class HtmlWindowTest(unittest.TestCase):
    """End-to-end checks of HtmlWindow against the fixture document.

    Uses the canonical assertEqual/assertTrue instead of the deprecated
    assertEquals/assert_ aliases (removed in Python 3.12).
    """

    def setUp(self):
        self.doc = w3c.parseString(TEST_HTML)
        self.win = HtmlWindow(TEST_URL, self.doc)

    def testWindow(self):
        # window/self/document all resolve back to the same objects.
        self.assertEqual(self.doc, self.win.document)
        self.assertEqual(self.win, self.win.window)
        self.assertEqual(self.win, self.win.self)

        self.assertFalse(self.win.closed)
        self.win.close()
        self.assertTrue(self.win.closed)

        # The fixture contains one <frame> and one <iframe>.
        self.assertEqual(2, self.win.frames.length)
        self.assertEqual(2, self.win.length)
        self.assertEqual(1, self.win.history.length)

        loc = self.win.location
        self.assertTrue(loc)
        self.assertEqual("frag", loc.hash)
        self.assertEqual("localhost:8080", loc.host)
        self.assertEqual("localhost", loc.hostname)
        self.assertEqual(TEST_URL, loc.href)
        self.assertEqual("/path", loc.pathname)
        self.assertEqual(8080, loc.port)
        self.assertEqual("http", loc.protocol)
        self.assertEqual("query=key", loc.search)

    def testOpen(self):
        url = 'http://www.google.com'
        win = self.win.open(url, specs="width=640, height=480")

        self.assertEqual(url, win.url)
        self.assertTrue(win.document)
        self.assertEqual(url, win.document.URL)
        self.assertEqual('www.google.com', win.document.domain)
        # Geometry from the specs string is applied to the new window.
        self.assertEqual(640, win.innerWidth)
        self.assertEqual(480, win.innerHeight)

    def testScript(self):
        # Running the scripts must document.write the <p id='hello'> node.
        self.win.fireOnloadEvents()

        tag = self.doc.getElementById('hello')
        self.assertEqual(u'P', tag.nodeName)

    def testTimer(self):
        pass
# Verbose logging when run with -v; then hand off to the unittest runner.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG if "-v" in sys.argv else logging.WARN,
                        format='%(asctime)s %(levelname)s %(message)s')
    unittest.main()
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import platform
import stat
import unittest
from telemetry import decorators
from telemetry.internal.platform.tracing_agent import chrome_tracing_agent
from telemetry.internal.platform.tracing_agent import (
chrome_tracing_devtools_manager)
from telemetry.timeline import tracing_config
from devil.android import device_utils
class FakeTracingControllerBackend(object):
    """Minimal stand-in for a tracing controller backend."""

    def __init__(self):
        # The single flag the agent under test reads and writes.
        self.is_tracing_running = False
class FakePlatformBackend(object):
    """Base fake platform with its own fake tracing controller backend."""

    def __init__(self):
        self.tracing_controller_backend = FakeTracingControllerBackend()

    def GetOSName(self):
        # Unknown OS by default; subclasses report a concrete name.
        return ''
class FakeAndroidPlatformBackend(FakePlatformBackend):
    """Fake platform bound to the first healthy attached Android device."""

    def __init__(self):
        super(FakeAndroidPlatformBackend, self).__init__()
        # Any attached device will do; the tests only need one.
        self.device = device_utils.DeviceUtils.HealthyDevices(None)[0]

    def GetOSName(self):
        return 'android'
class FakeDesktopPlatformBackend(FakePlatformBackend):
    """Fake platform reporting the host desktop OS name."""

    # platform.system() value -> telemetry OS name.
    _OS_NAMES = {'Linux': 'linux', 'Darwin': 'mac', 'Windows': 'win'}

    def GetOSName(self):
        # Unknown systems yield None, matching the original if-chain
        # falling off the end.
        return self._OS_NAMES.get(platform.system())
class FakeDevtoolsClient(object):
    """Scriptable stand-in for a DevTools client used by the agent tests."""

    def __init__(self, remote_port):
        self.remote_port = remote_port
        self.is_alive = True
        self.is_tracing_running = False
        # When set, StopChromeTracing simulates a failing client.
        self.will_raise_exception_in_stop_tracing = False

    def IsAlive(self):
        return self.is_alive

    def IsChromeTracingSupported(self):
        return True

    def StartChromeTracing(self, trace_options, filter_string, timeout=10):
        del trace_options, filter_string, timeout  # unused
        self.is_tracing_running = True

    def StopChromeTracing(self, trace_data_builder):
        del trace_data_builder  # unused
        self.is_tracing_running = False
        if self.will_raise_exception_in_stop_tracing:
            raise Exception
class ChromeTracingAgentTest(unittest.TestCase):
    """Tests for ChromeTracingAgent driven by fake platforms and devtools."""

    def setUp(self):
        self.platform1 = FakePlatformBackend()
        self.platform2 = FakePlatformBackend()
        self.platform3 = FakePlatformBackend()

    def StartTracing(self, platform_backend, enable_chrome_trace=True):
        # Helper (not a test): start agent tracing on *platform_backend*
        # and return the agent.
        assert chrome_tracing_agent.ChromeTracingAgent.IsSupported(platform_backend)
        agent = chrome_tracing_agent.ChromeTracingAgent(platform_backend)
        config = tracing_config.TracingConfig()
        config.tracing_category_filter.AddIncludedCategory('foo')
        config.enable_chrome_trace = enable_chrome_trace
        # The controller normally flips this flag; fake it here.
        agent._platform_backend.tracing_controller_backend.is_tracing_running = True
        agent.StartAgentTracing(config, 10)
        return agent

    def StopTracing(self, agent):
        # Helper (not a test): mirror of StartTracing.
        agent._platform_backend.tracing_controller_backend.is_tracing_running = (
            False)
        agent.StopAgentTracing(None)

    def testRegisterDevtoolsClient(self):
        # Registering clients must work before, during, and after tracing.
        chrome_tracing_devtools_manager.RegisterDevToolsClient(
            FakeDevtoolsClient(1), self.platform1)
        chrome_tracing_devtools_manager.RegisterDevToolsClient(
            FakeDevtoolsClient(2), self.platform1)
        chrome_tracing_devtools_manager.RegisterDevToolsClient(
            FakeDevtoolsClient(3), self.platform1)

        tracing_agent_of_platform1 = self.StartTracing(self.platform1)

        chrome_tracing_devtools_manager.RegisterDevToolsClient(
            FakeDevtoolsClient(4), self.platform1)
        chrome_tracing_devtools_manager.RegisterDevToolsClient(
            FakeDevtoolsClient(5), self.platform2)

        self.StopTracing(tracing_agent_of_platform1)
        chrome_tracing_devtools_manager.RegisterDevToolsClient(
            FakeDevtoolsClient(6), self.platform1)

    def testIsSupportWithoutStartupTracingSupport(self):
        # With no devtools clients registered, no platform is supported.
        self.assertFalse(
            chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform1))
        self.assertFalse(
            chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform2))
        self.assertFalse(
            chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform3))

        devtool1 = FakeDevtoolsClient(1)
        devtool2 = FakeDevtoolsClient(2)
        chrome_tracing_devtools_manager.RegisterDevToolsClient(
            devtool1, self.platform1)
        chrome_tracing_devtools_manager.RegisterDevToolsClient(
            devtool2, self.platform2)
        devtool2.is_alive = False

        # Chrome tracing is only supported on platform 1 since only platform 1 has
        # an alive devtool.
        self.assertTrue(
            chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform1))
        self.assertFalse(
            chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform2))
        self.assertFalse(
            chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform3))

    @decorators.Enabled('linux', 'mac', 'win')
    def testIsSupportOnDesktopPlatform(self):
        # Chrome tracing is always supported on desktop platforms because of startup
        # tracing.
        desktop_platform = FakeDesktopPlatformBackend()
        self.assertTrue(
            chrome_tracing_agent.ChromeTracingAgent.IsSupported(desktop_platform))

        devtool = FakeDevtoolsClient(1)
        chrome_tracing_devtools_manager.RegisterDevToolsClient(
            devtool, desktop_platform)
        self.assertTrue(
            chrome_tracing_agent.ChromeTracingAgent.IsSupported(desktop_platform))

    def testStartAndStopTracing(self):
        devtool1 = FakeDevtoolsClient(1)
        devtool2 = FakeDevtoolsClient(2)
        devtool3 = FakeDevtoolsClient(3)
        devtool4 = FakeDevtoolsClient(2)
        # Register devtools 1, 2, 3 on platform1 and devtool 4 on platform 2
        chrome_tracing_devtools_manager.RegisterDevToolsClient(
            devtool1, self.platform1)
        chrome_tracing_devtools_manager.RegisterDevToolsClient(
            devtool2, self.platform1)
        chrome_tracing_devtools_manager.RegisterDevToolsClient(
            devtool3, self.platform1)
        chrome_tracing_devtools_manager.RegisterDevToolsClient(
            devtool4, self.platform2)
        devtool2.is_alive = False

        tracing_agent1 = self.StartTracing(self.platform1)

        # Starting a second agent on an already-tracing platform must fail.
        with self.assertRaises(chrome_tracing_agent.ChromeTracingStartedError):
            self.StartTracing(self.platform1)

        # Only the alive clients of platform1 actually start tracing.
        self.assertTrue(devtool1.is_tracing_running)
        self.assertFalse(devtool2.is_tracing_running)
        self.assertTrue(devtool3.is_tracing_running)
        # Devtool 4 shouldn't have tracing started although it has the same remote
        # port as devtool 2
        self.assertFalse(devtool4.is_tracing_running)

        self.StopTracing(tracing_agent1)
        self.assertFalse(devtool1.is_tracing_running)
        self.assertFalse(devtool2.is_tracing_running)
        self.assertFalse(devtool3.is_tracing_running)
        self.assertFalse(devtool4.is_tracing_running)

        # Test that it should be ok to start & stop tracing on platform1 again.
        tracing_agent1 = self.StartTracing(self.platform1)
        self.StopTracing(tracing_agent1)

        tracing_agent2 = self.StartTracing(self.platform2)
        self.assertTrue(devtool4.is_tracing_running)
        self.StopTracing(tracing_agent2)
        self.assertFalse(devtool4.is_tracing_running)

    def testExceptionRaisedInStopTracing(self):
        devtool1 = FakeDevtoolsClient(1)
        devtool2 = FakeDevtoolsClient(2)
        # Register devtools 1, 2 on platform 1
        chrome_tracing_devtools_manager.RegisterDevToolsClient(
            devtool1, self.platform1)
        chrome_tracing_devtools_manager.RegisterDevToolsClient(
            devtool2, self.platform1)

        tracing_agent1 = self.StartTracing(self.platform1)

        self.assertTrue(devtool1.is_tracing_running)
        self.assertTrue(devtool2.is_tracing_running)

        devtool1.will_raise_exception_in_stop_tracing = True
        with self.assertRaises(chrome_tracing_agent.ChromeTracingStoppedError):
            self.StopTracing(tracing_agent1)
        # Tracing is stopped on both devtools clients even if there is exception.
        self.assertIsNone(tracing_agent1.trace_config)
        self.assertFalse(devtool1.is_tracing_running)
        self.assertFalse(devtool2.is_tracing_running)

        devtool1.is_alive = False
        devtool2.is_alive = False
        # Register devtools 3 on platform 1 should not raise any exception.
        devtool3 = FakeDevtoolsClient(3)
        chrome_tracing_devtools_manager.RegisterDevToolsClient(
            devtool3, self.platform1)

        # Start & Stop tracing on platform 1 should work just fine.
        tracing_agent2 = self.StartTracing(self.platform1)
        self.StopTracing(tracing_agent2)

    @decorators.Enabled('android')
    def testCreateAndRemoveTraceConfigFileOnAndroid(self):
        platform_backend = FakeAndroidPlatformBackend()
        agent = chrome_tracing_agent.ChromeTracingAgent(platform_backend)
        self.assertIsNone(agent.trace_config_file)

        config = tracing_config.TracingConfig()
        agent._CreateTraceConfigFile(config)
        self.assertIsNotNone(agent.trace_config_file)
        self.assertTrue(platform_backend.device.PathExists(agent.trace_config_file))
        config_file_str = platform_backend.device.ReadFile(agent.trace_config_file,
                                                           as_root=True)
        self.assertEqual(agent._CreateTraceConfigFileString(config),
                         config_file_str.strip())

        config_file_path = agent.trace_config_file
        agent._RemoveTraceConfigFile()
        self.assertFalse(platform_backend.device.PathExists(config_file_path))
        self.assertIsNone(agent.trace_config_file)
        # robust to multiple file removal
        agent._RemoveTraceConfigFile()
        self.assertFalse(platform_backend.device.PathExists(config_file_path))
        self.assertIsNone(agent.trace_config_file)

    @decorators.Enabled('linux', 'mac', 'win')
    def testCreateAndRemoveTraceConfigFileOnDesktop(self):
        platform_backend = FakeDesktopPlatformBackend()
        agent = chrome_tracing_agent.ChromeTracingAgent(platform_backend)
        self.assertIsNone(agent.trace_config_file)

        config = tracing_config.TracingConfig()
        agent._CreateTraceConfigFile(config)
        self.assertIsNotNone(agent.trace_config_file)
        self.assertTrue(os.path.exists(agent.trace_config_file))
        # The config file must be world-readable so the browser can read it.
        self.assertTrue(os.stat(agent.trace_config_file).st_mode & stat.S_IROTH)
        with open(agent.trace_config_file, 'r') as f:
            config_file_str = f.read()
            self.assertEqual(agent._CreateTraceConfigFileString(config),
                             config_file_str.strip())

        config_file_path = agent.trace_config_file
        agent._RemoveTraceConfigFile()
        self.assertFalse(os.path.exists(config_file_path))
        self.assertIsNone(agent.trace_config_file)
        # robust to multiple file removal
        agent._RemoveTraceConfigFile()
        self.assertFalse(os.path.exists(config_file_path))
        self.assertIsNone(agent.trace_config_file)
|
|
from datetime import datetime
from app import app
from app.authentication import with_login
from flask import Blueprint, jsonify, request, Response
from app.generate_csv import generate_csv_clean
from app.msol_util import get_next_update_estimation_message_aws
from app.es.awsmetric import AWSMetric
from app.es.awsstat import AWSStat
from app.es.awsdetailedlineitem import AWSDetailedLineitem
from app.aws_keys import with_multiple_aws_accounts
from dateutil.relativedelta import relativedelta
from app.generate_csv import generate_csv
from app.cache import compressed_json, decompressed_json, cache, with_cache
from hashlib import sha256
from .. import AWS_KEY_PROCESSING_INTERVAL_HOURS
import itertools
import calendar
import config
# Blueprint grouping the AWS cost-statistics endpoints below.
aws_cost_stats_bp = Blueprint('aws_cost_stats_bp', __name__)
def cut_cost_by_product(products, cut):
    """Keep at most the first *cut* significant products.

    Products past the cut, and any product cheaper than one cent, are
    folded into a single trailing 'Other Services' entry (which is
    itself omitted when it totals less than one cent).
    """
    kept = []
    other_cost = 0
    for index, product in enumerate(products):
        if index < cut and product['cost'] >= 0.01:
            kept.append(product)
        else:
            other_cost += product['cost']
    if other_cost >= 0.01:
        kept.append({'product': 'Other Services', 'cost': other_cost})
    return kept
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycost', defaults={'nb_months': 3})
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycost/<int:nb_months>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_monthlycost(accounts, nb_months):
    """---
    get:
        tags:
            - aws
        produces:
            - application/json
        description: &desc Get monthly costs
        summary: *desc
        responses:
            200:
                description: List of AWS accounts
                schema:
                    properties:
                        months:
                            type: array
                            items:
                                properties:
                                    month:
                                        type: string
                                    total_cost:
                                        type: number
            403:
                description: Not logged in
            404:
                description: AWS account not registered
    """
    # The decorator guarantees account resolution; at least one is required.
    assert len(accounts) > 0
    now = datetime.utcnow()
    # Window: first instant of the month nb_months-1 months ago ...
    date_from = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0) - relativedelta(months=nb_months - 1)
    # ... through the last instant of the current month.
    date_to = now.replace(day=calendar.monthrange(now.year, now.month)[1],
                          hour=23, minute=59, second=59, microsecond=999999)
    data = AWSDetailedLineitem.get_monthly_cost(keys=[account.get_aws_user_id() for account in accounts],
                                                date_from=date_from,
                                                date_to=date_to)
    return jsonify(data)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/totalcost/<string:time_arg>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_totalcost(accounts, time_arg):
    """---
    get:
        tags:
            - aws
        produces:
            - application/json
        description: &desc Get total cost
        summary: *desc
        responses:
            200:
                description: List of AWS accounts
                schema:
                    properties:
                        months:
                            type: array
                            items:
                                properties:
                                    total_cost:
                                        type: number
            403:
                description: Not logged in
            404:
                description: AWS account not registered
    """
    # The decorator guarantees account resolution; at least one is required.
    assert len(accounts) > 0
    now = datetime.utcnow()
    this_day = now.replace(hour=0, minute=0, second=0, microsecond=0)
    this_month = this_day.replace(day=1)
    # Map the requested window keyword to its start date.
    time_val = {
        'ever': AWSDetailedLineitem.get_first_date([account.get_aws_user_id() for account in accounts]),
        # BUG FIX: subtracting ``this_month.month`` months landed on Dec 1st
        # of the *previous* year (e.g. March 1st - 3 months = Dec 1st).
        # Subtracting ``month - 1`` months yields Jan 1st of the current year.
        'currentyear': this_month - relativedelta(months=this_month.month - 1),
        'currentmonth': this_month,
    }
    # Unknown keywords degenerate to an empty window starting now.
    date_from = time_val.get(time_arg, now)
    date_to = now.replace(hour=23, minute=59, second=59, microsecond=999999)
    raw_data = AWSDetailedLineitem.get_cost(keys=[account.get_aws_user_id() for account in accounts],
                                            date_from=date_from,
                                            date_to=date_to)
    return jsonify(raw_data)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyregion', defaults={'nb_months': 3})
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyregion/<int:nb_months>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_monthlycostbyregion(accounts, nb_months):
    """---
    get:
        tags:
            - aws
        produces:
            - application/json
        description: &desc Get monthly costs summed by region
        summary: *desc
        responses:
            200:
                description: List of AWS accounts
                schema:
                    properties:
                        months:
                            type: array
                            items:
                                properties:
                                    month:
                                        type: string
                                    products:
                                        type: array
                                        items:
                                            properties:
                                                cost:
                                                    type: number
                                                region:
                                                    type: string
            403:
                description: Not logged in
            404:
                description: AWS account not registered
    """
    # The decorator guarantees account resolution; at least one is required.
    assert len(accounts) > 0
    now = datetime.utcnow()
    # Window: first instant of the month nb_months-1 months ago through
    # the last instant of the current month.
    date_from = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0) - relativedelta(months=nb_months - 1)
    date_to = now.replace(day=calendar.monthrange(now.year, now.month)[1],
                          hour=23, minute=59, second=59, microsecond=999999)
    raw_data = AWSDetailedLineitem.get_cost_by_region(keys=[account.get_aws_user_id() for account in accounts],
                                                      date_from=date_from,
                                                      date_to=date_to)['intervals']['buckets']
    # Flatten the Elasticsearch bucket structure into month/region rows.
    res = [
        {
            'month': data['key_as_string'].split('T')[0],
            'regions': [
                {
                    'region': region['key'],
                    'cost': region['cost']['value'],
                }
                for region in data['regions']['buckets']
            ],
        }
        for data in raw_data
    ]
    # Optional CSV export when the 'csv' query parameter is present.
    if 'csv' in request.args:
        return Response(generate_csv(res, 'regions', 'region'), mimetype='text/csv')
    return jsonify(months=res)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyregionbyaccount', defaults={'nb_months': 3})
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyregionbyaccount/<int:nb_months>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_monthlycostbyregionbyaccount(accounts, nb_months):
    """---
    get:
        tags:
            - aws
        produces:
            - application/json
        description: &desc Get monthly costs summed by region for each account
        summary: *desc
        responses:
            200:
                description: List of AWS accounts
                schema:
                    properties:
                        months:
                            type: array
                            items:
                                properties:
                                    month:
                                        type: string
                                    products:
                                        type: array
                                        items:
                                            properties:
                                                cost:
                                                    type: number
                                                region:
                                                    type: string
            403:
                description: Not logged in
            404:
                description: AWS account not registered
    """
    # The decorator guarantees account resolution; at least one is required.
    assert len(accounts) > 0
    now = datetime.utcnow()
    # Window: first instant of the month nb_months-1 months ago through
    # the last instant of the current month.
    date_from = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0) - relativedelta(months=nb_months - 1)
    date_to = now.replace(day=calendar.monthrange(now.year, now.month)[1],
                          hour=23, minute=59, second=59, microsecond=999999)
    # byaccount=True nests the interval buckets under per-account buckets.
    raw_data = AWSDetailedLineitem.get_cost_by_region(keys=[account.get_aws_user_id() for account in accounts],
                                                      byaccount=True,
                                                      date_from=date_from,
                                                      date_to=date_to)['accounts']['buckets']
    # Flatten the Elasticsearch buckets into account/month/region rows,
    # resolving each account id back to its display name.
    res = [
        {
            'account_id': account['key'],
            'account_name': [a.pretty for a in accounts if account['key'] == a.get_aws_user_id()][0],
            'months': [
                {
                    'month': data['key_as_string'].split('T')[0],
                    'regions': [
                        {
                            'region': region['key'],
                            'cost': region['cost']['value'],
                        }
                        for region in data['regions']['buckets']
                    ],
                }
                for data in account['intervals']['buckets']
            ]
        }
        for account in raw_data
    ]
    # Optional CSV export when the 'csv' query parameter is present.
    if 'csv' in request.args:
        return Response(generate_csv(res, 'regions', 'region', account=True), mimetype='text/csv')
    return jsonify(accounts=res)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyregionbytagbyaccount', defaults={'nb_months': 3})
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyregionbytagbyaccount/<int:nb_months>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_monthlycostbyregionbytagbyaccount(accounts, nb_months):
    """---
    get:
        tags:
            - aws
        produces:
            - application/json
        description: &desc Get monthly costs summed by region for each account
        summary: *desc
        responses:
            200:
                description: List of AWS accounts
                schema:
                    properties:
                        months:
                            type: array
                            items:
                                properties:
                                    month:
                                        type: string
                                    products:
                                        type: array
                                        items:
                                            properties:
                                                cost:
                                                    type: number
                                                region:
                                                    type: string
            403:
                description: Not logged in
            404:
                description: AWS account not registered
    """
    # The decorator guarantees account resolution; at least one is required.
    assert len(accounts) > 0
    now = datetime.utcnow()
    # Window: first instant of the month nb_months-1 months ago through
    # the last instant of the current month.
    date_from = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0) - relativedelta(months=nb_months - 1)
    date_to = now.replace(day=calendar.monthrange(now.year, now.month)[1],
                          hour=23, minute=59, second=59, microsecond=999999)
    # tagged=True adds per-tag sub-buckets under each region bucket.
    raw_data = AWSDetailedLineitem.get_cost_by_region(keys=[account.get_aws_user_id() for account in accounts],
                                                      tagged=True,
                                                      byaccount=True,
                                                      date_from=date_from,
                                                      date_to=date_to)['accounts']['buckets']

    def tagged_cost(bucket, total):
        # Yield (tag, cost) pairs; any cost not covered by a tag is
        # emitted as a final ('untagged', remainder) pair.
        total_tag = 0.0
        for tag in bucket:
            total_tag += tag['cost']['value']
            yield (tag['key'], tag['cost']['value'])
        # NOTE(review): exact float comparison -- a tiny rounding
        # difference yields a spurious 'untagged' entry; consider a
        # tolerance-based check.
        if total != total_tag:
            yield ('untagged', total - total_tag)

    # Flatten the Elasticsearch buckets into account/month/region/tag rows.
    res = [
        {
            'account_id': account['key'],
            'account_name': [a.pretty for a in accounts if a.get_aws_user_id() == account['key']][0],
            'months': [
                {
                    'month': data['key_as_string'].split('T')[0],
                    'regions': [
                        {
                            'region': region['key'],
                            'tags': [
                                {
                                    'name': tag[0],
                                    'cost': tag[1],
                                }
                                for tag in tagged_cost(region['tags']['buckets'], region['cost']['value'])
                            ],
                        }
                        for region in data['regions']['buckets']
                    ],
                }
                for data in account['intervals']['buckets']
            ]
        }
        for account in raw_data
    ]
    # Optional CSV export when the 'csv' query parameter is present.
    if 'csv' in request.args:
        return Response(generate_csv(res, 'regions', 'region', account=True, tagged=True), mimetype='text/csv')
    return jsonify(accounts=res)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/dailycostbyproduct', defaults={'nb_days': 3})
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/dailycostbyproduct/<int:nb_days>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_dailycostbyproduct(accounts, nb_days):
    """---
    get:
        tags:
            - aws
        produces:
            - application/json
        description: &desc Get daily costs summed by product
        summary: *desc
        responses:
            200:
                description: List of AWS accounts
                schema:
                    properties:
                        days:
                            type: array
                            items:
                                properties:
                                    day:
                                        type: string
                                    products:
                                        type: array
                                        items:
                                            properties:
                                                cost:
                                                    type: number
                                                product:
                                                    type: string
            403:
                description: Not logged in
            404:
                description: AWS account not registered
    """
    assert len(accounts) > 0
    now = datetime.utcnow().replace(hour=23, minute=59, second=59, microsecond=999999)
    # Clamp to the most recent day for which line items were actually imported.
    now = AWSDetailedLineitem.get_last_date([account.get_aws_user_id() for account in accounts], limit=now)
    # Window: nb_days full days, ending the day before the last data day.
    date_from = now.replace(hour=0, minute=0, second=0, microsecond=0) - relativedelta(days=nb_days)
    date_to = now.replace(hour=23, minute=59, second=59, microsecond=999999) - relativedelta(days=1)
    data = AWSDetailedLineitem.get_daily_cost_by_product(keys=[account.get_aws_user_id() for account in accounts],
                                                         date_from=date_from,
                                                         date_to=date_to)['days']
    for d in data:
        # Keep only the most expensive products and fold the tail together;
        # the 'show' query arg controls how many are kept (default 9).
        d['products'] = cut_cost_by_product(sorted(d['products'], key=lambda x: x['cost'], reverse=True), int(request.args['show']) - 1 if 'show' in request.args else 9)
    if not len(data):
        # Nothing imported yet: report when the next billing import is expected.
        return jsonify(message=get_next_update_estimation_message_aws(accounts, AWS_KEY_PROCESSING_INTERVAL_HOURS))
    return jsonify(days=data)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyproduct', defaults={'nb_months': 3})
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyproduct/<int:nb_months>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_monthlycostbyproduct(accounts, nb_months):
    """---
    get:
        tags:
            - aws
        produces:
            - application/json
        description: &desc Get monthly costs summed by product
        summary: *desc
        responses:
            200:
                description: List of AWS accounts
                schema:
                    properties:
                        months:
                            type: array
                            items:
                                properties:
                                    month:
                                        type: string
                                    products:
                                        type: array
                                        items:
                                            properties:
                                                cost:
                                                    type: number
                                                product:
                                                    type: string
            403:
                description: Not logged in
            404:
                description: AWS account not registered
    """
    assert len(accounts) > 0
    now = datetime.utcnow().replace(hour=23, minute=59, second=59, microsecond=999999)
    # Clamp to the most recent day for which line items were actually imported.
    now = AWSDetailedLineitem.get_last_date([account.get_aws_user_id() for account in accounts], limit=now)
    # Window: first day of the oldest month through the last day of the current month.
    date_from = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0) - relativedelta(months=nb_months - 1)
    date_to = now.replace(day=calendar.monthrange(now.year, now.month)[1], hour=23, minute=59, second=59, microsecond=999999)
    data = AWSDetailedLineitem.get_monthly_cost_by_product(keys=[account.get_aws_user_id() for account in accounts],
                                                           date_from=date_from,
                                                           date_to=date_to)['months']
    for d in data:
        if 'csv' not in request.args:
            # CSV export keeps the full product list; JSON folds the tail into
            # one aggregate entry ('show' query arg controls how many are kept).
            d['products'] = cut_cost_by_product(sorted(d['products'], key=lambda x: x['cost'], reverse=True), int(request.args['show']) - 1 if 'show' in request.args else 9)
    if not len(data):
        return jsonify(message=get_next_update_estimation_message_aws(accounts, AWS_KEY_PROCESSING_INTERVAL_HOURS))
    if 'csv' in request.args:
        return Response(generate_csv(data, 'products', 'product'), mimetype='text/csv')
    return jsonify(months=data)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyproductbyaccount', defaults={'nb_months': 3})
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyproductbyaccount/<int:nb_months>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_monthlycostbyproductbyaccount(accounts, nb_months):
    """---
    get:
        tags:
            - aws
        produces:
            - application/json
        description: &desc Get monthly costs summed by product for each account
        summary: *desc
        responses:
            200:
                description: List of AWS accounts
                schema:
                    properties:
                        months:
                            type: array
                            items:
                                properties:
                                    month:
                                        type: string
                                    products:
                                        type: array
                                        items:
                                            properties:
                                                cost:
                                                    type: number
                                                product:
                                                    type: string
            403:
                description: Not logged in
            404:
                description: AWS account not registered
    """
    assert len(accounts) > 0
    now = datetime.utcnow()
    month = nb_months - 1
    # Window: first day of the oldest month through the last day of the current month.
    date_from = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0) - relativedelta(months=month)
    date_to = now.replace(day=calendar.monthrange(now.year, now.month)[1],
                          hour=23, minute=59, second=59, microsecond=999999)
    # One query per account: the result is grouped by account, not aggregated.
    res = [
        {
            'account_id': account.get_aws_user_id(),
            'account_name': account.pretty,
            'months': AWSDetailedLineitem.get_monthly_cost_by_product(keys=account.get_aws_user_id(),
                                                                      date_from=date_from,
                                                                      date_to=date_to)['months'],
        }
        for account in accounts
    ]
    if 'csv' in request.args:
        return Response(generate_csv(res, 'products', 'product', account=True), mimetype='text/csv')
    return jsonify(accounts=res)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyproductbytagbyaccount', defaults={'nb_months': 3})
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyproductbytagbyaccount/<int:nb_months>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_monthlycostbyproductbytagbyaccount(accounts, nb_months):
    """---
    get:
        tags:
            - aws
        produces:
            - application/json
        description: &desc Get monthly costs summed by product for each account
        summary: *desc
        responses:
            200:
                description: List of AWS accounts
                schema:
                    properties:
                        months:
                            type: array
                            items:
                                properties:
                                    month:
                                        type: string
                                    products:
                                        type: array
                                        items:
                                            properties:
                                                cost:
                                                    type: number
                                                product:
                                                    type: string
            403:
                description: Not logged in
            404:
                description: AWS account not registered
    """
    assert len(accounts) > 0
    now = datetime.utcnow()
    month = nb_months - 1
    # Window: first day of the oldest month through the last day of the current month.
    date_from = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0) - relativedelta(months=month)
    date_to = now.replace(day=calendar.monthrange(now.year, now.month)[1],
                          hour=23, minute=59, second=59, microsecond=999999)
    # Same as the by-account endpoint above, but with the tag breakdown enabled.
    res = [
        {
            'account_id': account.get_aws_user_id(),
            'account_name': account.pretty,
            'months': AWSDetailedLineitem.get_monthly_cost_by_product(keys=account.get_aws_user_id(),
                                                                      tagged=True,
                                                                      date_from=date_from,
                                                                      date_to=date_to)['months'],
        }
        for account in accounts
    ]
    if 'csv' in request.args:
        return Response(generate_csv(res, 'products', 'product', account=True, tagged=True), mimetype='text/csv')
    return jsonify(accounts=res)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/yearlycostbyproduct', defaults={'nb_years': 3})
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/yearlycostbyproduct/<int:nb_years>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_yearlycostbyproduct(accounts, nb_years):
    """---
    get:
        tags:
            - aws
        produces:
            - application/json
        description: &desc Get yearly costs summed by product
        summary: *desc
        responses:
            200:
                description: List of AWS accounts
                schema:
                    properties:
                        years:
                            type: array
                            items:
                                properties:
                                    year:
                                        type: string
                                    products:
                                        type: array
                                        items:
                                            properties:
                                                cost:
                                                    type: number
                                                product:
                                                    type: string
            403:
                description: Not logged in
            404:
                description: AWS account not registered
    """
    assert len(accounts) > 0
    now = datetime.utcnow()
    # Window: Jan 1st of the oldest requested year through Dec 31st of this year.
    date_from = now.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0) - relativedelta(years=nb_years - 1)
    date_to = now.replace(month=12, day=31, hour=23, minute=59, second=59, microsecond=999999)
    data = AWSDetailedLineitem.get_yearly_cost_by_product(keys=[account.get_aws_user_id() for account in accounts],
                                                          date_from=date_from,
                                                          date_to=date_to)['years']
    for d in data:
        # Keep only the most expensive products and fold the tail together;
        # the 'show' query arg controls how many are kept (default 9).
        d['products'] = cut_cost_by_product(sorted(d['products'], key=lambda x: x['cost'], reverse=True), int(request.args['show']) - 1 if 'show' in request.args else 9)
    if not len(data):
        return jsonify(message=get_next_update_estimation_message_aws(accounts, AWS_KEY_PROCESSING_INTERVAL_HOURS))
    return jsonify(years=data)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/costbyresource/months')
@with_login()
@with_multiple_aws_accounts()
def aws_cost_by_resource_months(accounts):
    """List the months for which per-resource cost data exists, as YYYY-MM-01 strings."""
    user_ids = [account.get_aws_user_id() for account in accounts]
    available_dates = AWSDetailedLineitem.get_first_to_last_date(user_ids)
    if not available_dates:
        # No line items imported yet: report the next expected import instead.
        return jsonify(message=get_next_update_estimation_message_aws(accounts, AWS_KEY_PROCESSING_INTERVAL_HOURS))
    months = [d.strftime("%Y-%m-01") for d in available_dates]
    return jsonify(months=months)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/costbyresource/<month>/categories')
@with_login()
@with_multiple_aws_accounts()
def aws_cost_by_resource_month_categories_m(accounts, month):
    """Return the cost-magnitude categories ('<10', '<100', ..., '>N') covering
    the per-resource costs of the given accounts for the given month.

    ``month`` must be an ISO date string (YYYY-MM-DD); responds 404 when it
    cannot be parsed.
    """
    try:
        date_from = datetime.strptime(month, "%Y-%m-%d")
    except ValueError:  # narrowed from a bare except: only strptime can raise here
        return jsonify(error='Not found.'), 404
    raw_data = AWSDetailedLineitem.get_cost_by_resource([account.get_aws_user_id() for account in accounts], date_from=date_from)
    cat = []
    max_cat = 0
    for new in raw_data:
        # Smallest power of ten at or above this resource's cost.
        x = 1
        while new['cost'] > x:
            x *= 10
        if x >= max_cat:
            max_cat = x
        elif '<{}'.format(x) not in cat:
            cat.append('<{}'.format(x))
    # The widest bucket is expressed as an open-ended '>' category.
    cat.append('>{}'.format(max_cat / 10))
    return jsonify(categories=cat)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/costbyresource/<month>/chart')
@with_login()
@with_multiple_aws_accounts()
def aws_cost_by_resource_month_chart_m(accounts, month):
    """Aggregate per-resource costs for ``month`` into power-of-ten categories.

    Returns one entry per category with the summed cost of the resources that
    fall in it; responds 404 when ``month`` is not an ISO date (YYYY-MM-DD).
    """
    # TODO: Use ES agg to categorize
    try:
        date_from = datetime.strptime(month, "%Y-%m-%d")
    except ValueError:  # narrowed from a bare except: only strptime can raise here
        return jsonify(error='Not found.'), 404
    raw_data = [
        AWSDetailedLineitem.get_cost_by_resource(account.get_aws_user_id(), date_from=date_from)
        for account in accounts
    ]
    data = []

    def get_cat_with_cost(cost):
        # Smallest power of ten at or above `cost` (1, 10, 100, ...).
        x = 1
        while cost > x:
            x *= 10
        return x

    def add_resource_in_data(new):
        # Accumulate the resource's cost into its '<10^k' category, creating
        # the category entry on first use.
        new_cat = get_cat_with_cost(new['cost'])
        for cat in data:
            if cat['category'] == '<{}'.format(new_cat):
                cat['total'] += new['cost']
                return
        data.append(dict(category='<{}'.format(new_cat), total=new['cost']))

    for one in raw_data:
        for new in one:
            add_resource_in_data(new)
    if not len(data):
        return jsonify(message=get_next_update_estimation_message_aws(accounts, AWS_KEY_PROCESSING_INTERVAL_HOURS))
    # Rewrite the widest category from '<N0' to the open-ended '>N' form
    # (drop the last digit, then flip the comparison sign).
    max_cat = 0
    for i in range(len(data)):
        if len(data[i]['category']) > len(data[max_cat]['category']):
            max_cat = i
    data[max_cat]['category'] = data[max_cat]['category'][:-1]
    data[max_cat]['category'] = data[max_cat]['category'].replace('<', '>', 1)
    return jsonify(categories=data)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/costbyresource/<month>/<category>')
@with_login()
@with_multiple_aws_accounts()
def aws_cost_by_resource_m(accounts, month, category):
    """List the resources whose monthly cost falls in ``category``.

    ``category`` is '<N' (cost in [N/10, N)) or '>N' (cost above N).
    Responds 404 when month/category cannot be parsed or nothing matches.
    """
    try:
        date_from = datetime.strptime(month, "%Y-%m-%d")
        assert category[0] in ['<', '>']
        cat = int(category[1:])
    except (ValueError, AssertionError, IndexError):
        # Narrowed from a bare except: strptime/int raise ValueError, the sign
        # check raises AssertionError, and IndexError guards an empty category.
        return jsonify(error='Not found.'), 404
    raw_data = AWSDetailedLineitem.get_cost_by_resource([account.get_aws_user_id() for account in accounts], date_from=date_from)

    def transform(r):
        # Expose the resource id under the key the frontend expects.
        r['resource_name'] = r['resource']
        return r

    minus = category[0] == '<'
    data = [
        transform(r)
        for r in raw_data
        if (minus and cat > r['cost'] >= cat / 10) or (not minus and r['cost'] > cat)
    ]
    if len(data) <= 0:
        return jsonify(error='Not found.'), 404
    return jsonify(category=dict(resources=data, total=sum([x['cost'] for x in data])))
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/costbyresource/<month>/search/<search>')
@with_login()
@with_multiple_aws_accounts()
def aws_cost_by_resource_search_m(accounts, month, search):
    """Search per-resource costs by name for ``month``.

    Responds 404 when ``month`` is not an ISO date (YYYY-MM-DD) or when the
    search matches nothing.
    """
    try:
        date_from = datetime.strptime(month, "%Y-%m-%d")
    except ValueError:  # narrowed from a bare except: only strptime can raise here
        return jsonify(error='Not found.'), 404
    raw_data = [
        AWSDetailedLineitem.get_cost_by_resource(account.get_aws_user_id(), date_from=date_from, search=search)
        for account in accounts
    ]

    def transform(r):
        # Expose the resource id under the key the frontend expects.
        r['resource_name'] = r['resource']
        return r

    data = [
        transform(r)
        for raw in raw_data
        for r in raw
    ]
    if not len(data):
        return jsonify(error='Not found.'), 404
    return jsonify(search_result=data)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/tags')
@with_login()
@with_multiple_aws_accounts()
def aws_get_resource_tags(accounts):
    """Return all tag keys available for the given accounts, sorted case-insensitively."""
    user_ids = [account.get_aws_user_id() for account in accounts]
    tags = AWSDetailedLineitem.get_available_tags(user_ids)['tags']
    if not tags:
        return jsonify(message=get_next_update_estimation_message_aws(accounts, AWS_KEY_PROCESSING_INTERVAL_HOURS))
    return jsonify(tags=sorted(tags, key=unicode.lower))
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/tags_only_with_data')
@with_login()
@with_multiple_aws_accounts()
def aws_get_resource_tags_with_data(accounts):
    """Return the tag keys that actually carry data, deduplicated across accounts."""
    collected = set()
    for account in accounts:
        account_tags = AWSDetailedLineitem.get_available_tags(account.get_aws_user_id(), only_with_data=account.key)['tags']
        collected.update(account_tags)
    tags = list(collected)
    if not tags:
        return jsonify(message=get_next_update_estimation_message_aws(accounts, AWS_KEY_PROCESSING_INTERVAL_HOURS))
    return jsonify(tags=sorted(tags, key=unicode.lower))
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/costbytag/<path:tag>', defaults={'nb_months': 5})
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/costbytag/<path:tag>/<int:nb_months>')
@with_login()
@with_multiple_aws_accounts()
def aws_cost_by_tags_months(accounts, nb_months, tag):
    """Monthly cost broken down by the values of ``tag`` over the last ``nb_months`` months."""
    date_to = datetime.now()
    # Bug fix: hour=0 was missing from replace(), so the window started at the
    # *current* hour of the first day instead of midnight -- every sibling
    # endpoint in this module zeroes the hour as well.
    date_from = date_to.replace(day=1, hour=0, minute=0, second=0, microsecond=0) - relativedelta(months=nb_months - 1)
    return jsonify(AWSDetailedLineitem.get_monthly_cost_by_tag([account.get_aws_user_id() for account in accounts], tag, date_from=date_from, date_to=date_to))
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/underutilized')
@with_login()
@with_multiple_aws_accounts()
def aws_underutilized_resources(accounts):
    """Return the underutilized resources reported for the given accounts."""
    account_keys = (account.key for account in accounts)
    return jsonify(AWSMetric.underutilized_resources(account_keys))
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/underutilizedreducedcost')
@with_login()
@with_multiple_aws_accounts()
def aws_underutilized_resources_reduced_cost(accounts):
    # Estimate the potential savings on underutilized resources over the last
    # six months by scaling each resource's monthly cost by 20%.
    now = datetime.utcnow()
    date_from = now.replace(hour=0, minute=0, second=0, microsecond=0) - relativedelta(months=6)
    date_to = now.replace(hour=23, minute=59, second=59, microsecond=999999)
    resources = AWSMetric.underutilized_resources(account.key for account in accounts)
    resource_ids = set(r['id'] for r in resources['resources'])
    months = AWSDetailedLineitem.get_monthly_cost_by_resource(resource_ids, date_from=date_from, date_to=date_to)
    # NOTE: dict.iteritems() -- this module is Python 2 code.
    res = {  # Simply multiply every cost by 20% as all instances usage is
        k: v * 0.2  # less than 20%. TODO: intelligently find the best type
        for k, v in months.iteritems()
    }
    return jsonify(res)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/usagecost')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_usagecost(accounts):
    # Correlate daily EC2 cost with daily CPU utilization, one record per day.

    def get_account_data(account):
        # Yields (day, cpu_usage, None) triples for utilization and
        # (day, None, cost) triples for cost, merged per day further below.
        # NOTE: dict.iteritems() -- this module is Python 2 code.
        for date, cpu_usage in dict(AWSMetric.daily_cpu_utilization(account.key)).iteritems():
            yield (date, cpu_usage, None)
        for date, cost in dict(AWSDetailedLineitem.get_ec2_daily_cost(account.get_aws_user_id())).iteritems():
            yield (date, None, cost)

    @with_cache()
    def get_all_account_data():
        # Cached flattening of every account's (day, cpu, cost) triples.
        return list(
            itertools.chain.from_iterable(
                get_account_data(account)
                for account in accounts
            )
        )

    data = get_all_account_data()
    days = {}
    for day, cpu_usage, cost in data:
        # Merge the cpu-only and cost-only triples into a single per-day record;
        # both metrics are summed across accounts.
        day_data = days.setdefault(day, {'day': day, 'cpu': None, 'cost': None})
        if cpu_usage is not None:
            day_data['cpu'] = (day_data['cpu'] or 0.0) + cpu_usage
        if cost is not None:
            day_data['cost'] = (day_data['cost'] or 0.0) + cost
    res = sorted([
        value
        for value in days.itervalues()
        if value['cpu'] is not None and value['cost'] is not None  # Comment/remove if None values are OK
    ], key=lambda e: e['day'])
    if not res:
        return jsonify(message=get_next_update_estimation_message_aws(accounts, AWS_KEY_PROCESSING_INTERVAL_HOURS))
    return jsonify(days=res)
def _build_list_used_transfer_types(stat_list):
return frozenset(
elem['type']
for bucket in stat_list
for elem in bucket['transfer_stats']
)
def _check_if_in_list(dict_list, value, key):
return next((item for item in dict_list if item[key] == value), None)
def _append_to_header_list(header_list, new_data):
for elem in new_data:
if elem not in header_list:
header_list.append(elem)
return header_list
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/s3buckettags')
@with_login()
@with_multiple_aws_accounts()
def aws_get_resource_tags_for_s3(accounts):
    """Return the tag keys available for S3 (Simple Storage Service), deduplicated across accounts."""
    collected = set()
    for account in accounts:
        account_tags = AWSDetailedLineitem.get_available_tags(
            account.get_aws_user_id(),
            product_name='Simple Storage Service',
        )['tags']
        collected.update(account_tags)
    return jsonify(tags=sorted(collected, key=unicode.lower))
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/s3bucketsizepername')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_s3bucketsizepername(accounts):
    """---
    get:
        tags:
            - aws
        produces:
            - application/csv
        description: &desc Stats about cost and usage of bandwith and storag on s3 buckets, organised by name
        summary: *desc
        responses:
            200:
                description: Stats about cost and usage of bandwith and storag on s3 buckets, organised by name
            403:
                description: Not logged in
            404:
                description: AWS account not registered
    """

    def _create_bandwith_breakdown(transfer_types_list, csv_row, bucket_bandwith_stat):
        # Add one column per transfer type present in this bucket's stats.
        for elem in transfer_types_list:
            _current_transfer_type = _check_if_in_list(bucket_bandwith_stat['transfer_stats'], elem, 'type')
            if _current_transfer_type is not None:
                csv_row[elem] = _current_transfer_type['data'] * 1024 * 1024 * 1024  # The data is given in GB by default; convert to bytes.
        return csv_row

    def _create_csv_rows(bucket_list, account, bandwith_cost, csv_row_all):
        # Build one row per bucket combining storage cost and bandwith cost.
        # NOTE(review): returns [] when this account has no bucket list, which
        # discards rows accumulated from earlier accounts -- confirm intended.
        if bucket_list is None:
            return []
        for bucket in bucket_list['buckets']:
            csv_row = {
                'account_id': account.get_aws_user_id(),
                'used_space': bucket['used_space'],
                'name': bucket['name'],
                'storage_cost': _check_if_in_list(bucket['prices'], bucket['provider'], 'provider')['cost']
            }
            bucket_bandwith_stat = _check_if_in_list(bandwith_cost, bucket['name'], 'bucket_name')
            if bucket_bandwith_stat is not None:
                csv_row = _create_bandwith_breakdown(transfer_types_list, csv_row, bucket_bandwith_stat)
            csv_row['bandwith_cost'] = bucket_bandwith_stat['cost'] if bucket_bandwith_stat is not None else 0
            csv_row['total_cost'] = csv_row['storage_cost'] + csv_row['bandwith_cost']
            csv_row_all.append(csv_row)
        return csv_row_all

    assert len(accounts) > 0
    csv_header = ['account_id', 'name', 'used_space', 'storage_cost', 'bandwith_cost', 'total_cost']
    csv_row_all = []
    for account in accounts:
        bucket_list = AWSStat.latest_s3_space_usage(account)
        bucket_ids = [
            bucket['name']
            for bucket in (bucket_list['buckets'] if bucket_list is not None else [])
        ]
        bandwith_cost = AWSDetailedLineitem.get_s3_bandwith_info_and_cost_per_name(account.get_aws_user_id(), bucket_ids)
        # The header grows with whatever transfer types this account uses.
        transfer_types_list = _build_list_used_transfer_types(bandwith_cost)
        csv_header = _append_to_header_list(csv_header, transfer_types_list)
        csv_row_all = _create_csv_rows(bucket_list, account, bandwith_cost, csv_row_all)
    # NOTE(review): guards against a leading None row; unclear which code path
    # produces it -- confirm whether this is still reachable.
    if len(csv_row_all) > 0 and csv_row_all[0] is None:
        csv_row_all = []
    if 'csv' in request.args:
        return Response(generate_csv_clean(csv_row_all, csv_header))
    return jsonify(accounts=csv_row_all)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/s3bucketsizepertag/<path:tag>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_s3bucketsizepertag(accounts, tag):
    """---
    get:
        tags:
            - aws
        produces:
            - application/csv
        description: &desc Stats about cost and usage of bandwith and storag on s3 buckets, organised by tag
        summary: *desc
        responses:
            200:
                description: Stats about cost and usage of bandwith and storag on s3 buckets, organised by tag
            403:
                description: Not logged in
            404:
                description: AWS account not registered
    """
    assert len(accounts) > 0

    def _get_total_sizes_cost_and_names(bucket_names_list, bucket_list):
        # Sum size and storage cost over the buckets named in bucket_names_list
        # and build a comma-separated string of their names.
        total_size = 0
        total_cost = 0
        names = ""
        for bucket in bucket_list['buckets']:
            if _check_if_in_list(bucket_names_list, bucket['name'], 'bucket_name') is not None:
                total_size += float(bucket['used_space'])
                total_cost += _check_if_in_list(bucket['prices'], bucket['provider'], 'provider')['cost']
                names += bucket['name'] + ", "
        return total_size, names[:-2], total_cost

    def _get_bandwith_info(account, bucket_names):
        # Normalise bucket_names (single value or list) into a list of ids.
        bucket_ids = [
            bucket
            for bucket in (bucket_names if isinstance(bucket_names, list) else [bucket_names])
        ]
        bandwith_cost = AWSDetailedLineitem.get_s3_bandwith_info_and_cost_per_name(account.get_aws_user_id(), bucket_ids)
        return bandwith_cost

    def _iterate_over_buckets_in_tag_for_total(bucket_bandwith_stat):
        # Total bandwith cost over all buckets; tolerates a None stat list.
        total_cost = 0
        for bucket in (bucket_bandwith_stat if bucket_bandwith_stat is not None else []):
            total_cost += bucket['cost']
        return total_cost

    def _iterate_over_buckets_and_make_breakdown_bandwith_stat(bucket_bandwith_stat, buff_row_csv, tag_value):
        # Accumulate per-transfer-type byte counts (input values are in GB)
        # and the overall bandwith cost into the row being built.
        bandwith_cost = 0
        for bucket in bucket_bandwith_stat:
            bandwith_cost += bucket['cost']
            for elem in bucket['transfer_stats']:
                if elem['type'] in buff_row_csv:
                    buff_row_csv[elem['type']] += (elem['data'] * 1024 * 1024 * 1024)
                else:
                    buff_row_csv[elem['type']] = (elem['data'] * 1024 * 1024 * 1024)
        buff_row_csv['bandwith_cost'] = bandwith_cost
        return buff_row_csv

    def _build_csv_row_and_add_header(bucket_list_tagged, bucket_list, account, csv_header, csv_row_all):
        # Build one row per tag value, extending csv_header with any new
        # transfer-type columns found along the way.
        # NOTE(review): returns ([], []) when the account has no tagged bucket
        # list, discarding header/rows accumulated for earlier accounts --
        # confirm this is intended.
        # NOTE(review): bucket_info[1] is a comma-joined *string* of names, so
        # _get_bandwith_info receives one concatenated id -- verify.
        if bucket_list_tagged is None:
            return [], []
        for tag_value in bucket_list_tagged['tag_value']:
            bucket_info = _get_total_sizes_cost_and_names(tag_value['s3_buckets'], bucket_list)
            bucket_bandwith_stat = _get_bandwith_info(account, bucket_info[1])
            csv_header = _append_to_header_list(csv_header, _build_list_used_transfer_types(bucket_bandwith_stat))
            csv_row = {
                "tag_key": bucket_list_tagged['tag_key'].split(':')[1],
                "tag_value": tag_value['tag_value'],
                "account_id": tag_value['s3_buckets'][0]["account_id"],
                "total_size": bucket_info[0],
                "bucket_names": bucket_info[1],
                "storage_cost": bucket_info[2],
            }
            csv_row = _iterate_over_buckets_and_make_breakdown_bandwith_stat(bucket_bandwith_stat, csv_row, tag_value)
            csv_row['total_cost'] = csv_row['storage_cost'] + csv_row['bandwith_cost']
            csv_row_all.append(csv_row)
        return csv_header, csv_row_all

    def _select_bucket_list_tag(bucket_list_per_tag, tag):
        # First tagged bucket list whose tag key (text after the ':') contains
        # `tag`; implicitly returns None when nothing matches.
        for bucket_list_tagged in bucket_list_per_tag:
            if tag in bucket_list_tagged['tag_key'].split(':')[1]:
                return bucket_list_tagged

    csv_header = ["account_id", "tag_key", "tag_value", "total_size", "bucket_names", "bandwith_cost", "storage_cost", "total_cost"]
    csv_data = []
    for account in accounts:
        bucket_list_per_tag = AWSDetailedLineitem.get_s3_buckets_per_tag(account.get_aws_user_id())
        bucket_list_tagged = _select_bucket_list_tag(bucket_list_per_tag, tag)
        bucket_list = AWSStat.latest_s3_space_usage(account)
        csv_header, csv_data = _build_csv_row_and_add_header(bucket_list_tagged, bucket_list, account, csv_header, csv_data)
    if 'csv' in request.args:
        return Response(generate_csv_clean(csv_data, csv_header))
    return jsonify(res=csv_data)
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import state
class link_delay(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/isis-neighbor-attribute/neighbors/neighbor/subTLVs/subTLVs/link-delay. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: This container defines unidirectional link delay.
    """

    # NOTE(review): auto-generated pyangbind binding -- regenerate from the
    # YANG model rather than editing by hand.
    __slots__ = ("_path_helper", "_extmethods", "__state")

    _yang_name = "link-delay"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        # 'state' is the container's only child element (config false).
        self._path_helper = False

        self._extmethods = False
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )

        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Absolute YANG path of this container when it has no parent;
        # otherwise extends the parent's path.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "isis",
                "levels",
                "level",
                "link-state-database",
                "lsp",
                "tlvs",
                "tlv",
                "isis-neighbor-attribute",
                "neighbors",
                "neighbor",
                "subTLVs",
                "subTLVs",
                "link-delay",
            ]

    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor/subTLVs/subTLVs/link_delay/state (container)

        YANG Description: State parameters of IS Extended Reachability sub-TLV 33.
        """
        return self.__state

    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor/subTLVs/subTLVs/link_delay/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.

        YANG Description: State parameters of IS Extended Reachability sub-TLV 33.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
                }
            )

        self.__state = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_state(self):
        # Restore the child to a fresh default container.
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )

    state = __builtin__.property(_get_state)

    _pyangbind_elements = OrderedDict([("state", state)])
from . import state
class link_delay(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/isis-neighbor-attribute/neighbors/neighbor/subTLVs/subTLVs/link-delay. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines unidirectional link delay.
"""
__slots__ = ("_path_helper", "_extmethods", "__state")
_yang_name = "link-delay"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"isis-neighbor-attribute",
"neighbors",
"neighbor",
"subTLVs",
"subTLVs",
"link-delay",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor/subTLVs/subTLVs/link_delay/state (container)
YANG Description: State parameters of IS Extended Reachability sub-TLV 33.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor/subTLVs/subTLVs/link_delay/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters of IS Extended Reachability sub-TLV 33.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
    # Reset the container back to a fresh (empty) generated instance,
    # mirroring the constructor arguments used in _set_state().
    self.__state = YANGDynClass(
        base=state.state,
        is_container="container",
        yang_name="state",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        extensions=None,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="container",
        is_config=False,
    )
# Read-only accessor: the YANG node is config false, so only the getter
# is exposed as a property (no public setter).
state = __builtin__.property(_get_state)

# Ordered registry of this container's child elements, used by pyangbind
# for serialization/iteration.
_pyangbind_elements = OrderedDict([("state", state)])
|
|
"""Diff processing and filtering logic."""
import re
from reviewboard.diffviewer.diffutils import (get_diff_data_chunks_info,
split_line_endings)
from reviewboard.diffviewer.features import filter_interdiffs_v2_feature
#: Regex for matching a diff chunk line.
#:
#: Matches unified-diff hunk headers of the form ``@@ -l,s +l,s @@``
#: (the length components are optional), operating on bytes.
#:
#: Deprecated:
#:     3.0.18:
#:     This has been replaced with
#:     :py:data:`reviewboard.diffviewer.diffutils.CHUNK_RANGE_RE`. Its group
#:     names differ from this version.
CHUNK_RANGE_RE = re.compile(
    br'^@@ -(?P<orig_start>\d+)(,(?P<orig_len>\d+))? '
    br'\+(?P<new_start>\d+)(,(?P<new_len>\d+))? @@',
    re.M)
def filter_interdiff_opcodes(opcodes, filediff_data, interfilediff_data,
                             request=None):
    """Filter the opcodes for an interdiff to remove unnecessary lines.

    An interdiff may contain lines of code that have changed as the result of
    updates to the tree between the time that the first and second diff were
    created. This leads to some annoyances when reviewing.

    This function will filter the opcodes to remove as much of this as
    possible. It will only output non-"equal" opcodes if it falls into the
    ranges of lines dictated in the uploaded diff files.

    Version Changed:
        3.0.18:
        Added the ``request`` argument, and added support for the version 2
        algorithm from Review Board 4.0 (through the :py:data:`~reviewboard
        .diffviewer.features.filter_interdiffs_v2_feature` feature).

    Args:
        opcodes (list of tuple):
            The list of opcodes to filter.

        filediff_data (bytes):
            The data from the filediff to filter.

        interfilediff_data (bytes):
            The data from the interfilediff to filter.

        request (django.http.HttpRequest, optional):
            The HTTP request from the client.

    Yields:
        tuple:
        An opcode to render for the diff.
    """
    def _find_range_info_v1(diff):
        # Legacy (v1) range computation.
        #
        # Fixed: this used to compute ``split_line_endings(diff)`` and two
        # boolean flags that were never read; the dead locals have been
        # removed. After that removal this matches _find_range_info_v2
        # line-for-line; the two are kept separate so the feature flag
        # below retains its meaning.
        ranges = []

        for range_info in get_diff_data_chunks_info(diff):
            orig_info = range_info['orig']
            modified_info = range_info['modified']
            orig_pre_lines_of_context = orig_info['pre_lines_of_context']
            orig_post_lines_of_context = orig_info['post_lines_of_context']
            modified_pre_lines_of_context = \
                modified_info['pre_lines_of_context']
            modified_post_lines_of_context = \
                modified_info['post_lines_of_context']

            # Use the smaller of the two context counts when both sides
            # have context; otherwise whichever side has any.
            if modified_pre_lines_of_context and orig_pre_lines_of_context:
                pre_lines_of_context = min(orig_pre_lines_of_context,
                                           modified_pre_lines_of_context)
            else:
                pre_lines_of_context = (modified_pre_lines_of_context or
                                        orig_pre_lines_of_context)

            if modified_post_lines_of_context and orig_post_lines_of_context:
                post_lines_of_context = min(orig_post_lines_of_context,
                                            modified_post_lines_of_context)
            else:
                post_lines_of_context = (modified_post_lines_of_context or
                                         orig_post_lines_of_context)

            start = modified_info['chunk_start'] + pre_lines_of_context

            if pre_lines_of_context > 0:
                start -= 1

            length = (modified_info['chunk_len'] - pre_lines_of_context -
                      post_lines_of_context)

            ranges.append((start, start + length))

        return ranges

    def _find_range_info_v2(diff):
        # Review Board 4.0 (v2) range computation.
        ranges = []

        for range_info in get_diff_data_chunks_info(diff):
            orig_info = range_info['orig']
            modified_info = range_info['modified']
            orig_pre_lines_of_context = orig_info['pre_lines_of_context']
            orig_post_lines_of_context = orig_info['post_lines_of_context']
            modified_pre_lines_of_context = \
                modified_info['pre_lines_of_context']
            modified_post_lines_of_context = \
                modified_info['post_lines_of_context']

            if modified_pre_lines_of_context and orig_pre_lines_of_context:
                pre_lines_of_context = min(orig_pre_lines_of_context,
                                           modified_pre_lines_of_context)
            else:
                pre_lines_of_context = (modified_pre_lines_of_context or
                                        orig_pre_lines_of_context)

            if modified_post_lines_of_context and orig_post_lines_of_context:
                post_lines_of_context = min(orig_post_lines_of_context,
                                            modified_post_lines_of_context)
            else:
                post_lines_of_context = (modified_post_lines_of_context or
                                         orig_post_lines_of_context)

            start = modified_info['chunk_start'] + pre_lines_of_context

            if pre_lines_of_context > 0:
                start -= 1

            length = (modified_info['chunk_len'] - pre_lines_of_context -
                      post_lines_of_context)

            ranges.append((start, start + length))

        return ranges

    def _is_range_valid(line_range, tag, i1, i2):
        # A chunk start is "valid" when it falls within the current range
        # and (for non-deletes) covers at least one line.
        return (line_range is not None and
                i1 >= line_range[0] and
                (tag == 'delete' or i1 != i2))

    use_v2_algorithm = \
        filter_interdiffs_v2_feature.is_enabled(request=request)

    if use_v2_algorithm:
        _find_range_info = _find_range_info_v2
    else:
        _find_range_info = _find_range_info_v1

    orig_ranges = _find_range_info(filediff_data)
    new_ranges = _find_range_info(interfilediff_data)

    orig_range_i = 0
    new_range_i = 0

    if orig_ranges:
        orig_range = orig_ranges[orig_range_i]
    else:
        orig_range = None

    if new_ranges:
        new_range = new_ranges[new_range_i]
    else:
        new_range = None

    if not orig_range and not new_range:
        # There's nothing in here, or it's not a unified diff. Just yield
        # what we get.
        for tag, i1, i2, j1, j2 in opcodes:
            yield tag, i1, i2, j1, j2

        return

    for tag, i1, i2, j1, j2 in opcodes:
        while orig_range and i1 > orig_range[1]:
            # We've left the range of the current chunk to consider in the
            # original diff. Move on to the next one.
            orig_range_i += 1

            if orig_range_i < len(orig_ranges):
                orig_range = orig_ranges[orig_range_i]
            else:
                orig_range = None

        while new_range and j1 > new_range[1]:
            # We've left the range of the current chunk to consider in the
            # new diff. Move on to the next one.
            new_range_i += 1

            if new_range_i < len(new_ranges):
                new_range = new_ranges[new_range_i]
            else:
                new_range = None

        # See if the chunk we're looking at is in the range of the chunk in
        # one of the uploaded diffs. If so, allow it through.
        orig_starts_valid = _is_range_valid(orig_range, tag, i1, i2)
        new_starts_valid = _is_range_valid(new_range, tag, j1, j2)

        if tag in ('equal', 'replace'):
            valid_chunk = orig_starts_valid or new_starts_valid
        elif tag == 'delete':
            valid_chunk = orig_starts_valid
        elif tag == 'insert':
            valid_chunk = new_starts_valid

        if valid_chunk:
            # This chunk is valid. It may only be a portion of the real
            # chunk, though. We'll need to split it up into a known valid
            # segment first, and yield that.
            if orig_range:
                cap_i2 = orig_range[1] + 1
            else:
                cap_i2 = i2

            if new_range:
                cap_j2 = new_range[1] + 1
            else:
                cap_j2 = j2

            if orig_starts_valid:
                valid_i2 = min(i2, cap_i2)
            else:
                valid_i2 = i2

            if new_starts_valid:
                valid_j2 = min(j2, cap_j2)
            else:
                valid_j2 = j2

            if tag in ('equal', 'replace'):
                # We need to take care to not let the replace lines have
                # differing ranges for the orig and modified files. We want the
                # replace to take up the full bounds of the two sides, but
                # capped to the valid chunk range.
                #
                # For this, we need to pick a consistent value for the length
                # of the range. We know at least one side will be within
                # bounds, since we have a valid chunk and at least one is
                # capped to be <= the end of the range.
                #
                # If one side is out of bounds of the range, the other range
                # will win. If both are in bounds, the largest wins.
                i_diff = valid_i2 - i1
                j_diff = valid_j2 - j1

                if valid_i2 > cap_i2:
                    # Sanity-check that valid_j2 is in bounds. We don't need
                    # to check this in the following conditionals, though,
                    # since that's covered by the conditionals themselves.
                    assert valid_j2 <= cap_j2

                    max_cap = j_diff
                elif valid_j2 > cap_j2:
                    max_cap = i_diff
                else:
                    max_cap = max(i_diff, j_diff)

                # Set each valid range to be the same length.
                valid_i2 = i1 + max_cap
                valid_j2 = j1 + max_cap

                # Update the caps, so that we'll process whatever we've
                # chopped off.
                cap_i2 = valid_i2
                cap_j2 = valid_j2

            yield tag, i1, valid_i2, j1, valid_j2

            if valid_i2 == i2 and valid_j2 == j2:
                continue

            # There were more parts of this range remaining. We know they're
            # all invalid, so let's update i1 and j1 to point to the start
            # of those invalid ranges, and mark them.
            if orig_range is not None and i2 + 1 > cap_i2:
                i1 = cap_i2

            if new_range is not None and j2 + 1 > cap_j2:
                j1 = cap_j2

            valid_chunk = False

        if not valid_chunk:
            # Turn this into an "filtered-equal" chunk. The left-hand and
            # right-hand side of the diffs will look different, which may be
            # noticeable, but it will still help the user pay attention to
            # what's actually changed that they care about.
            #
            # These will get turned back into "equal" chunks in the
            # post-processing step.
            yield 'filtered-equal', i1, i2, j1, j2
def post_process_filtered_equals(opcodes):
    """Post-processes filtered-equal and equal chunks from interdiffs.

    Any filtered-out "filtered-equal" chunks will get turned back into "equal"
    chunks and merged into any prior equal chunks. Likewise, simple "equal"
    chunks will also get merged.

    "equal" chunks that have any indentation information will remain
    their own chunks, with nothing merged in.
    """
    pending = None

    for opcode in opcodes:
        tag, i1, i2, j1, j2, meta = opcode

        # A chunk can be merged when it is a filtered-equal, or a plain
        # equal with no recorded indentation changes.
        mergeable = (tag == 'filtered-equal' or
                     (tag == 'equal' and not meta.get('indentation_changes')))

        if mergeable:
            if pending is not None:
                # Extend the accumulated equal chunk, keeping the start
                # offsets and metadata of the first merged chunk.
                _, start_i, _, start_j, _, pending_meta = pending
                pending = ('equal', start_i, i2, start_j, j2, pending_meta)
            else:
                pending = ('equal', i1, i2, j1, j2, meta)
        else:
            # A real change (insert/delete/replace, or an equal carrying
            # indentation data): flush any accumulated equal chunk first.
            if pending is not None:
                yield pending
                pending = None

            yield opcode

    if pending is not None:
        yield pending
|
|
try:
from AG_fft_tools import correlate2d,fast_ffts
from AG_fft_tools import dftups,upsample_image,shift
except ImportError:
from image_registration.fft_tools import correlate2d,fast_ffts
from image_registration.fft_tools import dftups,upsample_image,shift
import warnings
import numpy as np
__all__ = ['register_images']
def register_images(im1, im2, usfac=1, return_registered=False,
        return_error=False, zeromean=True, DEBUG=False, maxoff=None,
        nthreads=1, use_numpy_fft=False):
    """
    Sub-pixel image registration (see dftregistration for lots of details)

    Parameters
    ----------
    im1 : np.ndarray
    im2 : np.ndarray
        The images to register.
    usfac : int
        upsampling factor; governs accuracy of fit (1/usfac is best accuracy)
    return_registered : bool
        Return the registered image as the last parameter
    return_error : bool
        Does nothing at the moment, but in principle should return the "fit
        error" (it does nothing because I don't know how to compute the "fit
        error")
    zeromean : bool
        Subtract the mean from the images before cross-correlating?  If no, you
        may get a 0,0 offset because the DC levels are strongly correlated.
    maxoff : int
        Maximum allowed offset to measure (setting this helps avoid spurious
        peaks)
    DEBUG : bool
        Test code used during development.  Should DEFINITELY be removed.

    Returns
    -------
    dx,dy : float,float
        REVERSE of dftregistration order (also, signs flipped) for consistency
        with other routines.
        Measures the amount im2 is offset from im1 (i.e., shift im2 by these #'s
        to match im1)
    """
    if im1.shape != im2.shape:
        raise ValueError("Images must have same shape.")

    if zeromean:
        # x == x is False exactly where x is NaN, so these means are taken
        # over the finite pixels only.
        im1 = im1 - im1[im1 == im1].mean()
        im2 = im2 - im2[im2 == im2].mean()

    # Zero out NaNs (on a copy) so they don't poison the FFTs.
    if np.any(np.isnan(im1)):
        im1 = im1.copy()
        im1[im1 != im1] = 0
    if np.any(np.isnan(im2)):
        im2 = im2.copy()
        im2[im2 != im2] = 0

    fft2, ifft2 = fast_ffts.get_ffts(nthreads=nthreads,
                                     use_numpy_fft=use_numpy_fft)

    registration = dftregistration(fft2(im1), fft2(im2), usfac=usfac,
            return_registered=return_registered,
            return_error=return_error, zeromean=zeromean,
            DEBUG=DEBUG, maxoff=maxoff)

    # Swap to (dx, dy) order and flip signs for consistency with the other
    # routines in this package; pass any extra outputs through unchanged.
    result = [-registration[1], -registration[0]] + list(registration[2:])

    if return_registered:
        result[-1] = np.abs(np.fft.ifftshift(ifft2(result[-1])))

    return result
def dftregistration(buf1ft,buf2ft,usfac=1, return_registered=False,
        return_error=False, zeromean=True, DEBUG=False, maxoff=None,
        nthreads=1, use_numpy_fft=False):
    """
    translated from matlab:
    http://www.mathworks.com/matlabcentral/fileexchange/18401-efficient-subpixel-image-registration-by-cross-correlation/content/html/efficient_subpixel_registration.html

    Efficient subpixel image registration by crosscorrelation. This code
    gives the same precision as the FFT upsampled cross correlation in a
    small fraction of the computation time and with reduced memory
    requirements. It obtains an initial estimate of the crosscorrelation peak
    by an FFT and then refines the shift estimation by upsampling the DFT
    only in a small neighborhood of that estimate by means of a
    matrix-multiply DFT. With this procedure all the image points are used to
    compute the upsampled crosscorrelation.
    Manuel Guizar - Dec 13, 2007

    Portions of this code were taken from code written by Ann M. Kowalczyk
    and James R. Fienup.
    J.R. Fienup and A.M. Kowalczyk, "Phase retrieval for a complex-valued
    object by using a low-resolution image," J. Opt. Soc. Am. A 7, 450-458
    (1990).

    Citation for this algorithm:
    Manuel Guizar-Sicairos, Samuel T. Thurman, and James R. Fienup,
    "Efficient subpixel image registration algorithms," Opt. Lett. 33,
    156-158 (2008).

    Inputs
    buf1ft    Fourier transform of reference image,
           DC in (1,1)   [DO NOT FFTSHIFT]
    buf2ft    Fourier transform of image to register,
           DC in (1,1) [DO NOT FFTSHIFT]
    usfac     Upsampling factor (integer). Images will be registered to
           within 1/usfac of a pixel. For example usfac = 20 means the
           images will be registered within 1/20 of a pixel. (default = 1)

    Outputs
    output =  [error,diffphase,net_row_shift,net_col_shift]
    error     Translation invariant normalized RMS error between f and g
    diffphase     Global phase difference between the two images (should be
               zero if images are non-negative).
    net_row_shift net_col_shift   Pixel shifts between images
    Greg      (Optional) Fourier transform of registered version of buf2ft,
           the global phase difference is compensated for.
    """
    # this function is translated from matlab, so I'm just going to pretend
    # it is matlab/pylab
    # NOTE(review): this module uses Python 2 print statements; it is not
    # Python 3 compatible as written.
    from numpy import conj,abs,arctan2,sqrt,real,imag,shape,zeros,trunc,ceil,floor,fix
    from numpy.fft import fftshift,ifftshift
    fft2,ifft2 = fftn,ifftn = fast_ffts.get_ffts(nthreads=nthreads, use_numpy_fft=use_numpy_fft)

    # Compute error for no pixel shift
    if usfac == 0:
        raise ValueError("Upsample Factor must be >= 1")
        # NOTE(review): everything below in this branch is unreachable
        # because of the raise above (kept from the MATLAB translation).
        CCmax = sum(sum(buf1ft * conj(buf2ft)));
        rfzero = sum(abs(buf1ft)**2);
        rgzero = sum(abs(buf2ft)**2);
        error = 1.0 - CCmax * conj(CCmax)/(rgzero*rfzero);
        error = sqrt(abs(error));
        diffphase=arctan2(imag(CCmax),real(CCmax));
        output=[error,diffphase];

    # Whole-pixel shift - Compute crosscorrelation by an IFFT and locate the
    # peak
    elif usfac == 1:
        [m,n]=shape(buf1ft);
        CC = ifft2(buf1ft * conj(buf2ft));
        if maxoff is None:
            # Peak of the cross-correlation gives the integer shift.
            rloc,cloc = np.unravel_index(abs(CC).argmax(), CC.shape)
            CCmax=CC[rloc,cloc];
        else:
            # set the interior of the shifted array to zero
            # (i.e., ignore it)
            CC[maxoff:-maxoff,:] = 0
            CC[:,maxoff:-maxoff] = 0
            rloc,cloc = np.unravel_index(abs(CC).argmax(), CC.shape)
            CCmax=CC[rloc,cloc];
        rfzero = sum(abs(buf1ft)**2)/(m*n);
        rgzero = sum(abs(buf2ft)**2)/(m*n);
        error = 1.0 - CCmax * conj(CCmax)/(rgzero*rfzero);
        error = sqrt(abs(error));
        diffphase=arctan2(imag(CCmax),real(CCmax));
        md2 = fix(m/2);
        nd2 = fix(n/2);
        # Wrap shifts larger than half the image into negative offsets.
        if rloc > md2:
            row_shift = rloc - m;
        else:
            row_shift = rloc;

        if cloc > nd2:
            col_shift = cloc - n;
        else:
            col_shift = cloc;
        #output=[error,diffphase,row_shift,col_shift];
        output=[row_shift,col_shift]

    # Partial-pixel shift
    else:

        if DEBUG: import pylab
        # First upsample by a factor of 2 to obtain initial estimate
        # Embed Fourier data in a 2x larger array
        [m,n]=shape(buf1ft);
        mlarge=m*2;
        nlarge=n*2;
        CClarge=zeros([mlarge,nlarge], dtype='complex');
        #CClarge[m-fix(m/2):m+fix((m-1)/2)+1,n-fix(n/2):n+fix((n-1)/2)+1] = fftshift(buf1ft) * conj(fftshift(buf2ft));
        # NOTE(review): float slice indices (round() returns float on py2
        # numpy) — relies on old numpy accepting non-int indices.
        CClarge[round(mlarge/4.):round(mlarge/4.*3),round(nlarge/4.):round(nlarge/4.*3)] = fftshift(buf1ft) * conj(fftshift(buf2ft));
        # note that matlab uses fix which is trunc... ?

        # Compute crosscorrelation and locate the peak
        CC = ifft2(ifftshift(CClarge)); # Calculate cross-correlation
        if maxoff is None:
            rloc,cloc = np.unravel_index(abs(CC).argmax(), CC.shape)
            CCmax=CC[rloc,cloc];
        else:
            # set the interior of the shifted array to zero
            # (i.e., ignore it)
            CC[maxoff:-maxoff,:] = 0
            CC[:,maxoff:-maxoff] = 0
            rloc,cloc = np.unravel_index(abs(CC).argmax(), CC.shape)
            CCmax=CC[rloc,cloc];

        if DEBUG:
            pylab.figure(1)
            pylab.clf()
            pylab.subplot(131)
            pylab.imshow(real(CC)); pylab.title("Cross-Correlation (upsampled 2x)")
            pylab.subplot(132)
            ups = dftups((buf1ft) * conj((buf2ft)),mlarge,nlarge,2,0,0); pylab.title("dftups upsampled 2x")
            pylab.imshow(real(((ups))))
            pylab.subplot(133)
            pylab.imshow(real(CC)/real(ups)); pylab.title("Ratio upsampled/dftupsampled")
            print "Upsample by 2 peak: ",rloc,cloc," using dft version: ",np.unravel_index(abs(ups).argmax(), ups.shape)
            #print np.unravel_index(ups.argmax(),ups.shape)

        # Obtain shift in original pixel grid from the position of the
        # crosscorrelation peak
        [m,n] = shape(CC); md2 = trunc(m/2); nd2 = trunc(n/2);

        if rloc > md2 :
            row_shift2 = rloc - m;
        else:
            row_shift2 = rloc;
        if cloc > nd2:
            col_shift2 = cloc - n;
        else:
            col_shift2 = cloc;
        # Halve because the estimate was made on the 2x-upsampled grid.
        row_shift2=row_shift2/2.;
        col_shift2=col_shift2/2.;
        if DEBUG: print "row_shift/col_shift from ups2: ",row_shift2,col_shift2

        # If upsampling > 2, then refine estimate with matrix multiply DFT
        if usfac > 2:
            #%% DFT computation %%%
            # Initial shift estimate in upsampled grid
            zoom_factor=1.5
            if DEBUG: print row_shift2, col_shift2
            row_shift0 = round(row_shift2*usfac)/usfac;
            col_shift0 = round(col_shift2*usfac)/usfac;
            dftshift = trunc(ceil(usfac*zoom_factor)/2); #% Center of output array at dftshift+1
            if DEBUG: print 'dftshift,rs,cs,zf:',dftshift, row_shift0, col_shift0, usfac*zoom_factor
            # Matrix multiply DFT around the current shift estimate
            roff = dftshift-row_shift0*usfac
            coff = dftshift-col_shift0*usfac
            upsampled = dftups(
                    (buf2ft * conj(buf1ft)),
                    ceil(usfac*zoom_factor),
                    ceil(usfac*zoom_factor),
                    usfac,
                    roff,
                    coff)
            #CC = conj(dftups(buf2ft.*conj(buf1ft),ceil(usfac*1.5),ceil(usfac*1.5),usfac,...
            #    dftshift-row_shift*usfac,dftshift-col_shift*usfac))/(md2*nd2*usfac^2);
            CC = conj(upsampled)/(md2*nd2*usfac**2);
            if DEBUG:
                pylab.figure(2)
                pylab.clf()
                pylab.subplot(221)
                pylab.imshow(abs(upsampled)); pylab.title('upsampled')
                pylab.subplot(222)
                pylab.imshow(abs(CC)); pylab.title('CC upsampled')
                pylab.subplot(223); pylab.imshow(np.abs(np.fft.fftshift(np.fft.ifft2(buf2ft * conj(buf1ft))))); pylab.title('xc')
                yy,xx = np.indices([m*usfac,n*usfac],dtype='float')
                pylab.contour(yy/usfac/2.-0.5+1,xx/usfac/2.-0.5-1, np.abs(dftups((buf2ft*conj(buf1ft)),m*usfac,n*usfac,usfac)))
                pylab.subplot(224); pylab.imshow(np.abs(dftups((buf2ft*conj(buf1ft)),ceil(usfac*zoom_factor),ceil(usfac*zoom_factor),usfac))); pylab.title('unshifted ups')
            # Locate maximum and map back to original pixel grid
            rloc,cloc = np.unravel_index(abs(CC).argmax(), CC.shape)
            rloc0,cloc0 = np.unravel_index(abs(CC).argmax(), CC.shape)
            CCmax = CC[rloc,cloc]
            #[max1,loc1] = CC.max(axis=0), CC.argmax(axis=0)
            #[max2,loc2] = max1.max(),max1.argmax()
            #rloc=loc1[loc2];
            #cloc=loc2;
            #CCmax = CC[rloc,cloc];
            rg00 = dftups(buf1ft * conj(buf1ft),1,1,usfac)/(md2*nd2*usfac**2);
            rf00 = dftups(buf2ft * conj(buf2ft),1,1,usfac)/(md2*nd2*usfac**2);
            #if DEBUG: print rloc,row_shift,cloc,col_shift,dftshift
            rloc = rloc - dftshift #+ 1 # +1 # questionable/failed hack + 1;
            cloc = cloc - dftshift #+ 1 # -1 # questionable/failed hack - 1;
            #if DEBUG: print rloc,row_shift,cloc,col_shift,dftshift
            row_shift = row_shift0 + rloc/usfac;
            col_shift = col_shift0 + cloc/usfac;
            #if DEBUG: print rloc/usfac,row_shift,cloc/usfac,col_shift
            if DEBUG: print "Off by: ",(0.25 - float(rloc)/usfac)*usfac , (-0.25 - float(cloc)/usfac)*usfac
            if DEBUG: print "correction was: ",rloc/usfac, cloc/usfac
            if DEBUG: print "Coordinate went from",row_shift2,col_shift2,"to",row_shift0,col_shift0,"to", row_shift, col_shift
            if DEBUG: print "dftsh - usfac:", dftshift-usfac
            if DEBUG: print rloc,cloc,row_shift,col_shift,CCmax,dftshift,rloc0,cloc0

        # If upsampling = 2, no additional pixel shift refinement
        else:
            rg00 = sum(sum( buf1ft * conj(buf1ft) ))/m/n;
            rf00 = sum(sum( buf2ft * conj(buf2ft) ))/m/n;
            row_shift = row_shift2
            col_shift = col_shift2
        error = 1.0 - CCmax * conj(CCmax)/(rg00*rf00);
        error = sqrt(abs(error));
        diffphase=arctan2(imag(CCmax),real(CCmax));
        # If its only one row or column the shift along that dimension has no
        # effect. We set to zero.
        if md2 == 1:
            row_shift = 0;
        if nd2 == 1:
            col_shift = 0;
        #output=[error,diffphase,row_shift,col_shift];
        output=[row_shift,col_shift]

    if return_error:
        # simple estimate of the precision of the fft approach
        output += [1./usfac,1./usfac]

    # Compute registered version of buf2ft
    if (return_registered):
        if (usfac > 0):
            nr,nc=shape(buf2ft);
            Nr = np.fft.ifftshift(np.linspace(-np.fix(nr/2),np.ceil(nr/2)-1,nr))
            Nc = np.fft.ifftshift(np.linspace(-np.fix(nc/2),np.ceil(nc/2)-1,nc))
            [Nc,Nr] = np.meshgrid(Nc,Nr);
            # Apply the measured shift as a phase ramp in Fourier space.
            Greg = buf2ft * np.exp(1j*2*np.pi*(-row_shift*Nr/nr-col_shift*Nc/nc));
            Greg = Greg*np.exp(1j*diffphase);
        elif (usfac == 0):
            Greg = buf2ft*np.exp(1j*diffphase);
        output.append(Greg)

    return output
|
|
# Python Tools for Visual Studio
# Copyright(c) Microsoft Corporation
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the License); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY
# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
__author__ = "Microsoft Corporation <ptvshelp@microsoft.com>"
__version__ = "3.0.0.0"
import os
import sys
import json
import unittest
import socket
import traceback
from types import CodeType, FunctionType
import signal
try:
import thread
except:
import _thread as thread
class _TestOutput(object):
"""file like object which redirects output to the repl window."""
errors = 'strict'
def __init__(self, old_out, is_stdout):
self.is_stdout = is_stdout
self.old_out = old_out
if sys.version >= '3.' and hasattr(old_out, 'buffer'):
self.buffer = _TestOutputBuffer(old_out.buffer, is_stdout)
def flush(self):
if self.old_out:
self.old_out.flush()
def writelines(self, lines):
for line in lines:
self.write(line)
@property
def encoding(self):
return 'utf8'
def write(self, value):
_channel.send_event('stdout' if self.is_stdout else 'stderr', content=value)
if self.old_out:
self.old_out.write(value)
# flush immediately, else things go wonky and out of order
self.flush()
def isatty(self):
return True
def next(self):
pass
@property
def name(self):
if self.is_stdout:
return "<stdout>"
else:
return "<stderr>"
def __getattr__(self, name):
return getattr(self.old_out, name)
class _TestOutputBuffer(object):
def __init__(self, old_buffer, is_stdout):
self.buffer = old_buffer
self.is_stdout = is_stdout
def write(self, data):
_channel.send_event('stdout' if self.is_stdout else 'stderr', content=data)
self.buffer.write(data)
def flush(self):
self.buffer.flush()
def truncate(self, pos = None):
return self.buffer.truncate(pos)
def tell(self):
return self.buffer.tell()
def seek(self, pos, whence = 0):
return self.buffer.seek(pos, whence)
class _IpcChannel(object):
    """JSON-over-socket event channel back to the test adapter.

    A background thread watches the socket; any inbound data is treated
    as a request to stop the test run (via the supplied callback).
    """

    def __init__(self, socket, callback):
        # NOTE: the 'socket' parameter shadows the socket module here.
        self.socket = socket
        self.seq = 0
        self.callback = callback
        self.lock = thread.allocate_lock()
        self._closed = False
        # start the testing reader thread loop
        self.test_thread_id = thread.start_new_thread(self.readSocket, ())

    def close(self):
        # Only flags the channel as closed so readSocket() can exit
        # quietly; the socket itself is closed by the owner.
        self._closed = True

    def readSocket(self):
        try:
            data = self.socket.recv(1024)
            # Any received payload is interpreted as a stop request.
            self.callback()
        except OSError:
            # Expected when the socket is torn down after close().
            if not self._closed:
                raise

    def receive(self):
        pass

    def send_event(self, name, **args):
        # Serialize under the lock so header/body pairs from concurrent
        # writers never interleave on the wire.
        with self.lock:
            body = {'type': 'event', 'seq': self.seq, 'event':name, 'body':args}
            self.seq += 1
            content = json.dumps(body).encode('utf8')
            headers = ('Content-Length: %d\n\n' % (len(content), )).encode('utf8')
            self.socket.send(headers)
            self.socket.send(content)
_channel = None
class VsTestResult(unittest.TextTestResult):
    """TextTestResult that forwards every test outcome over the IPC channel."""

    def startTest(self, test):
        super(VsTestResult, self).startTest(test)
        if _channel is not None:
            _channel.send_event(name='start', test=test.id())

    def addError(self, test, err):
        super(VsTestResult, self).addError(test, err)
        self.sendResult(test, 'error', err)

    def addFailure(self, test, err):
        super(VsTestResult, self).addFailure(test, err)
        self.sendResult(test, 'failed', err)

    def addSuccess(self, test):
        super(VsTestResult, self).addSuccess(test)
        self.sendResult(test, 'passed')

    def addSkip(self, test, reason):
        super(VsTestResult, self).addSkip(test, reason)
        self.sendResult(test, 'skipped')

    def addExpectedFailure(self, test, err):
        super(VsTestResult, self).addExpectedFailure(test, err)
        self.sendResult(test, 'failed', err)

    def addUnexpectedSuccess(self, test):
        super(VsTestResult, self).addUnexpectedSuccess(test)
        self.sendResult(test, 'passed')

    def sendResult(self, test, outcome, trace=None):
        """Report a single test outcome (with optional traceback) to VS."""
        if _channel is None:
            return

        tb = None
        message = None
        if trace is not None:
            traceback.print_exc()
            # Drop the leading 'Traceback (most recent call last)' banner.
            tb = ''.join(traceback.format_exception(*trace)[1:])
            message = str(trace[1])

        _channel.send_event(
            name='result',
            outcome=outcome,
            traceback=tb,
            message=message,
            test=test.id()
        )
def stopTests():
    """Interrupt the running test suite by signalling our own process.

    Prefers SIGUSR1 (not available on Windows, where accessing it raises
    AttributeError) and falls back to SIGTERM. Failures are swallowed:
    this is a best-effort stop request.
    """
    # Fixed: previously used bare 'except:', which would also swallow
    # KeyboardInterrupt/SystemExit; narrowed to Exception.
    try:
        os.kill(os.getpid(), signal.SIGUSR1)
    except Exception:
        try:
            os.kill(os.getpid(), signal.SIGTERM)
        except Exception:
            pass
class ExitCommand(Exception):
    """Raised from the signal handler to unwind out of the test run."""
    pass
def signal_handler(signal, frame):
    """Signal handler installed in main(); aborts the run via ExitCommand.

    NOTE(review): the 'signal' parameter shadows the signal module inside
    this function (harmless here, as the module is not used in the body).
    """
    raise ExitCommand()
def main():
    """Entry point: parse options, run the requested tests, report to VS.

    When --result-port is given, connects an _IpcChannel to the Visual
    Studio test adapter on localhost, mirrors stdout/stderr over it, and
    streams per-test results. Exits with 0 on success, 1 on failure.
    """
    import os
    import sys
    import unittest
    from optparse import OptionParser
    global _channel

    parser = OptionParser(prog = 'visualstudio_py_testlauncher', usage = 'Usage: %prog [<option>] <test names>... ')
    parser.add_option('--debug', action='store_true', help='Whether debugging the unit tests')
    parser.add_option('-x', '--mixed-mode', action='store_true', help='wait for mixed-mode debugger to attach')
    parser.add_option('-t', '--test', type='str', dest='tests', action='append', help='specifies a test to run')
    parser.add_option('--testFile', type='str', help='Fully qualitified path to file name')
    parser.add_option('-c', '--coverage', type='str', help='enable code coverage and specify filename')
    parser.add_option('-r', '--result-port', type='int', help='connect to port on localhost and send test results')
    parser.add_option('--us', type='str', help='Directory to start discovery')
    parser.add_option('--up', type='str', help='Pattern to match test files (''test*.py'' default)')
    parser.add_option('--ut', type='str', help='Top level directory of project (default to start directory)')
    parser.add_option('--uvInt', '--verboseInt', type='int', help='Verbose output (0 none, 1 (no -v) simple, 2 (-v) full)')
    parser.add_option('--uf', '--failfast', type='str', help='Stop on first failure')
    parser.add_option('--uc', '--catch', type='str', help='Catch control-C and display results')
    (opts, _) = parser.parse_args()

    if opts.debug:
        from ptvsd.visualstudio_py_debugger import DONT_DEBUG, DEBUG_ENTRYPOINTS, get_code

    sys.path[0] = os.getcwd()

    if opts.result_port:
        # Prefer SIGUSR1 (POSIX); fall back to SIGTERM where unavailable.
        # Fixed: bare 'except:' narrowed to 'except Exception:' so
        # KeyboardInterrupt/SystemExit are no longer swallowed.
        try:
            signal.signal(signal.SIGUSR1, signal_handler)
        except Exception:
            try:
                signal.signal(signal.SIGTERM, signal_handler)
            except Exception:
                pass
        _channel = _IpcChannel(socket.create_connection(('127.0.0.1', opts.result_port)), stopTests)
        sys.stdout = _TestOutput(sys.stdout, is_stdout = True)
        sys.stderr = _TestOutput(sys.stderr, is_stdout = False)

    if opts.debug:
        DONT_DEBUG.append(os.path.normcase(__file__))
        DEBUG_ENTRYPOINTS.add(get_code(main))
        pass
    elif opts.mixed_mode:
        # For mixed-mode attach, there's no ptvsd and hence no wait_for_attach(),
        # so we have to use Win32 API in a loop to do the same thing.
        from time import sleep
        from ctypes import windll, c_char
        while True:
            if windll.kernel32.IsDebuggerPresent() != 0:
                break
            sleep(0.1)
        try:
            debugger_helper = windll['Microsoft.PythonTools.Debugger.Helper.x86.dll']
        except WindowsError:
            debugger_helper = windll['Microsoft.PythonTools.Debugger.Helper.x64.dll']
        isTracing = c_char.in_dll(debugger_helper, "isTracing")
        while True:
            if isTracing.value != 0:
                break
            sleep(0.1)

    cov = None
    try:
        if opts.coverage:
            # Coverage is best-effort: if the module is missing or fails
            # to start, run the tests without it.
            try:
                import coverage
                cov = coverage.coverage(opts.coverage)
                cov.load()
                cov.start()
            except Exception:
                pass

        if opts.tests is None and opts.testFile is None:
            if opts.us is None:
                opts.us = '.'
            if opts.up is None:
                opts.up = 'test*.py'
            tests = unittest.defaultTestLoader.discover(opts.us, opts.up)
        else:
            # loadTestsFromNames doesn't work well (with duplicate file names or class names)
            # Easier approach is find the test suite and use that for running
            loader = unittest.TestLoader()
            # opts.us will be passed in
            suites = loader.discover(opts.us, pattern=os.path.basename(opts.testFile))
            suite = None
            tests = None
            if opts.tests is None:
                # Run everything in the test file
                tests = suites
            else:
                # Run a specific test class or test method
                for test_suite in suites._tests:
                    for cls in test_suite._tests:
                        try:
                            for m in cls._tests:
                                testId = m.id()
                                if testId.startswith(opts.tests[0]):
                                    suite = cls
                                if testId == opts.tests[0]:
                                    tests = m
                                    break
                        except Exception:
                            # Fixed: this used to call
                            # traceback.format_exception() with no
                            # arguments, which itself raises TypeError;
                            # format_exc() formats the active exception.
                            errorMessage = traceback.format_exc()
                if tests is None:
                    tests = suite
            if tests is None and suite is None:
                _channel.send_event(
                    name='error',
                    outcome='',
                    traceback = '',
                    message = 'Failed to identify the test',
                    test = ''
                )

        if opts.uvInt is None:
            opts.uvInt = 0
        if opts.uf is not None:
            runner = unittest.TextTestRunner(verbosity=opts.uvInt, resultclass=VsTestResult, failfast=True)
        else:
            runner = unittest.TextTestRunner(verbosity=opts.uvInt, resultclass=VsTestResult)
        result = runner.run(tests)
        if _channel is not None:
            _channel.close()
        sys.exit(not result.wasSuccessful())
    finally:
        if cov is not None:
            cov.stop()
            cov.save()
            cov.xml_report(outfile = opts.coverage + '.xml', omit=__file__)
        if _channel is not None:
            _channel.send_event(
                name='done'
            )
            _channel.socket.close()
        # prevent generation of the error 'Error in sys.exitfunc:'
        try:
            sys.stdout.close()
        except Exception:
            pass
        try:
            sys.stderr.close()
        except Exception:
            pass
|
|
# --- Game timing and rate-limit settings (seconds unless noted) ---
PING_WAIT = 300 # Seconds
PING_MIN_WAIT = 30
MINIMUM_WAIT = 60
EXTRA_WAIT = 20
MAXIMUM_WAITED = 2 # limit for amount of !wait's
STATS_RATE_LIMIT = 15
VOTES_RATE_LIMIT = 15
ADMINS_RATE_LIMIT = 300
SHOTS_MULTIPLIER = .12 # ceil(shots_multiplier * len_players) = bullets given
MAX_PLAYERS = 30
DRUNK_SHOTS_MULTIPLIER = 3
NIGHT_TIME_LIMIT = 120
NIGHT_TIME_WARN = 0 # should be less than NIGHT_TIME_LIMIT
DAY_TIME_LIMIT_WARN = 780
DAY_TIME_LIMIT_CHANGE = 120 # seconds after DAY_TIME_LIMIT_WARN has passed
KILL_IDLE_TIME = 300
WARN_IDLE_TIME = 180
PART_GRACE_TIME = 7
QUIT_GRACE_TIME = 30
MAX_PRIVMSG_TARGETS = 1
LOG_FILENAME = ""
BARE_LOG_FILENAME = ""

# Gun outcome probabilities (Python 3 true division; each tuple sums to 1).
#                          HIT MISS SUICIDE
GUN_CHANCES = ( 5/7 , 1/7 , 1/7 )
DRUNK_GUN_CHANCES = ( 2/7 , 4/7 , 1/7 )
MANSLAUGHTER_CHANCE = 1/5 # ACCIDENTAL HEADSHOT (FATAL)

GUNNER_KILLS_WOLF_AT_NIGHT_CHANCE = 0
GUARDIAN_ANGEL_DIES_CHANCE = 1/2
DETECTIVE_REVEALED_CHANCE = 2/5

#################################################################################################################
#   ROLE INDEX:   PLAYERS   SEER    WOLF   CURSED   DRUNK   HARLOT  TRAITOR  GUNNER   CROW    ANGEL  DETECTIVE ##
#################################################################################################################
# Keyed by minimum player count; each tuple gives the role counts that take
# effect at that size (columns per the index above).
ROLES_GUIDE = {  4 : (      1    ,   1    ,   0    ,    0   ,   0    ,   0    ,   0    ,   0    ,   0    ,   0   ), ##
                 6 : (      1    ,   1    ,   1    ,    1   ,   0    ,   0    ,   0    ,   0    ,   0    ,   0   ), ##
                 8 : (      1    ,   2    ,   1    ,    1   ,   1    ,   0    ,   0    ,   0    ,   0    ,   0   ), ##
                10 : (      1    ,   2    ,   1    ,    1   ,   1    ,   1    ,   1    ,   0    ,   0    ,   0   ), ##
                11 : (      1    ,   2    ,   1    ,    1   ,   1    ,   1    ,   1    ,   0    ,   1    ,   0   ), ##
                15 : (      1    ,   3    ,   1    ,    1   ,   1    ,   1    ,   1    ,   0    ,   1    ,   1   ), ##
                22 : (      1    ,   4    ,   1    ,    1   ,   1    ,   1    ,   1    ,   0    ,   1    ,   1   ), ##
                29 : (      1    ,   5    ,   1    ,    1   ,   1    ,   1    ,   1    ,   0    ,   1    ,   1   ), ##
              None : (      0    ,   0    ,   0    ,    0   ,   0    ,   0    ,   0    ,   0    ,   0    ,   0   )} ##
#################################################################################################################
#   Notes:                                                                                                     ##
#################################################################################################################

GAME_MODES = {}
AWAY = []  # cloaks of people who are away.
SIMPLE_NOTIFY = []  # cloaks of people who !simple, who want everything /notice'd

# Column index -> role name for the ROLES_GUIDE tuples above.
ROLE_INDICES = {0 : "seer",
                1 : "wolf",
                2 : "cursed villager",
                3 : "village drunk",
                4 : "harlot",
                5 : "traitor",
                6 : "gunner",
                7 : "werecrow",
                8 : "guardian angel",
                9 : "detective"}

# Reverse mapping: role name -> column index.
INDEX_OF_ROLE = dict((v,k) for k,v in ROLE_INDICES.items())

NO_VICTIMS_MESSAGES = ("The body of a young penguin pet is found.",
                       "A pool of blood and wolf paw prints are found.",
                       "Traces of wolf fur are found.")
# \u0002 is the IRC bold control character.
LYNCH_MESSAGES = ("The villagers, after much debate, finally decide on lynching \u0002{0}\u0002, who turned out to be... a \u0002{1}\u0002.",
                  "Under a lot of noise, the pitchfork-bearing villagers lynch \u0002{0}\u0002, who turned out to be... a \u0002{1}\u0002.",
                  "The mob drags a protesting \u0002{0}\u0002 to the hanging tree. S/He succumbs to the will of the horde, and is hanged. It is discovered (s)he was a \u0002{1}\u0002.",
                  "Resigned to his/her fate, \u0002{0}\u0002 is led to the gallows. After death, it is discovered (s)he was a \u0002{1}\u0002.")

import botconfig

RULES = (botconfig.CHANNEL + " channel rules: 1) Be nice to others. 2) Do not share information "+
         "after death. 3) No bots allowed. 4) Do not play with clones.\n"+
         "5) Do not quit unless you need to leave. 6) No swearing and keep it "+
         "family-friendly. 7) Do not paste PM's from the bot during the game. "+
         "8) Use common sense. 9) Waiting for timeouts is discouraged.")

# Other settings:
START_WITH_DAY = False
WOLF_STEALS_GUN = False  # at night, the wolf can steal steal the victim's bullets
OPT_IN_PING = False  # instead of !away/!back, users can opt-in to be pinged
PING_IN = []  # cloaks of users who have opted in for ping
is_role = lambda plyr, rol: rol in ROLES and plyr in ROLES[rol]
def plural(role):
    """Return the English plural of a role name ("wolf" -> "wolves")."""
    irregular = {"wolf": "wolves", "person": "people"}
    return irregular.get(role, role + "s")
def list_players():
    """Return a flat list of every player currently assigned to any role."""
    return [player for members in ROLES.values() for player in members]
def list_players_and_roles():
    """Return a mapping of player nick -> role name."""
    return {player: role for role in ROLES.keys() for player in ROLES[role]}
get_role = lambda plyr: list_players_and_roles()[plyr]
def del_player(pname):
    """Remove pname from whichever role list currently holds them."""
    ROLES[get_role(pname)].remove(pname)
class InvalidModeException(Exception): pass
def game_mode(name):
    """Class decorator: register the decorated class in GAME_MODES under name."""
    def register(mode_cls):
        GAME_MODES[name] = mode_cls
        return mode_cls
    return register
# User-facing plural names accepted by "!fgame roles=..." mapped to the
# corresponding index inside a ROLES_GUIDE tuple.
CHANGEABLE_ROLES = { "seers" : INDEX_OF_ROLE["seer"],
                     "wolves" : INDEX_OF_ROLE["wolf"],
                     "cursed" : INDEX_OF_ROLE["cursed villager"],
                     "drunks" : INDEX_OF_ROLE["village drunk"],
                     "harlots" : INDEX_OF_ROLE["harlot"],
                     "traitors" : INDEX_OF_ROLE["traitor"],
                     "gunners" : INDEX_OF_ROLE["gunner"],
                     "werecrows" : INDEX_OF_ROLE["werecrow"],
                     "angels" : INDEX_OF_ROLE["guardian angel"],
                     "detectives" : INDEX_OF_ROLE["detective"]}
# TODO: implement game modes
@game_mode("roles")
class ChangedRolesMode(object):
    """Game mode with custom role counts.

    Example: !fgame roles=wolves:1,seers:0,angels:1

    Parses the "role:count" pairs from ``arg`` and overrides every
    player-count row of ROLES_GUIDE with the resulting tuple.

    Raises InvalidModeException on any malformed pair, unknown role name
    or non-integer count.
    """
    def __init__(self, arg):
        self.ROLES_GUIDE = ROLES_GUIDE.copy()
        # Start from the all-zero default row and patch in the user's counts.
        lx = list(ROLES_GUIDE[None])
        pairs = arg.split(",")
        for pair in pairs:
            change = pair.split(":")
            if len(change) != 2:
                raise InvalidModeException("Invalid syntax for mode roles.")
            role, num = change
            try:
                num = int(num)
            except ValueError:
                raise InvalidModeException("A bad value was used in mode roles.")
            try:
                lx[CHANGEABLE_ROLES[role.lower()]] = num
            except KeyError:
                raise InvalidModeException(("The role \u0002{0}\u0002 "+
                                            "is not valid.").format(role))
        # Every player-count bracket gets the same custom role tuple.
        for k in self.ROLES_GUIDE:
            self.ROLES_GUIDE[k] = tuple(lx)
# Persistence
# Load saved settings from the on-disk sqlite database at import time.
import sqlite3
import os
# check_same_thread=False: the bot touches the connection from multiple
# threads; conn and the cursor c below are shared module-wide.
conn = sqlite3.connect("data.sqlite3", check_same_thread = False)
with conn:  # "with conn" commits on success, rolls back on exception
    c = conn.cursor()
    c.execute('CREATE TABLE IF NOT EXISTS away (nick TEXT)') # whoops, i mean cloak, not nick
    c.execute('CREATE TABLE IF NOT EXISTS simple_role_notify (cloak TEXT)') # people who understand each role
    c.execute('SELECT * FROM away')
    for row in c:
        AWAY.append(row[0])
    c.execute('SELECT * FROM simple_role_notify')
    for row in c:
        SIMPLE_NOTIFY.append(row[0])
    # populate the roles table (rebuilt from scratch on every startup)
    c.execute('DROP TABLE IF EXISTS roles')
    c.execute('CREATE TABLE roles (id INTEGER PRIMARY KEY AUTOINCREMENT, role TEXT)')
    for x in ["villager"]+list(ROLE_INDICES.values()):
        c.execute("INSERT OR REPLACE INTO roles (role) VALUES (?)", (x,))
    c.execute(('CREATE TABLE IF NOT EXISTS rolestats (player TEXT, role TEXT, '+
        'teamwins SMALLINT, individualwins SMALLINT, totalgames SMALLINT, '+
        'UNIQUE(player, role))'))
    if OPT_IN_PING:
        c.execute('CREATE TABLE IF NOT EXISTS ping (cloak text)')
        c.execute('SELECT * FROM ping')
        for row in c:
            PING_IN.append(row[0])
def remove_away(clk):
    """Delete cloak clk from the persisted away list."""
    with conn:
        c.execute('DELETE from away where nick=?', (clk,))
def add_away(clk):
    """Persist cloak clk as away."""
    with conn:
        c.execute('INSERT into away VALUES (?)', (clk,))
def remove_simple_rolemsg(clk):
    """Delete cloak clk from the persisted simple-notification list."""
    with conn:
        c.execute('DELETE from simple_role_notify where cloak=?', (clk,))
def add_simple_rolemsg(clk):
    """Persist cloak clk as preferring simple role notifications."""
    with conn:
        c.execute('INSERT into simple_role_notify VALUES (?)', (clk,))
def remove_ping(clk):
    """Delete cloak clk from the persisted opt-in ping list."""
    with conn:
        c.execute('DELETE from ping where cloak=?', (clk,))
def add_ping(clk):
    """Persist cloak clk as opted in for ping."""
    with conn:
        c.execute('INSERT into ping VALUES (?)', (clk,))
def update_role_stats(acc, role, won, iwon):
    """Record one finished game for account ``acc`` playing ``role``.

    won  -- True when the player's team won the game
    iwon -- True when the player individually won
    """
    with conn:
        c.execute(("SELECT teamwins, individualwins, totalgames FROM rolestats "+
                   "WHERE player=? AND role=?"), (acc, role))
        row = c.fetchone()
        # Resume from the stored totals, or start from zero on first game.
        # (The old dead pre-initialisation to a mismatched name was removed.)
        if row:
            wins, iwins, total = row
        else:
            wins, iwins, total = 0, 0, 0
        if won:
            wins += 1
        if iwon:
            iwins += 1
        total += 1
        c.execute("INSERT OR REPLACE INTO rolestats VALUES (?,?,?,?,?)",
                  (acc, role, wins, iwins, total))
|
|
"Test the functionality of Python classes implementing operators."
import unittest
from test import support
# Names of the dunder methods synthesized onto AllTests below; each entry
# "name" becomes a tracked "__name__" method via method_template.
testmeths = [
# Binary operations
    "add",
    "radd",
    "sub",
    "rsub",
    "mul",
    "rmul",
    "truediv",
    "rtruediv",
    "mod",
    "rmod",
    "divmod",
    "rdivmod",
    "pow",
    "rpow",
    "rshift",
    "rrshift",
    "lshift",
    "rlshift",
    "and",
    "rand",
    "or",
    "ror",
    "xor",
    "rxor",
# List/dict operations
    "contains",
    "getitem",
    "setitem",
    "delitem",
# Unary operations
    "neg",
    "pos",
    "abs",
# generic operations
    "init",
    ]
# These need to return something other than None
#    "hash",
#    "str",
#    "repr",
#    "int",
#    "float",
# These are separate because they can influence the test of other methods.
#    "getattr",
#    "setattr",
#    "delattr",
# Global call log shared by every tracked method: (method name, args) tuples.
callLst = []
def trackCall(f):
    """Wrap f so each invocation appends (f.__name__, args) to callLst."""
    def _recorded(*args, **kwargs):
        callLst.append((f.__name__, args))
        return f(*args, **kwargs)
    return _recorded
statictests = """
@trackCall
def __hash__(self, *args):
return hash(id(self))
@trackCall
def __str__(self, *args):
return "AllTests"
@trackCall
def __repr__(self, *args):
return "AllTests"
@trackCall
def __int__(self, *args):
return 1
@trackCall
def __index__(self, *args):
return 1
@trackCall
def __float__(self, *args):
return 1.0
@trackCall
def __eq__(self, *args):
return True
@trackCall
def __ne__(self, *args):
return False
@trackCall
def __lt__(self, *args):
return False
@trackCall
def __le__(self, *args):
return True
@trackCall
def __gt__(self, *args):
return False
@trackCall
def __ge__(self, *args):
return True
"""
# Synthesize all the other AllTests methods from the names in testmeths.
method_template = """\
@trackCall
def __%s__(self, *args):
pass
"""
d = {}
exec(statictests, globals(), d)
for method in testmeths:
exec(method_template % method, globals(), d)
AllTests = type("AllTests", (object,), d)
del d, statictests, method, method_template
class ClassTests(unittest.TestCase):
    """Drive each AllTests dunder through its language syntax and verify,
    via the callLst log, that exactly the expected special method ran.

    Fix over the original: the deprecated assertEquals/assertNotEquals
    aliases (removed in Python 3.12) are replaced by
    assertEqual/assertNotEqual.
    """
    def setUp(self):
        callLst[:] = []

    def assertCallStack(self, expected_calls):
        actualCallList = callLst[:]  # need to copy because the comparison below will add
                                     # additional calls to callLst
        if expected_calls != actualCallList:
            self.fail("Expected call list:\n %s\ndoes not match actual call list\n %s" %
                      (expected_calls, actualCallList))

    def testInit(self):
        foo = AllTests()
        self.assertCallStack([("__init__", (foo,))])

    def testBinaryOps(self):
        testme = AllTests()
        # Binary operations

        callLst[:] = []
        testme + 1
        self.assertCallStack([("__add__", (testme, 1))])

        callLst[:] = []
        1 + testme
        self.assertCallStack([("__radd__", (testme, 1))])

        callLst[:] = []
        testme - 1
        self.assertCallStack([("__sub__", (testme, 1))])

        callLst[:] = []
        1 - testme
        self.assertCallStack([("__rsub__", (testme, 1))])

        callLst[:] = []
        testme * 1
        self.assertCallStack([("__mul__", (testme, 1))])

        callLst[:] = []
        1 * testme
        self.assertCallStack([("__rmul__", (testme, 1))])

        # Python 2 leftover: 1/2 == 0 only under integer division, so this
        # branch never runs on Python 3.
        if 1/2 == 0:
            callLst[:] = []
            testme / 1
            self.assertCallStack([("__div__", (testme, 1))])

            callLst[:] = []
            1 / testme
            self.assertCallStack([("__rdiv__", (testme, 1))])

        callLst[:] = []
        testme % 1
        self.assertCallStack([("__mod__", (testme, 1))])

        callLst[:] = []
        1 % testme
        self.assertCallStack([("__rmod__", (testme, 1))])

        callLst[:] = []
        divmod(testme,1)
        self.assertCallStack([("__divmod__", (testme, 1))])

        callLst[:] = []
        divmod(1, testme)
        self.assertCallStack([("__rdivmod__", (testme, 1))])

        callLst[:] = []
        testme ** 1
        self.assertCallStack([("__pow__", (testme, 1))])

        callLst[:] = []
        1 ** testme
        self.assertCallStack([("__rpow__", (testme, 1))])

        callLst[:] = []
        testme >> 1
        self.assertCallStack([("__rshift__", (testme, 1))])

        callLst[:] = []
        1 >> testme
        self.assertCallStack([("__rrshift__", (testme, 1))])

        callLst[:] = []
        testme << 1
        self.assertCallStack([("__lshift__", (testme, 1))])

        callLst[:] = []
        1 << testme
        self.assertCallStack([("__rlshift__", (testme, 1))])

        callLst[:] = []
        testme & 1
        self.assertCallStack([("__and__", (testme, 1))])

        callLst[:] = []
        1 & testme
        self.assertCallStack([("__rand__", (testme, 1))])

        callLst[:] = []
        testme | 1
        self.assertCallStack([("__or__", (testme, 1))])

        callLst[:] = []
        1 | testme
        self.assertCallStack([("__ror__", (testme, 1))])

        callLst[:] = []
        testme ^ 1
        self.assertCallStack([("__xor__", (testme, 1))])

        callLst[:] = []
        1 ^ testme
        self.assertCallStack([("__rxor__", (testme, 1))])

    def testListAndDictOps(self):
        testme = AllTests()

        # List/dict operations

        class Empty: pass

        try:
            1 in Empty()
            self.fail('failed, should have raised TypeError')
        except TypeError:
            pass

        callLst[:] = []
        1 in testme
        self.assertCallStack([('__contains__', (testme, 1))])

        callLst[:] = []
        testme[1]
        self.assertCallStack([('__getitem__', (testme, 1))])

        callLst[:] = []
        testme[1] = 1
        self.assertCallStack([('__setitem__', (testme, 1, 1))])

        callLst[:] = []
        del testme[1]
        self.assertCallStack([('__delitem__', (testme, 1))])

        callLst[:] = []
        testme[:42]
        self.assertCallStack([('__getitem__', (testme, slice(None, 42)))])

        callLst[:] = []
        testme[:42] = "The Answer"
        self.assertCallStack([('__setitem__', (testme, slice(None, 42),
                                               "The Answer"))])

        callLst[:] = []
        del testme[:42]
        self.assertCallStack([('__delitem__', (testme, slice(None, 42)))])

        callLst[:] = []
        testme[2:1024:10]
        self.assertCallStack([('__getitem__', (testme, slice(2, 1024, 10)))])

        callLst[:] = []
        testme[2:1024:10] = "A lot"
        self.assertCallStack([('__setitem__', (testme, slice(2, 1024, 10),
                                               "A lot"))])

        callLst[:] = []
        del testme[2:1024:10]
        self.assertCallStack([('__delitem__', (testme, slice(2, 1024, 10)))])

        callLst[:] = []
        testme[:42, ..., :24:, 24, 100]
        self.assertCallStack([('__getitem__', (testme, (slice(None, 42, None),
                                                        Ellipsis,
                                                        slice(None, 24, None),
                                                        24, 100)))])

        callLst[:] = []
        testme[:42, ..., :24:, 24, 100] = "Strange"
        self.assertCallStack([('__setitem__', (testme, (slice(None, 42, None),
                                                        Ellipsis,
                                                        slice(None, 24, None),
                                                        24, 100), "Strange"))])

        callLst[:] = []
        del testme[:42, ..., :24:, 24, 100]
        self.assertCallStack([('__delitem__', (testme, (slice(None, 42, None),
                                                        Ellipsis,
                                                        slice(None, 24, None),
                                                        24, 100)))])

    def testUnaryOps(self):
        testme = AllTests()

        callLst[:] = []
        -testme
        self.assertCallStack([('__neg__', (testme,))])
        callLst[:] = []
        +testme
        self.assertCallStack([('__pos__', (testme,))])
        callLst[:] = []
        abs(testme)
        self.assertCallStack([('__abs__', (testme,))])
        callLst[:] = []
        int(testme)
        self.assertCallStack([('__int__', (testme,))])
        callLst[:] = []
        float(testme)
        self.assertCallStack([('__float__', (testme,))])
        callLst[:] = []
        oct(testme)
        self.assertCallStack([('__index__', (testme,))])
        callLst[:] = []
        hex(testme)
        self.assertCallStack([('__index__', (testme,))])

    def testMisc(self):
        testme = AllTests()

        callLst[:] = []
        hash(testme)
        self.assertCallStack([('__hash__', (testme,))])

        callLst[:] = []
        repr(testme)
        self.assertCallStack([('__repr__', (testme,))])

        callLst[:] = []
        str(testme)
        self.assertCallStack([('__str__', (testme,))])

        callLst[:] = []
        testme == 1
        self.assertCallStack([('__eq__', (testme, 1))])

        callLst[:] = []
        testme < 1
        self.assertCallStack([('__lt__', (testme, 1))])

        callLst[:] = []
        testme > 1
        self.assertCallStack([('__gt__', (testme, 1))])

        callLst[:] = []
        testme != 1
        self.assertCallStack([('__ne__', (testme, 1))])

        # Reflected comparisons: Python calls the right operand's mirror method.
        callLst[:] = []
        1 == testme
        self.assertCallStack([('__eq__', (1, testme))])

        callLst[:] = []
        1 < testme
        self.assertCallStack([('__gt__', (1, testme))])

        callLst[:] = []
        1 > testme
        self.assertCallStack([('__lt__', (1, testme))])

        callLst[:] = []
        1 != testme
        self.assertCallStack([('__ne__', (1, testme))])

    def testGetSetAndDel(self):
        # Interfering tests
        class ExtraTests(AllTests):
            @trackCall
            def __getattr__(self, *args):
                return "SomeVal"
            @trackCall
            def __setattr__(self, *args):
                pass
            @trackCall
            def __delattr__(self, *args):
                pass

        testme = ExtraTests()

        callLst[:] = []
        testme.spam
        self.assertCallStack([('__getattr__', (testme, "spam"))])

        callLst[:] = []
        testme.eggs = "spam, spam, spam and ham"
        self.assertCallStack([('__setattr__', (testme, "eggs",
                                               "spam, spam, spam and ham"))])

        callLst[:] = []
        del testme.cardinal
        self.assertCallStack([('__delattr__', (testme, "cardinal"))])

    def testDel(self):
        x = []

        class DelTest:
            def __del__(self):
                x.append("crab people, crab people")
        testme = DelTest()
        del testme
        import gc
        gc.collect()
        self.assertEqual(["crab people, crab people"], x)

    def testBadTypeReturned(self):
        # return values of some method are type-checked
        class BadTypeClass:
            def __int__(self):
                return None
            __float__ = __int__
            __str__ = __int__
            __repr__ = __int__
            __oct__ = __int__
            __hex__ = __int__

        for f in [int, float, str, repr, oct, hex]:
            self.assertRaises(TypeError, f, BadTypeClass())

    def testHashStuff(self):
        # Test correct errors from hash() on objects with comparisons but
        #  no __hash__

        class C0:
            pass

        hash(C0()) # This should work; the next two should raise TypeError

        class C2:
            def __eq__(self, other): return 1

        self.assertRaises(TypeError, hash, C2())

    def testSFBug532646(self):
        # Test for SF bug 532646

        class A:
            pass
        A.__call__ = A()
        a = A()

        try:
            a() # This should not segfault
        except RuntimeError:
            pass
        else:
            self.fail("Failed to raise RuntimeError")

    def testForExceptionsRaisedInInstanceGetattr2(self):
        # Tests for exceptions raised in instance_getattr2().

        def booh(self):
            raise AttributeError("booh")

        class A:
            a = property(booh)
        try:
            A().a # Raised AttributeError: A instance has no attribute 'a'
        except AttributeError as x:
            if str(x) != "booh":
                self.fail("attribute error for A().a got masked: %s" % x)

        class E:
            __eq__ = property(booh)
        E() == E() # In debug mode, caused a C-level assert() to fail

        class I:
            __init__ = property(booh)
        try:
            # In debug mode, printed XXX undetected error and
            #  raises AttributeError
            I()
        except AttributeError as x:
            pass
        else:
            self.fail("attribute error for I.__init__ got masked")

    def testHashComparisonOfMethods(self):
        # Test comparison and hash of methods
        class A:
            def __init__(self, x):
                self.x = x
            def f(self):
                pass
            def g(self):
                pass
            def __eq__(self, other):
                return self.x == other.x
            def __hash__(self):
                return self.x
        class B(A):
            pass

        a1 = A(1)
        a2 = A(2)
        self.assertEqual(a1.f, a1.f)
        self.assertNotEqual(a1.f, a2.f)
        self.assertNotEqual(a1.f, a1.g)
        self.assertEqual(a1.f, A(1).f)
        self.assertEqual(hash(a1.f), hash(a1.f))
        self.assertEqual(hash(a1.f), hash(A(1).f))

        self.assertNotEqual(A.f, a1.f)
        self.assertNotEqual(A.f, A.g)
        self.assertEqual(B.f, A.f)
        self.assertEqual(hash(B.f), hash(A.f))

        # the following triggers a SystemError in 2.4
        a = A(hash(A.f)^(-1))
        hash(a.f)
def test_main():
    # support.run_unittest is the CPython test-harness entry point.
    support.run_unittest(ClassTests)

if __name__=='__main__':
    test_main()
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2015-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A local Predictive Logistic Regression.
This module defines a Logistic Regression to make predictions locally or
embedded into your application without needing to send requests to
BigML.io.
This module cannot only save you a few credits, but also enormously
reduce the latency for each prediction and let you use your logistic
regressions offline.
Example usage (assuming that you have previously set up the BIGML_USERNAME
and BIGML_API_KEY environment variables and that you own the
logisticregression/id below):
from bigml.api import BigML
from bigml.logistic import LogisticRegression
api = BigML()
logistic_regression = LogisticRegression(
'logisticregression/5026965515526876630001b2')
logistic_regression.predict({"petal length": 3, "petal width": 1,
"sepal length": 1, "sepal width": 0.5})
"""
import logging
import math
import copy
from functools import cmp_to_key
from bigml.api import FINISHED
from bigml.api import get_status, get_api_connection, \
get_logistic_regression_id
from bigml.util import cast, check_no_missing_numerics, use_cache, load, \
PRECISION, NUMERIC
from bigml.basemodel import get_resource_dict, extract_objective
from bigml.model import parse_operating_point, sort_categories
from bigml.modelfields import ModelFields
LOGGER = logging.getLogger('BigML')
EXPANSION_ATTRIBUTES = {"categorical": "categories", "text": "tag_cloud",
"items": "items"}
def balance_input(input_data, fields):
    """Balancing the values in the input_data using the corresponding
    field scales.  Numeric values are centered on the field mean and, when
    the standard deviation is positive, divided by it (in place).
    """
    for field in input_data:
        if fields[field]['optype'] != NUMERIC:
            continue
        summary = fields[field]['summary']
        # Missing (None) summary statistics are treated as zero.
        mean = summary.get('mean', 0) or 0
        stddev = summary.get('standard_deviation', 0) or 0
        centered = input_data[field] - mean
        # if stddev is not positive, we only subtract the mean
        input_data[field] = centered / stddev if stddev > 0 else centered
class LogisticRegression(ModelFields):
    """ A lightweight wrapper around a logistic regression model.

    Uses a BigML remote logistic regression model to build a local version
    that can be used to generate predictions locally.

    """

    def __init__(self, logistic_regression, api=None, cache_get=None):
        """Build the local model from a resource dict, resource id, or cache.

        logistic_regression -- remote resource, its id, or its JSON dict
        api -- optional BigML API connection
        cache_get -- optional callable used to restore a cached model
        """

        if use_cache(cache_get):
            # using a cache to store the model attributes
            self.__dict__ = load(get_logistic_regression_id( \
                logistic_regression), cache_get)
            return

        self.resource_id = None
        self.class_names = None
        self.input_fields = []
        self.term_forms = {}
        self.tag_clouds = {}
        self.term_analysis = {}
        self.items = {}
        self.item_analysis = {}
        self.categories = {}
        self.coefficients = {}
        self.data_field_types = {}
        self.field_codings = {}
        self.numeric_fields = {}
        self.bias = None
        self.missing_numerics = None
        self.c = None
        self.eps = None
        self.lr_normalize = None
        self.balance_fields = None
        self.regularization = None
        api = get_api_connection(api)
        # Old models stored each category's coefficients as a flat list
        # instead of per-field groups; detected below and regrouped.
        old_coefficients = False

        self.resource_id, logistic_regression = get_resource_dict( \
            logistic_regression, "logisticregression", api=api)

        if 'object' in logistic_regression and \
            isinstance(logistic_regression['object'], dict):
            logistic_regression = logistic_regression['object']
        try:
            self.input_fields = logistic_regression.get("input_fields", [])
            self.dataset_field_types = logistic_regression.get(
                "dataset_field_types", {})
            self.weight_field = logistic_regression.get("weight_field")
            objective_field = logistic_regression['objective_fields'] if \
                logistic_regression['objective_fields'] else \
                logistic_regression['objective_field']
        except KeyError:
            raise ValueError("Failed to find the logistic regression expected "
                             "JSON structure. Check your arguments.")
        if 'logistic_regression' in logistic_regression and \
            isinstance(logistic_regression['logistic_regression'], dict):
            status = get_status(logistic_regression)
            if 'code' in status and status['code'] == FINISHED:
                logistic_regression_info = logistic_regression[ \
                    'logistic_regression']
                fields = logistic_regression_info.get('fields', {})

                if not self.input_fields:
                    # default input field order follows column numbers
                    self.input_fields = [ \
                        field_id for field_id, _ in
                        sorted(list(fields.items()),
                               key=lambda x: x[1].get("column_number"))]
                self.coefficients.update(logistic_regression_info.get( \
                    'coefficients', []))
                if not isinstance(list(self.coefficients.values())[0][0], list):
                    old_coefficients = True
                self.bias = logistic_regression_info.get('bias', True)
                self.c = logistic_regression_info.get('c')
                self.eps = logistic_regression_info.get('eps')
                self.lr_normalize = logistic_regression_info.get('normalize')
                self.balance_fields = logistic_regression_info.get( \
                    'balance_fields')
                self.regularization = logistic_regression_info.get( \
                    'regularization')
                self.field_codings = logistic_regression_info.get( \
                     'field_codings', {})
                # old models have no such attribute, so we set it to False in
                # this case
                self.missing_numerics = logistic_regression_info.get( \
                    'missing_numerics', False)
                objective_id = extract_objective(objective_field)
                missing_tokens = logistic_regression_info.get("missing_tokens")
                ModelFields.__init__(
                    self, fields,
                    objective_id=objective_id, terms=True, categories=True,
                    numerics=True, missing_tokens=missing_tokens)
                self.field_codings = logistic_regression_info.get( \
                  'field_codings', {})
                self.format_field_codings()
                # re-key codings given by field name to the field id
                for field_id in self.field_codings:
                    if field_id not in self.fields and \
                            field_id in self.inverted_fields:
                        self.field_codings.update( \
                            {self.inverted_fields[field_id]: \
                             self.field_codings[field_id]})
                        del self.field_codings[field_id]
                if old_coefficients:
                    self.map_coefficients()
                categories = self.fields[self.objective_id].get( \
                    "summary", {}).get('categories')
                # an extra coefficient row implies an implicit "" class
                if len(list(self.coefficients.keys())) > len(categories):
                    self.class_names = [""]
                else:
                    self.class_names = []
                self.class_names.extend(sorted([category[0]
                                                for category in categories]))
                # order matters
                self.objective_categories = [category[0]
                                             for category in categories]
            else:
                raise Exception("The logistic regression isn't finished yet")
        else:
            raise Exception("Cannot create the LogisticRegression instance."
                            " Could not find the 'logistic_regression' key"
                            " in the resource:\n\n%s" %
                            logistic_regression)

    def _sort_predictions(self, a, b, criteria):
        """Sorts the categories in the predicted node according to the
        given criteria

        """
        if a[criteria] == b[criteria]:
            # tie-break on the training-data category order
            return sort_categories(a, b, self.objective_categories)
        return 1 if b[criteria] > a[criteria] else - 1

    def predict_probability(self, input_data, compact=False):
        """Predicts a probability for each possible output class,
        based on input values.  The input fields must be a dictionary
        keyed by field name or field ID.

        :param input_data: Input data to be predicted
        :param compact: If False, prediction is returned as a list of maps, one
                        per class, with the keys "prediction" and "probability"
                        mapped to the name of the class and it's probability,
                        respectively.  If True, returns a list of probabilities
                        ordered by the sorted order of the class names.
        """
        distribution = self.predict(input_data, full=True)['distribution']
        distribution.sort(key=lambda x: x['category'])

        if compact:
            return [category['probability'] for category in distribution]
        return distribution

    def predict_operating(self, input_data,
                          operating_point=None):
        """Computes the prediction based on a user-given operating point.

        """
        kind, threshold, positive_class = parse_operating_point( \
            operating_point, ["probability"], self.class_names)
        predictions = self.predict_probability(input_data, False)
        position = self.class_names.index(positive_class)
        if predictions[position][kind] > threshold:
            prediction = predictions[position]
        else:
            # if the threshold is not met, the alternative class with
            # highest probability or confidence is returned
            predictions.sort( \
                key=cmp_to_key( \
                lambda a, b: self._sort_predictions(a, b, kind)))
            prediction = predictions[0: 2]
            if prediction[0]["category"] == positive_class:
                prediction = prediction[1]
            else:
                prediction = prediction[0]
        prediction["prediction"] = prediction["category"]
        del prediction["category"]
        return prediction

    def predict_operating_kind(self, input_data,
                               operating_kind=None):
        """Computes the prediction based on a user-given operating kind.

        """
        kind = operating_kind.lower()
        if kind == "probability":
            predictions = self.predict_probability(input_data,
                                                   False)
        else:
            raise ValueError("Only probability is allowed as operating kind"
                             " for logistic regressions.")
        predictions.sort( \
            key=cmp_to_key( \
            lambda a, b: self._sort_predictions(a, b, kind)))
        prediction = predictions[0]
        prediction["prediction"] = prediction["category"]
        del prediction["category"]
        return prediction

    def predict(self, input_data,
                operating_point=None, operating_kind=None,
                full=False):
        """Returns the class prediction and the probability distribution

        input_data: Input data to be predicted
        operating_point: In classification models, this is the point of the
                         ROC curve where the model will be used at. The
                         operating point can be defined in terms of:
                         - the positive_class, the class that is important to
                           predict accurately
                         - the probability_threshold,
                           the probability that is stablished
                           as minimum for the positive_class to be predicted.
                         The operating_point is then defined as a map with
                         two attributes, e.g.:
                           {"positive_class": "Iris-setosa",
                            "probability_threshold": 0.5}
        operating_kind: "probability". Sets the
                        property that decides the prediction. Used only if
                        no operating_point is used
        full: Boolean that controls whether to include the prediction's
              attributes. By default, only the prediction is produced. If set
              to True, the rest of available information is added in a
              dictionary format. The dictionary keys can be:
                  - prediction: the prediction value
                  - probability: prediction's probability
                  - distribution: distribution of probabilities for each
                                  of the objective field classes
                  - unused_fields: list of fields in the input data that
                                   are not being used in the model

        """

        # Checks and cleans input_data leaving the fields used in the model
        unused_fields = []
        new_data = self.filter_input_data( \
            input_data,
            add_unused_fields=full)
        if full:
            input_data, unused_fields = new_data
        else:
            input_data = new_data

        # Strips affixes for numeric values and casts to the final field type
        cast(input_data, self.fields)

        # When operating_point is used, we need the probabilities
        # of all possible classes to decide, so se use
        # the `predict_probability` method
        if operating_point:
            return self.predict_operating( \
                input_data, operating_point=operating_point)
        if operating_kind:
            return self.predict_operating_kind( \
                input_data, operating_kind=operating_kind)

        # In case that missing_numerics is False, checks that all numeric
        # fields are present in input data.
        if not self.missing_numerics:
            check_no_missing_numerics(input_data, self.model_fields,
                                      self.weight_field)

        if self.balance_fields:
            balance_input(input_data, self.fields)

        # Computes text and categorical field expansion
        unique_terms = self.get_unique_terms(input_data)

        probabilities = {}
        total = 0
        # Computes the contributions for each category
        for category in self.coefficients:
            probability = self.category_probability( \
                input_data, unique_terms, category)
            try:
                order = self.categories[self.objective_id].index(category)
            except ValueError:
                # the implicit "" class sorts after all real categories
                if category == '':
                    order = len(self.categories[self.objective_id])
            probabilities[category] = {"category": category,
                                       "probability": probability,
                                       "order": order}
            total += probabilities[category]["probability"]
        # Normalizes the contributions to get a probability
        for category in probabilities:
            probabilities[category]["probability"] /= total
            probabilities[category]["probability"] = round( \
                probabilities[category]["probability"], PRECISION)

        # Chooses the most probable category as prediction
        predictions = sorted(list(probabilities.items()),
                             key=lambda x: (x[1]["probability"],
                                            - x[1]["order"]), reverse=True)
        for prediction, probability in predictions:
            del probability['order']
        prediction, probability = predictions[0]

        result = {
            "prediction": prediction,
            "probability": probability["probability"],
            "distribution": [{"category": category,
                              "probability": probability["probability"]}
                             for category, probability in predictions]}
        if full:
            result.update({'unused_fields': unused_fields})
        else:
            result = result["prediction"]

        return result

    def category_probability(self, numeric_inputs, unique_terms, category):
        """Computes the probability for a concrete category

        Accumulates the linear contribution of numeric inputs, expanded
        text/items/categorical terms, missing-value coefficients and the
        bias term, then applies the logistic (sigmoid) function.
        """
        probability = 0
        norm2 = 0

        # numeric input data
        for field_id in numeric_inputs:
            coefficients = self.get_coefficients(category, field_id)
            probability += coefficients[0] * numeric_inputs[field_id]
            if self.lr_normalize:
                norm2 += math.pow(numeric_inputs[field_id], 2)

        # text, items and categories
        for field_id in unique_terms:
            if field_id in self.input_fields:
                coefficients = self.get_coefficients(category, field_id)
                for term, occurrences in unique_terms[field_id]:
                    try:
                        one_hot = True
                        if field_id in self.tag_clouds:
                            index = self.tag_clouds[field_id].index(term)
                        elif field_id in self.items:
                            index = self.items[field_id].index(term)
                        elif field_id in self.categories and ( \
                                not field_id in self.field_codings or \
                                list(self.field_codings[field_id].keys())[0] == \
                                "dummy"):
                            index = self.categories[field_id].index(term)
                        elif field_id in self.categories:
                            # non-dummy codings use a contribution matrix
                            one_hot = False
                            index = self.categories[field_id].index(term)
                            coeff_index = 0
                            for contribution in \
                                    list(self.field_codings[field_id].values())[0]:
                                probability += \
                                    coefficients[coeff_index] * \
                                    contribution[index] * occurrences
                                coeff_index += 1
                        if one_hot:
                            probability += coefficients[index] * \
                                occurrences
                        norm2 += math.pow(occurrences, 2)
                    except ValueError:
                        # term not found in the field's vocabulary
                        pass

        # missings: each field contributes its trailing missing coefficient
        # when the input carries no value for it
        for field_id in self.input_fields:
            contribution = False
            coefficients = self.get_coefficients(category, field_id)
            if field_id in self.numeric_fields and \
                    field_id not in numeric_inputs:
                probability += coefficients[1]
                contribution = True
            elif field_id in self.tag_clouds and (field_id not in \
                    unique_terms \
                    or not unique_terms[field_id]):
                probability += coefficients[ \
                    len(self.tag_clouds[field_id])]
                contribution = True
            elif field_id in self.items and (field_id not in \
                    unique_terms \
                    or not unique_terms[field_id]):
                probability += coefficients[len(self.items[field_id])]
                contribution = True
            elif field_id in self.categories and \
                    field_id != self.objective_id and \
                    field_id not in unique_terms:
                if field_id not in self.field_codings or \
                        list(self.field_codings[field_id].keys())[0] == "dummy":
                    probability += coefficients[ \
                        len(self.categories[field_id])]
                else:
                    # codings are in reverse order of the coefficients
                    coeff_index = 0
                    for contribution in \
                            list(self.field_codings[field_id].values())[0]:
                        probability += coefficients[coeff_index] * \
                            contribution[-1]
                        coeff_index += 1
                contribution = True
            if contribution and self.lr_normalize:
                norm2 += 1

        # the bias term is the last in the coefficients list
        probability += self.coefficients[category][\
            len(self.coefficients[category]) - 1][0]

        if self.bias:
            norm2 += 1
        if self.lr_normalize:
            try:
                probability /= math.sqrt(norm2)
            except ZeroDivisionError:
                # this should never happen
                probability = float('NaN')

        try:
            probability = 1 / (1 + math.exp(-probability))
        except OverflowError:
            probability = 0 if probability < 0 else 1
        # truncate probability to 5 digits, as in the backend
        probability = round(probability, 5)
        return probability

    def map_coefficients(self):
        """ Maps each field to the corresponding coefficients subarray

        Computes, for old flat-format models, each field's offset and
        length inside the flat coefficient list, then regroups.
        """
        field_ids = [ \
            field_id for field_id in self.input_fields
            if field_id != self.objective_id]
        shift = 0
        for field_id in field_ids:
            optype = self.fields[field_id]['optype']
            if optype in list(EXPANSION_ATTRIBUTES.keys()):
                # text and items fields have one coefficient per
                # text plus a missing terms coefficient plus a bias
                # coefficient
                # categorical fields too, unless they use a non-default
                # field coding.
                if optype != 'categorical' or \
                        not field_id in self.field_codings or \
                        list(self.field_codings[field_id].keys())[0] == "dummy":
                    length = len(self.fields[field_id]['summary'][ \
                        EXPANSION_ATTRIBUTES[optype]])
                    # missing coefficient
                    length += 1
                else:
                    length = len(list(self.field_codings[field_id].values())[0])
            else:
                # numeric fields have one coefficient and an additional one
                # if self.missing_numerics is True
                length = 2 if self.missing_numerics else 1
            self.fields[field_id]['coefficients_shift'] = shift
            self.fields[field_id]['coefficients_length'] = length
            shift += length
        self.group_coefficients()

    def get_coefficients(self, category, field_id):
        """ Returns the set of coefficients for the given category and fieldIds

        """
        coeff_index = self.input_fields.index(field_id)
        return self.coefficients[category][coeff_index]

    def group_coefficients(self):
        """ Groups the coefficients of the flat array in old formats to the
        grouped array, as used in the current notation

        """
        coefficients = copy.deepcopy(self.coefficients)
        self.flat_coefficients = coefficients
        for category in coefficients:
            self.coefficients[category] = []
            for field_id in self.input_fields:
                shift = self.fields[field_id]['coefficients_shift']
                length = self.fields[field_id]['coefficients_length']
                coefficients_group = \
                    coefficients[category][shift : length + shift]
                self.coefficients[category].append(coefficients_group)
            # the flat list's last element is the bias, kept as a 1-item group
            self.coefficients[category].append( \
                [coefficients[category][len(coefficients[category]) - 1]])

    def format_field_codings(self):
        """ Changes the field codings format to the dict notation

        Old resources stored field_codings as a list of
        {"field", "coding", ...} dicts; normalize to
        {field_id: {coding: value}}.
        """
        if isinstance(self.field_codings, list):
            self.field_codings_list = self.field_codings[:]
            field_codings = self.field_codings[:]
            self.field_codings = {}
            for element in field_codings:
                field_id = element['field']
                if element["coding"] == "dummy":
                    self.field_codings[field_id] = {\
                        element["coding"]: element['dummy_class']}
                else:
                    self.field_codings[field_id] = {\
                        element["coding"]: element['coefficients']}
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_delete_request(
    scope: str,
    policy_assignment_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP DELETE request for removing a policy assignment at a scope."""
    template = kwargs.pop(
        "template_url",
        '/{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}')
    # scope is a pre-formed ARM path, so URL-quoting is skipped for it.
    formatted_url = _format_url_section(
        template,
        scope=_SERIALIZER.url("scope", scope, 'str', skip_quote=True),
        policyAssignmentName=_SERIALIZER.url(
            "policy_assignment_name", policy_assignment_name, 'str'),
    )

    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", "2018-05-01", 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    return HttpRequest(
        method="DELETE",
        url=formatted_url,
        params=params,
        headers=headers,
        **kwargs
    )
def build_create_request(
    scope: str,
    policy_assignment_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP PUT request that creates or updates a policy assignment."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    template = kwargs.pop(
        "template_url",
        '/{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}')
    # scope is a pre-formed ARM path, so URL-quoting is skipped for it.
    formatted_url = _format_url_section(
        template,
        scope=_SERIALIZER.url("scope", scope, 'str', skip_quote=True),
        policyAssignmentName=_SERIALIZER.url(
            "policy_assignment_name", policy_assignment_name, 'str'),
    )

    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", "2018-05-01", 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    # Content-Type is only sent when a body serialization was requested.
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    return HttpRequest(
        method="PUT",
        url=formatted_url,
        params=params,
        headers=headers,
        json=json,
        content=content,
        **kwargs
    )
def build_get_request(
    scope: str,
    policy_assignment_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request retrieving a single policy assignment."""
    template = kwargs.pop(
        "template_url",
        '/{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}')
    # scope is a pre-formed ARM path, so URL-quoting is skipped for it.
    formatted_url = _format_url_section(
        template,
        scope=_SERIALIZER.url("scope", scope, 'str', skip_quote=True),
        policyAssignmentName=_SERIALIZER.url(
            "policy_assignment_name", policy_assignment_name, 'str'),
    )

    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", "2018-05-01", 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    return HttpRequest(
        method="GET",
        url=formatted_url,
        params=params,
        headers=headers,
        **kwargs
    )
def build_list_for_resource_group_request(
    resource_group_name: str,
    subscription_id: str,
    *,
    filter: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request listing policy assignments in a resource group."""
    template = kwargs.pop(
        "template_url",
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/policyAssignments')
    formatted_url = _format_url_section(
        template,
        resourceGroupName=_SERIALIZER.url(
            "resource_group_name", resource_group_name, 'str',
            max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
    )

    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    if filter is not None:
        # The $filter value is passed through unquoted (skip_quote=True).
        params['$filter'] = _SERIALIZER.query("filter", filter, 'str', skip_quote=True)
    params['api-version'] = _SERIALIZER.query("api_version", "2018-05-01", 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    return HttpRequest(
        method="GET",
        url=formatted_url,
        params=params,
        headers=headers,
        **kwargs
    )
def build_list_for_resource_request(
    resource_group_name: str,
    resource_provider_namespace: str,
    parent_resource_path: str,
    resource_type: str,
    resource_name: str,
    subscription_id: str,
    *,
    filter: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request listing policy assignments that apply to a resource."""
    template = kwargs.pop(
        "template_url",
        '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/policyAssignments')
    # parentResourcePath and resourceType are passed through unquoted so
    # multi-segment values keep their '/' separators.
    formatted_url = _format_url_section(
        template,
        resourceGroupName=_SERIALIZER.url(
            "resource_group_name", resource_group_name, 'str',
            max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        resourceProviderNamespace=_SERIALIZER.url(
            "resource_provider_namespace", resource_provider_namespace, 'str'),
        parentResourcePath=_SERIALIZER.url(
            "parent_resource_path", parent_resource_path, 'str', skip_quote=True),
        resourceType=_SERIALIZER.url("resource_type", resource_type, 'str', skip_quote=True),
        resourceName=_SERIALIZER.url("resource_name", resource_name, 'str'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
    )

    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    if filter is not None:
        params['$filter'] = _SERIALIZER.query("filter", filter, 'str')
    params['api-version'] = _SERIALIZER.query("api_version", "2018-05-01", 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    return HttpRequest(
        method="GET",
        url=formatted_url,
        params=params,
        headers=headers,
        **kwargs
    )
def build_list_request(
    subscription_id: str,
    *,
    filter: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request listing policy assignments for a subscription."""
    template = kwargs.pop(
        "template_url",
        '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyAssignments')
    formatted_url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
    )

    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    if filter is not None:
        params['$filter'] = _SERIALIZER.query("filter", filter, 'str')
    params['api-version'] = _SERIALIZER.query("api_version", "2018-05-01", 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    return HttpRequest(
        method="GET",
        url=formatted_url,
        params=params,
        headers=headers,
        **kwargs
    )
def build_delete_by_id_request(
    policy_assignment_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP DELETE request addressing a policy assignment by full ID."""
    template = kwargs.pop("template_url", '/{policyAssignmentId}')
    # The assignment ID is already a complete ARM path: no quoting.
    formatted_url = _format_url_section(
        template,
        policyAssignmentId=_SERIALIZER.url(
            "policy_assignment_id", policy_assignment_id, 'str', skip_quote=True),
    )

    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", "2018-05-01", 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    return HttpRequest(
        method="DELETE",
        url=formatted_url,
        params=params,
        headers=headers,
        **kwargs
    )
def build_create_by_id_request(
    policy_assignment_id: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP PUT request creating a policy assignment addressed by full ID."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    template = kwargs.pop("template_url", '/{policyAssignmentId}')
    # The assignment ID is already a complete ARM path: no quoting.
    formatted_url = _format_url_section(
        template,
        policyAssignmentId=_SERIALIZER.url(
            "policy_assignment_id", policy_assignment_id, 'str', skip_quote=True),
    )

    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", "2018-05-01", 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    # Content-Type is only sent when a body serialization was requested.
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    return HttpRequest(
        method="PUT",
        url=formatted_url,
        params=params,
        headers=headers,
        json=json,
        content=content,
        **kwargs
    )
def build_get_by_id_request(
    policy_assignment_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request retrieving a policy assignment by full ID."""
    template = kwargs.pop("template_url", '/{policyAssignmentId}')
    # The assignment ID is already a complete ARM path: no quoting.
    formatted_url = _format_url_section(
        template,
        policyAssignmentId=_SERIALIZER.url(
            "policy_assignment_id", policy_assignment_id, 'str', skip_quote=True),
    )

    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", "2018-05-01", 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    return HttpRequest(
        method="GET",
        url=formatted_url,
        params=params,
        headers=headers,
        **kwargs
    )
class PolicyAssignmentsOperations(object):
"""PolicyAssignmentsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.policy.v2018_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def delete(
self,
scope: str,
policy_assignment_name: str,
**kwargs: Any
) -> Optional["_models.PolicyAssignment"]:
"""Deletes a policy assignment.
This operation deletes a policy assignment, given its name and the scope it was created in. The
scope of a policy assignment is the part of its ID preceding
'/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}'.
:param scope: The scope of the policy assignment. Valid scopes are: management group (format:
'/providers/Microsoft.Management/managementGroups/{managementGroup}'), subscription (format:
'/subscriptions/{subscriptionId}'), resource group (format:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}', or resource (format:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/[{parentResourcePath}/]{resourceType}/{resourceName}'.
:type scope: str
:param policy_assignment_name: The name of the policy assignment to delete.
:type policy_assignment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PolicyAssignment, or the result of cls(response)
:rtype: ~azure.mgmt.resource.policy.v2018_05_01.models.PolicyAssignment or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.PolicyAssignment"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
scope=scope,
policy_assignment_name=policy_assignment_name,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PolicyAssignment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}'} # type: ignore
@distributed_trace
def create(
self,
scope: str,
policy_assignment_name: str,
parameters: "_models.PolicyAssignment",
**kwargs: Any
) -> "_models.PolicyAssignment":
"""Creates or updates a policy assignment.
This operation creates or updates a policy assignment with the given scope and name. Policy
assignments apply to all resources contained within their scope. For example, when you assign a
policy at resource group scope, that policy applies to all resources in the group.
:param scope: The scope of the policy assignment. Valid scopes are: management group (format:
'/providers/Microsoft.Management/managementGroups/{managementGroup}'), subscription (format:
'/subscriptions/{subscriptionId}'), resource group (format:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}', or resource (format:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/[{parentResourcePath}/]{resourceType}/{resourceName}'.
:type scope: str
:param policy_assignment_name: The name of the policy assignment.
:type policy_assignment_name: str
:param parameters: Parameters for the policy assignment.
:type parameters: ~azure.mgmt.resource.policy.v2018_05_01.models.PolicyAssignment
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PolicyAssignment, or the result of cls(response)
:rtype: ~azure.mgmt.resource.policy.v2018_05_01.models.PolicyAssignment
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyAssignment"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'PolicyAssignment')
request = build_create_request(
scope=scope,
policy_assignment_name=policy_assignment_name,
content_type=content_type,
json=_json,
template_url=self.create.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PolicyAssignment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}'} # type: ignore
@distributed_trace
def get(
self,
scope: str,
policy_assignment_name: str,
**kwargs: Any
) -> "_models.PolicyAssignment":
"""Retrieves a policy assignment.
This operation retrieves a single policy assignment, given its name and the scope it was
created at.
:param scope: The scope of the policy assignment. Valid scopes are: management group (format:
'/providers/Microsoft.Management/managementGroups/{managementGroup}'), subscription (format:
'/subscriptions/{subscriptionId}'), resource group (format:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}', or resource (format:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/[{parentResourcePath}/]{resourceType}/{resourceName}'.
:type scope: str
:param policy_assignment_name: The name of the policy assignment to get.
:type policy_assignment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PolicyAssignment, or the result of cls(response)
:rtype: ~azure.mgmt.resource.policy.v2018_05_01.models.PolicyAssignment
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyAssignment"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
scope=scope,
policy_assignment_name=policy_assignment_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PolicyAssignment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}'} # type: ignore
@distributed_trace
def list_for_resource_group(
self,
resource_group_name: str,
filter: Optional[str] = None,
**kwargs: Any
) -> Iterable["_models.PolicyAssignmentListResult"]:
"""Retrieves all policy assignments that apply to a resource group.
This operation retrieves the list of all policy assignments associated with the given resource
group in the given subscription that match the optional given $filter. Valid values for $filter
are: 'atScope()' or 'policyDefinitionId eq '{value}''. If $filter is not provided, the
unfiltered list includes all policy assignments associated with the resource group, including
those that apply directly or apply from containing scopes, as well as any applied to resources
contained within the resource group. If $filter=atScope() is provided, the returned list
includes all policy assignments that apply to the resource group, which is everything in the
unfiltered list except those applied to resources contained within the resource group. If
$filter=policyDefinitionId eq '{value}' is provided, the returned list includes all policy
assignments of the policy definition whose id is {value} that apply to the resource group.
:param resource_group_name: The name of the resource group that contains policy assignments.
:type resource_group_name: str
:param filter: The filter to apply on the operation. Valid values for $filter are: 'atScope()'
or 'policyDefinitionId eq '{value}''. If $filter is not provided, no filtering is performed.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PolicyAssignmentListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.resource.policy.v2018_05_01.models.PolicyAssignmentListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyAssignmentListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_for_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
filter=filter,
template_url=self.list_for_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_for_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
filter=filter,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("PolicyAssignmentListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_for_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/policyAssignments'} # type: ignore
@distributed_trace
def list_for_resource(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
filter: Optional[str] = None,
**kwargs: Any
) -> Iterable["_models.PolicyAssignmentListResult"]:
"""Retrieves all policy assignments that apply to a resource.
This operation retrieves the list of all policy assignments associated with the specified
resource in the given resource group and subscription that match the optional given $filter.
Valid values for $filter are: 'atScope()' or 'policyDefinitionId eq '{value}''. If $filter is
not provided, the unfiltered list includes all policy assignments associated with the resource,
including those that apply directly or from all containing scopes, as well as any applied to
resources contained within the resource. If $filter=atScope() is provided, the returned list
includes all policy assignments that apply to the resource, which is everything in the
unfiltered list except those applied to resources contained within the resource. If
$filter=policyDefinitionId eq '{value}' is provided, the returned list includes all policy
assignments of the policy definition whose id is {value} that apply to the resource. Three
parameters plus the resource name are used to identify a specific resource. If the resource is
not part of a parent resource (the more common case), the parent resource path should not be
provided (or provided as ''). For example a web app could be specified as
({resourceProviderNamespace} == 'Microsoft.Web', {parentResourcePath} == '', {resourceType} ==
'sites', {resourceName} == 'MyWebApp'). If the resource is part of a parent resource, then all
parameters should be provided. For example a virtual machine DNS name could be specified as
({resourceProviderNamespace} == 'Microsoft.Compute', {parentResourcePath} ==
'virtualMachines/MyVirtualMachine', {resourceType} == 'domainNames', {resourceName} ==
'MyComputerName'). A convenient alternative to providing the namespace and type name separately
is to provide both in the {resourceType} parameter, format: ({resourceProviderNamespace} == '',
{parentResourcePath} == '', {resourceType} == 'Microsoft.Web/sites', {resourceName} ==
'MyWebApp').
:param resource_group_name: The name of the resource group containing the resource.
:type resource_group_name: str
:param resource_provider_namespace: The namespace of the resource provider. For example, the
namespace of a virtual machine is Microsoft.Compute (from Microsoft.Compute/virtualMachines).
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource path. Use empty string if there is none.
:type parent_resource_path: str
:param resource_type: The resource type name. For example the type name of a web app is 'sites'
(from Microsoft.Web/sites).
:type resource_type: str
:param resource_name: The name of the resource.
:type resource_name: str
:param filter: The filter to apply on the operation. Valid values for $filter are: 'atScope()'
or 'policyDefinitionId eq '{value}''. If $filter is not provided, no filtering is performed.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PolicyAssignmentListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.resource.policy.v2018_05_01.models.PolicyAssignmentListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyAssignmentListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_for_resource_request(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
filter=filter,
template_url=self.list_for_resource.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_for_resource_request(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
filter=filter,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("PolicyAssignmentListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_for_resource.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/policyAssignments'} # type: ignore
@distributed_trace
def list(
    self,
    filter: Optional[str] = None,
    **kwargs: Any
) -> Iterable["_models.PolicyAssignmentListResult"]:
    """Retrieves all policy assignments that apply to a subscription.

    This operation retrieves the list of all policy assignments associated with the given
    subscription that match the optional given $filter. Valid values for $filter are: 'atScope()'
    or 'policyDefinitionId eq '{value}''. If $filter is not provided, the unfiltered list includes
    all policy assignments associated with the subscription, including those that apply directly or
    from management groups that contain the given subscription, as well as any applied to objects
    contained within the subscription. If $filter=atScope() is provided, the returned list includes
    all policy assignments that apply to the subscription, which is everything in the unfiltered
    list except those applied to objects contained within the subscription. If
    $filter=policyDefinitionId eq '{value}' is provided, the returned list includes all policy
    assignments of the policy definition whose id is {value}.

    :param filter: The filter to apply on the operation. Valid values for $filter are: 'atScope()'
     or 'policyDefinitionId eq '{value}''. If $filter is not provided, no filtering is performed.
    :type filter: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either PolicyAssignmentListResult or the result of
     cls(response)
    :rtype:
     ~azure.core.paging.ItemPaged[~azure.mgmt.resource.policy.v2018_05_01.models.PolicyAssignmentListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.PolicyAssignmentListResult"]
    # Map common failure codes to azure-core exception types; callers may
    # extend/override the mapping via the ``error_map`` keyword argument.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        if not next_link:
            # First page: build from the operation's URL template.
            request = build_list_request(
                subscription_id=self._config.subscription_id,
                filter=filter,
                template_url=self.list.metadata['url'],
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
        else:
            # Subsequent pages: the service returns a complete next_link URL.
            request = build_list_request(
                subscription_id=self._config.subscription_id,
                filter=filter,
                template_url=next_link,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
            request.method = "GET"
        return request

    def extract_data(pipeline_response):
        # Deserialize one page and hand (next_link, element iterator) to ItemPaged.
        deserialized = self._deserialize("PolicyAssignmentListResult", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch one page; anything other than 200 is surfaced as an error.
        request = prepare_request(next_link)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    # ItemPaged lazily drives get_next/extract_data as the caller iterates.
    return ItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyAssignments'}  # type: ignore
@distributed_trace
def delete_by_id(
    self,
    policy_assignment_id: str,
    **kwargs: Any
) -> Optional["_models.PolicyAssignment"]:
    """Deletes a policy assignment.

    This operation deletes the policy with the given ID. Policy assignment IDs have this format:
    '{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}'. Valid
    formats for {scope} are: '/providers/Microsoft.Management/managementGroups/{managementGroup}'
    (management group), '/subscriptions/{subscriptionId}' (subscription),
    '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' (resource group), or
    '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/[{parentResourcePath}/]{resourceType}/{resourceName}'
    (resource).

    :param policy_assignment_id: The ID of the policy assignment to delete. Use the format
     '{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}'.
    :type policy_assignment_id: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: PolicyAssignment, or the result of cls(response)
    :rtype: ~azure.mgmt.resource.policy.v2018_05_01.models.PolicyAssignment or None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.PolicyAssignment"]]
    # Map common failure codes to azure-core exception types; callers may
    # extend/override the mapping via the ``error_map`` keyword argument.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    request = build_delete_by_id_request(
        policy_assignment_id=policy_assignment_id,
        template_url=self.delete_by_id.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    # 204 (no body) yields None; only a 200 carries the deleted assignment.
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('PolicyAssignment', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
delete_by_id.metadata = {'url': '/{policyAssignmentId}'}  # type: ignore
@distributed_trace
def create_by_id(
    self,
    policy_assignment_id: str,
    parameters: "_models.PolicyAssignment",
    **kwargs: Any
) -> "_models.PolicyAssignment":
    """Creates or updates a policy assignment.

    This operation creates or updates the policy assignment with the given ID. Policy assignments
    made on a scope apply to all resources contained in that scope. For example, when you assign a
    policy to a resource group that policy applies to all resources in the group. Policy assignment
    IDs have this format:
    '{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}'. Valid
    scopes are: management group (format:
    '/providers/Microsoft.Management/managementGroups/{managementGroup}'), subscription (format:
    '/subscriptions/{subscriptionId}'), resource group (format:
    '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}', or resource (format:
    '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/[{parentResourcePath}/]{resourceType}/{resourceName}'.

    :param policy_assignment_id: The ID of the policy assignment to create. Use the format
     '{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}'.
    :type policy_assignment_id: str
    :param parameters: Parameters for policy assignment.
    :type parameters: ~azure.mgmt.resource.policy.v2018_05_01.models.PolicyAssignment
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: PolicyAssignment, or the result of cls(response)
    :rtype: ~azure.mgmt.resource.policy.v2018_05_01.models.PolicyAssignment
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.PolicyAssignment"]
    # Map common failure codes to azure-core exception types; callers may
    # extend/override the mapping via the ``error_map`` keyword argument.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

    # Serialize the model into the JSON request body.
    _json = self._serialize.body(parameters, 'PolicyAssignment')

    request = build_create_by_id_request(
        policy_assignment_id=policy_assignment_id,
        content_type=content_type,
        json=_json,
        template_url=self.create_by_id.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # This operation only treats 201 (Created) as success.
    if response.status_code not in [201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('PolicyAssignment', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
create_by_id.metadata = {'url': '/{policyAssignmentId}'}  # type: ignore
@distributed_trace
def get_by_id(
    self,
    policy_assignment_id: str,
    **kwargs: Any
) -> "_models.PolicyAssignment":
    """Retrieves the policy assignment with the given ID.

    The operation retrieves the policy assignment with the given ID. Policy assignment IDs have
    this format:
    '{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}'. Valid
    scopes are: management group (format:
    '/providers/Microsoft.Management/managementGroups/{managementGroup}'), subscription (format:
    '/subscriptions/{subscriptionId}'), resource group (format:
    '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}', or resource (format:
    '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/[{parentResourcePath}/]{resourceType}/{resourceName}'.

    :param policy_assignment_id: The ID of the policy assignment to get. Use the format
     '{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}'.
    :type policy_assignment_id: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: PolicyAssignment, or the result of cls(response)
    :rtype: ~azure.mgmt.resource.policy.v2018_05_01.models.PolicyAssignment
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.PolicyAssignment"]
    # Map common failure codes to azure-core exception types; callers may
    # extend/override the mapping via the ``error_map`` keyword argument.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    request = build_get_by_id_request(
        policy_assignment_id=policy_assignment_id,
        template_url=self.get_by_id.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('PolicyAssignment', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
get_by_id.metadata = {'url': '/{policyAssignmentId}'}  # type: ignore
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The abstract :py:class:`Target` class.
It is a central concept of Luigi and represents the state of the workflow.
"""
import abc
import io
import os
import random
import tempfile
import logging
import warnings
from luigi import six
logger = logging.getLogger('luigi-interface')
@six.add_metaclass(abc.ABCMeta)
class Target(object):
    """
    A Target is a resource generated by a :py:class:`~luigi.task.Task`.

    For example, a Target might correspond to a file in HDFS or data in a database. The Target
    interface defines one method that must be overridden: :py:meth:`exists`, which signifies if the
    Target has been created or not.

    Typically, a :py:class:`~luigi.task.Task` will define one or more Targets as output, and the Task
    is considered complete if and only if each of its output Targets exist.
    """

    @abc.abstractmethod
    def exists(self):
        """
        Returns ``True`` if the :py:class:`Target` exists and ``False`` otherwise.
        """
        pass
class FileSystemException(Exception):
    """
    Base class for generic file system exceptions.

    Raised (via its subclasses) by :py:class:`FileSystem` implementations.
    """
    pass
class FileAlreadyExists(FileSystemException):
    """
    Raised when a file system operation can't be performed because
    a directory exists but is required to not exist.
    """
    pass
class MissingParentDirectory(FileSystemException):
    """
    Raised when a parent directory doesn't exist.

    (Imagine mkdir without -p)
    """
    pass
class NotADirectory(FileSystemException):
    """
    Raised when a file system operation can't be performed because
    an expected directory is actually a file.
    """
    pass
@six.add_metaclass(abc.ABCMeta)
class FileSystem(object):
    """
    Abstract file-system interface backing :py:class:`FileSystemTarget`.

    Instances of :py:class:`FileSystemTarget` delegate operations such as
    :py:meth:`FileSystemTarget.exists` and :py:meth:`FileSystemTarget.remove`
    to an associated FileSystem. Implementations signal failures by raising
    :py:class:`FileSystemException`.

    Only :py:meth:`exists` and :py:meth:`remove` are mandatory; the remaining
    methods are optional and raise ``NotImplementedError`` by default.
    """

    @abc.abstractmethod
    def exists(self, path):
        """
        Return ``True`` if a file or directory exists at ``path``, ``False`` otherwise.

        :param str path: a path within the FileSystem to check for existence.
        """
        pass

    @abc.abstractmethod
    def remove(self, path, recursive=True, skip_trash=True):
        """
        Remove the file or directory at ``path``.

        :param str path: a path within the FileSystem to remove.
        :param bool recursive: if ``path`` is a directory, also remove all of
                               its descendants. Defaults to ``True``.
        :param bool skip_trash: bypass any trash/recycle mechanism the
                                concrete file system may offer.
        """
        pass

    def mkdir(self, path, parents=True, raise_if_exists=False):
        """
        Create a directory at ``path``, implicitly creating missing parents.

        :param str path: a path within the FileSystem to create as a directory.
        :param bool parents: create parent directories when necessary. When
                             ``parents=False`` and the parent directory doesn't
                             exist, raise :py:class:`MissingParentDirectory`.
        :param bool raise_if_exists: raise :py:class:`FileAlreadyExists` if
                                     the folder already exists.
        """
        raise NotImplementedError("mkdir() not implemented on {0}".format(self.__class__.__name__))

    def isdir(self, path):
        """
        Return ``True`` if the location at ``path`` is a directory, ``False`` otherwise.

        *Note*: optional; not every FileSystem subclass implements it.

        :param str path: a path within the FileSystem to check as a directory.
        """
        raise NotImplementedError("isdir() not implemented on {0}".format(self.__class__.__name__))

    def listdir(self, path):
        """
        Return an iterable of the files rooted at ``path``.

        This is intended to be a recursive listing.

        *Note*: optional; not every FileSystem subclass implements it.

        :param str path: a path within the FileSystem to list.
        """
        raise NotImplementedError("listdir() not implemented on {0}".format(self.__class__.__name__))

    def move(self, path, dest):
        """
        Move a file, as one would expect.
        """
        raise NotImplementedError("move() not implemented on {0}".format(self.__class__.__name__))

    def rename_dont_move(self, path, dest):
        """
        Potentially rename ``path`` to ``dest``, but never *into* ``dest``
        (i.e. if ``dest`` is a folder, do not move inside it). This relates
        to :ref:`AtomicWrites`.

        The default implementation is reasonable but not bullet proof: it
        refuses to overwrite an existing ``dest`` and otherwise falls back
        to plain ``move()``.
        """
        warnings.warn("File system {} client doesn't support atomic mv.".format(self.__class__.__name__))
        if self.exists(dest):
            raise FileAlreadyExists()
        self.move(path, dest)

    def rename(self, *args, **kwargs):
        """
        Alias for ``move()``
        """
        self.move(*args, **kwargs)

    def copy(self, path, dest):
        """
        Copy a file or a directory with contents.

        Currently, LocalFileSystem and MockFileSystem support only single file
        copying but S3Client copies either a file or a directory as required.
        """
        raise NotImplementedError("copy() not implemented on {0}".format(self.__class__.__name__))
class FileSystemTarget(Target):
    """
    Base class for FileSystem Targets like :class:`~luigi.file.LocalTarget` and :class:`~luigi.contrib.hdfs.HdfsTarget`.

    A FileSystemTarget has an associated :py:class:`FileSystem` to which certain operations can be
    delegated. By default, :py:meth:`exists` and :py:meth:`remove` are delegated to the
    :py:class:`FileSystem`, which is determined by the :py:attr:`fs` property.

    Methods of FileSystemTarget raise :py:class:`FileSystemException` if there is a problem
    completing the operation.
    """

    def __init__(self, path):
        """
        Initializes a FileSystemTarget instance.

        :param str path: the path associated with this FileSystemTarget.
        """
        self.path = path

    @abc.abstractproperty
    def fs(self):
        """
        The :py:class:`FileSystem` associated with this FileSystemTarget.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def open(self, mode):
        """
        Open the FileSystem target.

        This method returns a file-like object which can either be read from or written to depending
        on the specified mode.

        :param str mode: the mode `r` opens the FileSystemTarget in read-only mode, whereas `w` will
                         open the FileSystemTarget in write mode. Subclasses can implement
                         additional options.
        """
        pass

    def exists(self):
        """
        Returns ``True`` if the path for this FileSystemTarget exists; ``False`` otherwise.

        This method is implemented by using :py:attr:`fs`.
        """
        path = self.path
        # Glob characters are legal in some file systems, but existence of a
        # pattern is ambiguous, so warn rather than silently guessing.
        if '*' in path or '?' in path or '[' in path or '{' in path:
            logger.warning("Using wildcards in path %s might lead to processing of an incomplete dataset; "
                           "override exists() to suppress the warning.", path)
        return self.fs.exists(path)

    def remove(self):
        """
        Remove the resource at the path specified by this FileSystemTarget.

        This method is implemented by using :py:attr:`fs`.
        """
        self.fs.remove(self.path)

    def temporary_path(self):
        """
        A context manager that enables a reasonably short, general and
        magic-less way to solve the :ref:`AtomicWrites`.

        * On *entering*, it will create the parent directories so the
          temporary_path is writeable right away.
          This step uses :py:meth:`FileSystem.mkdir`.
        * On *exiting*, it will move the temporary file if there was no exception thrown.
          This step uses :py:meth:`FileSystem.rename_dont_move`

        The file system operations will be carried out by calling them on :py:attr:`fs`.

        The typical use case looks like this:

        .. code:: python

            class MyTask(luigi.Task):
                def output(self):
                    return MyFileSystemTarget(...)

                def run(self):
                    with self.output().temporary_path() as self.temp_output_path:
                        run_some_external_command(output_path=self.temp_output_path)
        """
        class _Manager(object):
            # Class bodies execute in the enclosing function's scope, so this
            # evaluates `self` (the outer target) once, at class-definition
            # time, and stores it as a class attribute.
            target = self

            def __init__(self):
                # NOTE(review): randrange with a float bound (1e10) is
                # rejected on Python >= 3.12; should be 10 ** 10 — confirm.
                num = random.randrange(0, 1e10)
                slashless_path = self.target.path.rstrip('/').rstrip("\\")
                self._temp_path = '{}-luigi-tmp-{:010}{}'.format(
                    slashless_path,
                    num,
                    self.target._trailing_slash())
                # TODO: os.path doesn't make sense here as it's os-dependent
                tmp_dir = os.path.dirname(slashless_path)
                self.target.fs.mkdir(tmp_dir, parents=True, raise_if_exists=False)

            def __enter__(self):
                return self._temp_path

            def __exit__(self, exc_type, exc_value, traceback):
                if exc_type is None:
                    # There were no exceptions
                    self.target.fs.rename_dont_move(self._temp_path, self.target.path)
                return False  # False means we don't suppress the exception
        return _Manager()

    def _touchz(self):
        # Create an empty file at self.path (analogous to `touch`).
        with self.open('w'):
            pass

    def _trailing_slash(self):
        # I suppose one day schema-like paths, like
        # file:///path/blah.txt?params=etc can be parsed too
        # NOTE(review): raises IndexError when self.path is empty.
        return self.path[-1] if self.path[-1] in r'\/' else ''
class AtomicLocalFile(io.BufferedWriter):
    """Abstract class to create a Target that creates
    a temporary file in the local filesystem before
    moving it to its final destination.

    This class is just for the writing part of the Target. See
    :class:`luigi.file.LocalTarget` for example
    """

    def __init__(self, path):
        """
        :param str path: final destination path of the file being written.
        """
        self.__tmp_path = self.generate_tmp_path(path)
        self.path = path
        super(AtomicLocalFile, self).__init__(io.FileIO(self.__tmp_path, 'w'))

    def close(self):
        # Flush and close the temporary file first, then promote it so
        # readers never observe a partially-written destination file.
        super(AtomicLocalFile, self).close()
        self.move_to_final_destination()

    def generate_tmp_path(self, path):
        """Return a unique temporary path in the system temp directory.

        :param str path: the destination path (unused by this default
                         implementation; subclasses may derive from it).
        """
        # Use an integer bound: random.randrange() rejects float arguments
        # (1e10) on modern Python — TypeError since 3.12.
        return os.path.join(tempfile.gettempdir(), 'luigi-s3-tmp-%09d' % random.randrange(0, 10 ** 10))

    def move_to_final_destination(self):
        # Subclasses implement the atomic move/upload to self.path.
        raise NotImplementedError()

    def __del__(self):
        # Best-effort cleanup of the temp file. Guard with getattr: if
        # __init__ raised before setting the tmp path (e.g. FileIO failed),
        # the attribute doesn't exist and the original code would raise
        # AttributeError from the finalizer.
        tmp_path = getattr(self, '_AtomicLocalFile__tmp_path', None)
        if tmp_path and os.path.exists(tmp_path):
            os.remove(tmp_path)

    @property
    def tmp_path(self):
        # Read-only accessor for the private temp path.
        return self.__tmp_path

    def __exit__(self, exc_type, exc, traceback):
        " Close/commit the file if there are no exception "
        if exc_type:
            # Leave the file uncommitted; __del__ removes the temp file.
            return
        return super(AtomicLocalFile, self).__exit__(exc_type, exc, traceback)
|
|
# Transliteration table for one 256-code-point block of CJK ideographs:
# index (0x00-0xff within the block) -> romanized Korean-style reading.
# Appears to be machine-generated (unidecode-style data) — do not hand-edit.
# Each entry keeps a trailing space so lookups can be concatenated directly.
data = (
    'Kay ',  # 0x00
    'Kayng ',  # 0x01
    'Ke ',  # 0x02
    'Ko ',  # 0x03
    'Kol ',  # 0x04
    'Koc ',  # 0x05
    'Kwi ',  # 0x06
    'Kwi ',  # 0x07
    'Kyun ',  # 0x08
    'Kul ',  # 0x09
    'Kum ',  # 0x0a
    'Na ',  # 0x0b
    'Na ',  # 0x0c
    'Na ',  # 0x0d
    'La ',  # 0x0e
    'Na ',  # 0x0f
    'Na ',  # 0x10
    'Na ',  # 0x11
    'Na ',  # 0x12
    'Na ',  # 0x13
    'Nak ',  # 0x14
    'Nak ',  # 0x15
    'Nak ',  # 0x16
    'Nak ',  # 0x17
    'Nak ',  # 0x18
    'Nak ',  # 0x19
    'Nak ',  # 0x1a
    'Nan ',  # 0x1b
    'Nan ',  # 0x1c
    'Nan ',  # 0x1d
    'Nan ',  # 0x1e
    'Nan ',  # 0x1f
    'Nan ',  # 0x20
    'Nam ',  # 0x21
    'Nam ',  # 0x22
    'Nam ',  # 0x23
    'Nam ',  # 0x24
    'Nap ',  # 0x25
    'Nap ',  # 0x26
    'Nap ',  # 0x27
    'Nang ',  # 0x28
    'Nang ',  # 0x29
    'Nang ',  # 0x2a
    'Nang ',  # 0x2b
    'Nang ',  # 0x2c
    'Nay ',  # 0x2d
    'Nayng ',  # 0x2e
    'No ',  # 0x2f
    'No ',  # 0x30
    'No ',  # 0x31
    'No ',  # 0x32
    'No ',  # 0x33
    'No ',  # 0x34
    'No ',  # 0x35
    'No ',  # 0x36
    'No ',  # 0x37
    'No ',  # 0x38
    'No ',  # 0x39
    'No ',  # 0x3a
    'Nok ',  # 0x3b
    'Nok ',  # 0x3c
    'Nok ',  # 0x3d
    'Nok ',  # 0x3e
    'Nok ',  # 0x3f
    'Nok ',  # 0x40
    'Non ',  # 0x41
    'Nong ',  # 0x42
    'Nong ',  # 0x43
    'Nong ',  # 0x44
    'Nong ',  # 0x45
    'Noy ',  # 0x46
    'Noy ',  # 0x47
    'Noy ',  # 0x48
    'Noy ',  # 0x49
    'Nwu ',  # 0x4a
    'Nwu ',  # 0x4b
    'Nwu ',  # 0x4c
    'Nwu ',  # 0x4d
    'Nwu ',  # 0x4e
    'Nwu ',  # 0x4f
    'Nwu ',  # 0x50
    'Nwu ',  # 0x51
    'Nuk ',  # 0x52
    'Nuk ',  # 0x53
    'Num ',  # 0x54
    'Nung ',  # 0x55
    'Nung ',  # 0x56
    'Nung ',  # 0x57
    'Nung ',  # 0x58
    'Nung ',  # 0x59
    'Twu ',  # 0x5a
    'La ',  # 0x5b
    'Lak ',  # 0x5c
    'Lak ',  # 0x5d
    'Lan ',  # 0x5e
    'Lyeng ',  # 0x5f
    'Lo ',  # 0x60
    'Lyul ',  # 0x61
    'Li ',  # 0x62
    'Pey ',  # 0x63
    'Pen ',  # 0x64
    'Pyen ',  # 0x65
    'Pwu ',  # 0x66
    'Pwul ',  # 0x67
    'Pi ',  # 0x68
    'Sak ',  # 0x69
    'Sak ',  # 0x6a
    'Sam ',  # 0x6b
    'Sayk ',  # 0x6c
    'Sayng ',  # 0x6d
    'Sep ',  # 0x6e
    'Sey ',  # 0x6f
    'Sway ',  # 0x70
    'Sin ',  # 0x71
    'Sim ',  # 0x72
    'Sip ',  # 0x73
    'Ya ',  # 0x74
    'Yak ',  # 0x75
    'Yak ',  # 0x76
    'Yang ',  # 0x77
    'Yang ',  # 0x78
    'Yang ',  # 0x79
    'Yang ',  # 0x7a
    'Yang ',  # 0x7b
    'Yang ',  # 0x7c
    'Yang ',  # 0x7d
    'Yang ',  # 0x7e
    'Ye ',  # 0x7f
    'Ye ',  # 0x80
    'Ye ',  # 0x81
    'Ye ',  # 0x82
    'Ye ',  # 0x83
    'Ye ',  # 0x84
    'Ye ',  # 0x85
    'Ye ',  # 0x86
    'Ye ',  # 0x87
    'Ye ',  # 0x88
    'Ye ',  # 0x89
    'Yek ',  # 0x8a
    'Yek ',  # 0x8b
    'Yek ',  # 0x8c
    'Yek ',  # 0x8d
    'Yen ',  # 0x8e
    'Yen ',  # 0x8f
    'Yen ',  # 0x90
    'Yen ',  # 0x91
    'Yen ',  # 0x92
    'Yen ',  # 0x93
    'Yen ',  # 0x94
    'Yen ',  # 0x95
    'Yen ',  # 0x96
    'Yen ',  # 0x97
    'Yen ',  # 0x98
    'Yen ',  # 0x99
    'Yen ',  # 0x9a
    'Yen ',  # 0x9b
    'Yel ',  # 0x9c
    'Yel ',  # 0x9d
    'Yel ',  # 0x9e
    'Yel ',  # 0x9f
    'Yel ',  # 0xa0
    'Yel ',  # 0xa1
    'Yem ',  # 0xa2
    'Yem ',  # 0xa3
    'Yem ',  # 0xa4
    'Yem ',  # 0xa5
    'Yem ',  # 0xa6
    'Yep ',  # 0xa7
    'Yeng ',  # 0xa8
    'Yeng ',  # 0xa9
    'Yeng ',  # 0xaa
    'Yeng ',  # 0xab
    'Yeng ',  # 0xac
    'Yeng ',  # 0xad
    'Yeng ',  # 0xae
    'Yeng ',  # 0xaf
    'Yeng ',  # 0xb0
    'Yeng ',  # 0xb1
    'Yeng ',  # 0xb2
    'Yeng ',  # 0xb3
    'Yeng ',  # 0xb4
    'Yey ',  # 0xb5
    'Yey ',  # 0xb6
    'Yey ',  # 0xb7
    'Yey ',  # 0xb8
    'O ',  # 0xb9
    'Yo ',  # 0xba
    'Yo ',  # 0xbb
    'Yo ',  # 0xbc
    'Yo ',  # 0xbd
    'Yo ',  # 0xbe
    'Yo ',  # 0xbf
    'Yo ',  # 0xc0
    'Yo ',  # 0xc1
    'Yo ',  # 0xc2
    'Yo ',  # 0xc3
    'Yong ',  # 0xc4
    'Wun ',  # 0xc5
    'Wen ',  # 0xc6
    'Yu ',  # 0xc7
    'Yu ',  # 0xc8
    'Yu ',  # 0xc9
    'Yu ',  # 0xca
    'Yu ',  # 0xcb
    'Yu ',  # 0xcc
    'Yu ',  # 0xcd
    'Yu ',  # 0xce
    'Yu ',  # 0xcf
    'Yu ',  # 0xd0
    'Yuk ',  # 0xd1
    'Yuk ',  # 0xd2
    'Yuk ',  # 0xd3
    'Yun ',  # 0xd4
    'Yun ',  # 0xd5
    'Yun ',  # 0xd6
    'Yun ',  # 0xd7
    'Yul ',  # 0xd8
    'Yul ',  # 0xd9
    'Yul ',  # 0xda
    'Yul ',  # 0xdb
    'Yung ',  # 0xdc
    'I ',  # 0xdd
    'I ',  # 0xde
    'I ',  # 0xdf
    'I ',  # 0xe0
    'I ',  # 0xe1
    'I ',  # 0xe2
    'I ',  # 0xe3
    'I ',  # 0xe4
    'I ',  # 0xe5
    'I ',  # 0xe6
    'I ',  # 0xe7
    'I ',  # 0xe8
    'I ',  # 0xe9
    'I ',  # 0xea
    'Ik ',  # 0xeb
    'Ik ',  # 0xec
    'In ',  # 0xed
    'In ',  # 0xee
    'In ',  # 0xef
    'In ',  # 0xf0
    'In ',  # 0xf1
    'In ',  # 0xf2
    'In ',  # 0xf3
    'Im ',  # 0xf4
    'Im ',  # 0xf5
    'Im ',  # 0xf6
    'Ip ',  # 0xf7
    'Ip ',  # 0xf8
    'Ip ',  # 0xf9
    'Cang ',  # 0xfa
    'Cek ',  # 0xfb
    'Ci ',  # 0xfc
    'Cip ',  # 0xfd
    'Cha ',  # 0xfe
    'Chek ',  # 0xff
)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic Node base class for all workers that run on hosts."""
import errno
import inspect
import os
import random
import signal
import sys
import time
import eventlet
import greenlet
from oslo.config import cfg
from nova import conductor
from nova import context
from nova import exception
from nova.openstack.common import eventlet_backdoor
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova import servicegroup
from nova import utils
from nova import version
from nova import wsgi
LOG = logging.getLogger(__name__)
# Configuration options for service hosting: state-report interval, periodic
# task behaviour, which API servers are enabled, their bind addresses / ports
# and worker counts, and the manager class for each nova service.
service_opts = [
    cfg.IntOpt('report_interval',
               default=10,
               help='seconds between nodes reporting state to datastore'),
    cfg.BoolOpt('periodic_enable',
                default=True,
                help='enable periodic tasks'),
    cfg.IntOpt('periodic_fuzzy_delay',
               default=60,
               help='range of seconds to randomly delay when starting the'
                    ' periodic task scheduler to reduce stampeding.'
                    ' (Disable by setting to 0)'),
    cfg.ListOpt('enabled_apis',
                default=['ec2', 'osapi_compute', 'metadata'],
                help='a list of APIs to enable by default'),
    cfg.ListOpt('enabled_ssl_apis',
                default=[],
                help='a list of APIs with enabled SSL'),
    cfg.StrOpt('ec2_listen',
               default="0.0.0.0",
               help='IP address for EC2 API to listen'),
    cfg.IntOpt('ec2_listen_port',
               default=8773,
               help='port for ec2 api to listen'),
    cfg.IntOpt('ec2_workers',
               default=None,
               help='Number of workers for EC2 API service'),
    cfg.StrOpt('osapi_compute_listen',
               default="0.0.0.0",
               help='IP address for OpenStack API to listen'),
    cfg.IntOpt('osapi_compute_listen_port',
               default=8774,
               help='list port for osapi compute'),
    cfg.IntOpt('osapi_compute_workers',
               default=None,
               help='Number of workers for OpenStack API service'),
    cfg.StrOpt('metadata_manager',
               default='nova.api.manager.MetadataManager',
               help='OpenStack metadata service manager'),
    cfg.StrOpt('metadata_listen',
               default="0.0.0.0",
               help='IP address for metadata api to listen'),
    cfg.IntOpt('metadata_listen_port',
               default=8775,
               help='port for metadata api to listen'),
    cfg.IntOpt('metadata_workers',
               default=None,
               help='Number of workers for metadata service'),
    cfg.StrOpt('compute_manager',
               default='nova.compute.manager.ComputeManager',
               help='full class name for the Manager for compute'),
    cfg.StrOpt('console_manager',
               default='nova.console.manager.ConsoleProxyManager',
               help='full class name for the Manager for console proxy'),
    cfg.StrOpt('cert_manager',
               default='nova.cert.manager.CertManager',
               help='full class name for the Manager for cert'),
    cfg.StrOpt('network_manager',
               default='nova.network.manager.VlanManager',
               help='full class name for the Manager for network'),
    cfg.StrOpt('scheduler_manager',
               default='nova.scheduler.manager.SchedulerManager',
               help='full class name for the Manager for scheduler'),
    cfg.IntOpt('service_down_time',
               default=60,
               help='maximum time since last check-in for up service'),
]

CONF = cfg.CONF
CONF.register_opts(service_opts)
# 'host' is declared in nova.netconf; importing makes CONF.host resolvable here.
CONF.import_opt('host', 'nova.netconf')
class SignalExit(SystemExit):
    """SystemExit raised from a signal handler, recording which signal fired.

    :param signo: the signal number that triggered the exit.
    :param exccode: process exit status to report (defaults to 1).
    """
    def __init__(self, signo, exccode=1):
        super(SignalExit, self).__init__(exccode)
        self.signo = signo
class Launcher(object):
    """Launch one or more services and wait for them to complete."""

    def __init__(self):
        """Initialize the service launcher.

        :returns: None

        """
        self._services = []
        # Optional eventlet backdoor port for live debugging; None if disabled.
        self.backdoor_port = eventlet_backdoor.initialize_if_enabled()

    @staticmethod
    def run_server(server):
        """Start and wait for a server to finish.

        :param server: Server to run and wait for.
        :returns: None

        """
        server.start()
        server.wait()

    def launch_server(self, server):
        """Load and start the given server.

        :param server: The server you would like to start.
        :returns: None

        """
        if self.backdoor_port is not None:
            server.backdoor_port = self.backdoor_port
        # Run the server in its own greenthread; track it so stop()/wait()
        # can kill and join it later.
        gt = eventlet.spawn(self.run_server, server)
        self._services.append(gt)

    def stop(self):
        """Stop all services which are currently running.

        :returns: None

        """
        for service in self._services:
            service.kill()

    def wait(self):
        """Waits until all services have been stopped, and then returns.

        :returns: None

        """
        for service in self._services:
            try:
                service.wait()
            except greenlet.GreenletExit:
                # Expected when a greenthread is killed via stop().
                pass
class ServiceLauncher(Launcher):
    """Launcher that runs in the foreground and exits cleanly on SIGTERM/SIGINT."""

    def _handle_signal(self, signo, frame):
        # Allow the process to be killed again and die from natural causes
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        signal.signal(signal.SIGINT, signal.SIG_DFL)

        raise SignalExit(signo)

    def wait(self):
        """Install signal handlers, log the effective configuration, and
        block until all launched services finish or a signal is caught.

        Exits the process (via sys.exit) with the signal/SystemExit status
        when one was raised; always stops services and cleans up RPC.
        """
        signal.signal(signal.SIGTERM, self._handle_signal)
        signal.signal(signal.SIGINT, self._handle_signal)

        LOG.debug(_('Full set of CONF:'))
        for flag in CONF:
            flag_get = CONF.get(flag, None)
            # hide flag contents from log if contains a password
            # should use secret flag when switch over to openstack-common
            # NOTE: guard on flag_get — an unset sql_connection is None and
            # the original `"mysql:" in flag_get` raised TypeError.
            if ("_password" in flag or "_key" in flag or
                    (flag == "sql_connection" and flag_get and
                     "mysql:" in flag_get)):
                LOG.debug(_('%(flag)s : FLAG SET ') % locals())
            else:
                LOG.debug('%(flag)s : %(flag_get)s' % locals())

        status = None
        try:
            super(ServiceLauncher, self).wait()
        except SignalExit as exc:
            signame = {signal.SIGTERM: 'SIGTERM',
                       signal.SIGINT: 'SIGINT'}[exc.signo]
            LOG.info(_('Caught %s, exiting'), signame)
            status = exc.code
        except SystemExit as exc:
            status = exc.code
        finally:
            # Always tear down services and RPC, even on error paths.
            self.stop()
            rpc.cleanup()
        if status is not None:
            sys.exit(status)
class ServerWrapper(object):
    """Bookkeeping for one server: desired worker count, pids of live
    child processes, and recent fork timestamps (for throttling)."""

    def __init__(self, server, workers):
        # Timestamps of recent forks, used to throttle respawn storms.
        self.forktimes = []
        # pids of currently-running children for this server.
        self.children = set()
        self.workers = workers
        self.server = server
class ProcessLauncher(object):
    """Fork-based launcher: runs each server in a pool of child processes
    and respawns children that die while the launcher is running."""

    def __init__(self):
        # pid -> ServerWrapper for every live child we have forked.
        self.children = {}
        self.sigcaught = None
        self.running = True
        # Pipe used by children to detect parent death: children block
        # reading the read end, which hits EOF when the parent's write
        # end is closed (i.e. the parent exited).
        rfd, self.writepipe = os.pipe()
        self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')

        signal.signal(signal.SIGTERM, self._handle_signal)
        signal.signal(signal.SIGINT, self._handle_signal)

    def _handle_signal(self, signo, frame):
        # Record the signal and stop the respawn loop; wait() handles the
        # orderly shutdown of children.
        self.sigcaught = signo
        self.running = False

        # Allow the process to be killed again and die from natural causes
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    def _pipe_watcher(self):
        # This will block until the write end is closed when the parent
        # dies unexpectedly
        self.readpipe.read()

        LOG.info(_('Parent process has died unexpectedly, exiting'))

        sys.exit(1)

    def _child_process(self, server):
        """Run *server* inside a freshly forked child process."""
        # Setup child signal handlers differently
        def _sigterm(*args):
            signal.signal(signal.SIGTERM, signal.SIG_DFL)
            raise SignalExit(signal.SIGTERM)

        signal.signal(signal.SIGTERM, _sigterm)
        # Block SIGINT and let the parent send us a SIGTERM
        signal.signal(signal.SIGINT, signal.SIG_IGN)

        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        eventlet.hubs.use_hub()

        # Close write to ensure only parent has it open
        os.close(self.writepipe)
        # Create greenthread to watch for parent to close pipe
        eventlet.spawn(self._pipe_watcher)

        # Reseed random number generator
        random.seed()

        launcher = Launcher()
        launcher.run_server(server)

    def _start_child(self, wrap):
        """Fork one worker for *wrap*; returns the child pid (parent side).

        In the child this never returns: it runs the server and _exit()s.
        """
        if len(wrap.forktimes) > wrap.workers:
            # Limit ourselves to one process a second (over the period of
            # number of workers * 1 second). This will allow workers to
            # start up quickly but ensure we don't fork off children that
            # die instantly too quickly.
            if time.time() - wrap.forktimes[0] < wrap.workers:
                LOG.info(_('Forking too fast, sleeping'))
                time.sleep(1)

            wrap.forktimes.pop(0)

        wrap.forktimes.append(time.time())

        pid = os.fork()
        if pid == 0:
            # NOTE(johannes): All exceptions are caught to ensure this
            # doesn't fallback into the loop spawning children. It would
            # be bad for a child to spawn more children.
            status = 0
            try:
                self._child_process(wrap.server)
            except SignalExit as exc:
                signame = {signal.SIGTERM: 'SIGTERM',
                           signal.SIGINT: 'SIGINT'}[exc.signo]
                LOG.info(_('Caught %s, exiting'), signame)
                status = exc.code
            except SystemExit as exc:
                status = exc.code
            except BaseException:
                LOG.exception(_('Unhandled exception'))
                status = 2
            finally:
                wrap.server.stop()

            # os._exit() so the child never runs the parent's cleanup code.
            os._exit(status)

        LOG.info(_('Started child %d'), pid)

        wrap.children.add(pid)
        self.children[pid] = wrap

        return pid

    def launch_server(self, server, workers=1):
        """Fork *workers* child processes, each running *server*."""
        wrap = ServerWrapper(server, workers)

        LOG.info(_('Starting %d workers'), wrap.workers)
        while self.running and len(wrap.children) < wrap.workers:
            self._start_child(wrap)

    def _wait_child(self):
        """Reap one dead child; return its ServerWrapper, or None."""
        try:
            pid, status = os.wait()
        except OSError as exc:
            # EINTR: interrupted by a signal; ECHILD: no children left.
            if exc.errno not in (errno.EINTR, errno.ECHILD):
                raise
            return None

        if os.WIFSIGNALED(status):
            sig = os.WTERMSIG(status)
            LOG.info(_('Child %(pid)d killed by signal %(sig)d'), locals())
        else:
            code = os.WEXITSTATUS(status)
            LOG.info(_('Child %(pid)d exited with status %(code)d'), locals())

        if pid not in self.children:
            LOG.warning(_('pid %d not in child list'), pid)
            return None

        wrap = self.children.pop(pid)
        wrap.children.remove(pid)
        return wrap

    def wait(self):
        """Loop waiting on children to die and respawning as necessary."""
        while self.running:
            wrap = self._wait_child()
            if not wrap:
                continue

            # Respawn until the wrapper is back at its configured worker count.
            while self.running and len(wrap.children) < wrap.workers:
                self._start_child(wrap)

        if self.sigcaught:
            signame = {signal.SIGTERM: 'SIGTERM',
                       signal.SIGINT: 'SIGINT'}[self.sigcaught]
            LOG.info(_('Caught %s, stopping children'), signame)

        for pid in self.children:
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as exc:
                # ESRCH: child already gone; anything else is unexpected.
                if exc.errno != errno.ESRCH:
                    raise

        # Wait for children to die
        if self.children:
            LOG.info(_('Waiting on %d children to exit'), len(self.children))
            while self.children:
                self._wait_child()
class Service(object):
"""Service object for binaries running on hosts.
A service takes a manager and enables rpc by listening to queues based
on topic. It also periodically runs tasks on the manager and reports
it state to the database services table."""
def __init__(self, host, binary, topic, manager, report_interval=None,
periodic_enable=None, periodic_fuzzy_delay=None,
periodic_interval_max=None, db_allowed=True,
*args, **kwargs):
self.host = host
self.binary = binary
self.topic = topic
self.manager_class_name = manager
# NOTE(russellb) We want to make sure to create the servicegroup API
# instance early, before creating other things such as the manager,
# that will also create a servicegroup API instance. Internally, the
# servicegroup only allocates a single instance of the driver API and
# we want to make sure that our value of db_allowed is there when it
# gets created. For that to happen, this has to be the first instance
# of the servicegroup API.
self.servicegroup_api = servicegroup.API(db_allowed=db_allowed)
manager_class = importutils.import_class(self.manager_class_name)
self.manager = manager_class(host=self.host, *args, **kwargs)
self.report_interval = report_interval
self.periodic_enable = periodic_enable
self.periodic_fuzzy_delay = periodic_fuzzy_delay
self.periodic_interval_max = periodic_interval_max
self.saved_args, self.saved_kwargs = args, kwargs
self.timers = []
self.backdoor_port = None
self.conductor_api = conductor.API(use_local=db_allowed)
self.conductor_api.wait_until_ready(context.get_admin_context())
def start(self):
verstr = version.version_string_with_package()
LOG.audit(_('Starting %(topic)s node (version %(version)s)'),
{'topic': self.topic, 'version': verstr})
self.basic_config_check()
self.manager.init_host()
self.model_disconnected = False
ctxt = context.get_admin_context()
try:
self.service_ref = self.conductor_api.service_get_by_args(ctxt,
self.host, self.binary)
self.service_id = self.service_ref['id']
except exception.NotFound:
self.service_ref = self._create_service_ref(ctxt)
if self.backdoor_port is not None:
self.manager.backdoor_port = self.backdoor_port
self.conn = rpc.create_connection(new=True)
LOG.debug(_("Creating Consumer connection for Service %s") %
self.topic)
self.manager.pre_start_hook(rpc_connection=self.conn)
rpc_dispatcher = self.manager.create_rpc_dispatcher()
# Share this same connection for these Consumers
self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=False)
node_topic = '%s.%s' % (self.topic, self.host)
self.conn.create_consumer(node_topic, rpc_dispatcher, fanout=False)
self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=True)
# Consume from all consumers in a thread
self.conn.consume_in_thread()
self.manager.post_start_hook()
LOG.debug(_("Join ServiceGroup membership for this service %s")
% self.topic)
# Add service to the ServiceGroup membership group.
pulse = self.servicegroup_api.join(self.host, self.topic, self)
if pulse:
self.timers.append(pulse)
if self.periodic_enable:
if self.periodic_fuzzy_delay:
initial_delay = random.randint(0, self.periodic_fuzzy_delay)
else:
initial_delay = None
periodic = utils.DynamicLoopingCall(self.periodic_tasks)
periodic.start(initial_delay=initial_delay,
periodic_interval_max=self.periodic_interval_max)
self.timers.append(periodic)
def _create_service_ref(self, context):
svc_values = {
'host': self.host,
'binary': self.binary,
'topic': self.topic,
'report_count': 0
}
service = self.conductor_api.service_create(context, svc_values)
self.service_id = service['id']
return service
def __getattr__(self, key):
manager = self.__dict__.get('manager', None)
return getattr(manager, key)
@classmethod
def create(cls, host=None, binary=None, topic=None, manager=None,
report_interval=None, periodic_enable=None,
periodic_fuzzy_delay=None, periodic_interval_max=None,
db_allowed=True):
"""Instantiates class and passes back application object.
:param host: defaults to CONF.host
:param binary: defaults to basename of executable
:param topic: defaults to bin_name - 'nova-' part
:param manager: defaults to CONF.<topic>_manager
:param report_interval: defaults to CONF.report_interval
:param periodic_enable: defaults to CONF.periodic_enable
:param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay
:param periodic_interval_max: if set, the max time to wait between runs
"""
if not host:
host = CONF.host
if not binary:
binary = os.path.basename(inspect.stack()[-1][1])
if not topic:
topic = binary.rpartition('nova-')[2]
if not manager:
manager_cls = ('%s_manager' %
binary.rpartition('nova-')[2])
manager = CONF.get(manager_cls, None)
if report_interval is None:
report_interval = CONF.report_interval
if periodic_enable is None:
periodic_enable = CONF.periodic_enable
if periodic_fuzzy_delay is None:
periodic_fuzzy_delay = CONF.periodic_fuzzy_delay
service_obj = cls(host, binary, topic, manager,
report_interval=report_interval,
periodic_enable=periodic_enable,
periodic_fuzzy_delay=periodic_fuzzy_delay,
periodic_interval_max=periodic_interval_max,
db_allowed=db_allowed)
return service_obj
def kill(self):
"""Destroy the service object in the datastore."""
self.stop()
try:
self.conductor_api.service_destroy(context.get_admin_context(),
self.service_id)
except exception.NotFound:
LOG.warn(_('Service killed that has no database entry'))
def stop(self):
# Try to shut the connection down, but if we get any sort of
# errors, go ahead and ignore them.. as we're shutting down anyway
try:
self.conn.close()
except Exception:
pass
for x in self.timers:
try:
x.stop()
except Exception:
pass
self.timers = []
def wait(self):
for x in self.timers:
try:
x.wait()
except Exception:
pass
def periodic_tasks(self, raise_on_error=False):
"""Tasks to be run at a periodic interval."""
ctxt = context.get_admin_context()
return self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)
def basic_config_check(self):
"""Perform basic config checks before starting processing."""
# Make sure the tempdir exists and is writable
try:
with utils.tempdir() as tmpdir:
pass
except Exception as e:
LOG.error(_('Temporary directory is invalid: %s'), e)
sys.exit(1)
class WSGIService(object):
"""Provides ability to launch API from a 'paste' configuration."""
def __init__(self, name, loader=None, use_ssl=False, max_url_len=None):
"""Initialize, but do not start the WSGI server.
:param name: The name of the WSGI server given to the loader.
:param loader: Loads the WSGI application using the given name.
:returns: None
"""
self.name = name
self.manager = self._get_manager()
self.loader = loader or wsgi.Loader()
self.app = self.loader.load_app(name)
self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0")
self.port = getattr(CONF, '%s_listen_port' % name, 0)
self.workers = getattr(CONF, '%s_workers' % name, None)
self.use_ssl = use_ssl
self.server = wsgi.Server(name,
self.app,
host=self.host,
port=self.port,
use_ssl=self.use_ssl,
max_url_len=max_url_len)
# Pull back actual port used
self.port = self.server.port
self.backdoor_port = None
def _get_manager(self):
"""Initialize a Manager object appropriate for this service.
Use the service name to look up a Manager subclass from the
configuration and initialize an instance. If no class name
is configured, just return None.
:returns: a Manager instance, or None.
"""
fl = '%s_manager' % self.name
if fl not in CONF:
return None
manager_class_name = CONF.get(fl, None)
if not manager_class_name:
return None
manager_class = importutils.import_class(manager_class_name)
return manager_class()
def start(self):
"""Start serving this service using loaded configuration.
Also, retrieve updated port number in case '0' was passed in, which
indicates a random port should be used.
:returns: None
"""
if self.manager:
self.manager.init_host()
self.manager.pre_start_hook()
if self.backdoor_port is not None:
self.manager.backdoor_port = self.backdoor_port
self.server.start()
if self.manager:
self.manager.post_start_hook()
def stop(self):
"""Stop serving this API.
:returns: None
"""
self.server.stop()
def wait(self):
"""Wait for the service to stop serving this API.
:returns: None
"""
self.server.wait()
# NOTE(vish): the global launcher is to maintain the existing
# functionality of calling service.serve +
# service.wait
_launcher = None
def serve(server, workers=None):
global _launcher
if _launcher:
raise RuntimeError(_('serve() can only be called once'))
if workers:
_launcher = ProcessLauncher()
_launcher.launch_server(server, workers=workers)
else:
_launcher = ServiceLauncher()
_launcher.launch_server(server)
def wait():
_launcher.wait()
|
|
# -*- coding: utf-8 -*-
# This technical data was produced for the U. S. Government under Contract No. W15P7T-13-C-F600, and
# is subject to the Rights in Technical Data-Noncommercial Items clause at DFARS 252.227-7014 (FEB 2012)
import os
import sys
import time
from paver.easy import *
from paver.setuputils import setup
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
setup(
name="geoq",
packages=['geoq'],
version='0.0.0.2',
url="",
author="Site Admin",
author_email="admin@localhost"
)
@task
def install_dependencies():
""" Installs dependencies."""
sh('pip install --upgrade -r geoq/requirements.txt')
@cmdopts([
('fixture=', 'f', 'Fixture to install"'),
])
@task
def install_fixture(options):
""" Loads the supplied fixture """
fixture = options.get('fixture')
sh("python manage.py loaddata {fixture}".format(fixture=fixture))
def _perms_check():
sh("python manage.py check_permissions") # Check userena perms
sh("python manage.py clean_expired") # Clean our expired userena perms
@task
def install_dev_fixtures():
""" Installs development fixtures in the correct order """
fixtures = [
'geoq/fixtures/initial_data.json', # user permissions
'geoq/accounts/fixtures/initial_data.json', # dummy users and groups
'geoq/maps/fixtures/initial_data_types.json', # Maps
'geoq/core/fixtures/initial_data.json',
#'geoq/badges/fixtures/initial_data.json', # Removing badges for now, b/c not working
]
for fixture in fixtures:
sh("python manage.py loaddata {fixture}".format(fixture=fixture))
sh("python manage.py migrate --all")
_perms_check()
@task
def sync():
""" Runs the syncdb process with migrations """
sh("python manage.py syncdb --noinput")
sh("python manage.py migrate --all --no-initial-data")
fixture = 'geoq/fixtures/initial_data.json'
sh("python manage.py loaddata {fixture}".format(fixture=fixture))
_perms_check()
@task
def reset_dev_env():
""" Resets your dev environment from scratch in the current branch you are in. """
from geoq import settings
database = settings.DATABASES.get('default').get('NAME')
sh('dropdb {database}'.format(database=database))
createdb()
sync()
install_dev_fixtures()
@cmdopts([
('bind=', 'b', 'Bind server to provided IP address and port number.'),
])
@task
def start_django(options):
""" Starts the Django application. """
bind = options.get('bind', '')
sh('python manage.py runserver %s &' % bind)
@task
def delayed_fixtures():
"""Loads maps"""
sh('python manage.py loaddata initial_data.json')
@task
def stop_django():
"""
Stop the GeoNode Django application
"""
kill('python', 'runserver')
@needs(['stop_django',
'sync',
'start_django'])
def start():
""" Syncs the database and then starts the development server. """
info("GeoQ is now available.")
@task
def createdb(options):
""" Creates the database in postgres. """
from geoq import settings
database = settings.DATABASES.get('default').get('NAME')
sh('createdb {database}'.format(database=database))
sh('echo "CREATE EXTENSION postgis;CREATE EXTENSION postgis_topology" | psql -d {database}'.format(database=database))
@task
def create_db_user():
""" Creates the database in postgres. """
from geoq import settings
database = settings.DATABASES.get('default').get('NAME')
user = settings.DATABASES.get('default').get('USER')
password = settings.DATABASES.get('default').get('PASSWORD')
sh('psql -d {database} -c {sql}'.format(
database=database,
sql='"CREATE USER {user} WITH PASSWORD \'{password}\';"'.format(user=user, password=password)))
# Order matters for the list of apps, otherwise migrations reset may fail.
_APPS = ['maps', 'accounts', 'badges', 'core']
@task
def reset_migrations():
"""
Takes an existing environment and updates it after a full migration reset.
"""
for app in _APPS:
sh('python manage.py migrate %s 0001 --fake --delete-ghost-migrations' % app)
@task
def reset_migrations_full():
"""
Resets south to start with a clean setup.
This task will process a default list: accounts, core, maps, badges
To run a full reset which removes all migraitons in repo -- run paver reset_south full
"""
for app in _APPS:
sh('rm -rf geoq/%s/migrations/' % app)
sh('python manage.py schemamigration %s --initial' % app)
# Finally, we execute the last setup.
reset_migrations()
def kill(arg1, arg2):
"""Stops a proces that contains arg1 and is filtered by arg2
"""
from subprocess import Popen, PIPE
# Wait until ready
t0 = time.time()
# Wait no more than these many seconds
time_out = 30
running = True
lines = []
while running and time.time() - t0 < time_out:
p = Popen('ps aux | grep %s' % arg1, shell=True,
stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
lines = p.stdout.readlines()
running = False
for line in lines:
if '%s' % arg2 in line:
running = True
# Get pid
fields = line.strip().split()
info('Stopping %s (process number %s)' % (arg1, fields[1]))
kill_cmd = 'kill -9 %s 2> /dev/null' % fields[1]
os.system(kill_cmd)
# Give it a little more time
time.sleep(1)
else:
pass
if running:
raise Exception('Could not stop %s: '
'Running processes are\n%s'
% (arg1, '\n'.join([l.strip() for l in lines])))
|
|
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
import os
import debtcollector.renames
from keystoneauth1 import access
from keystoneauth1 import adapter
from oslo_serialization import jsonutils
from oslo_utils import importutils
import requests
from neutronclient._i18n import _
from neutronclient.common import exceptions
from neutronclient.common import utils
osprofiler_web = importutils.try_import("osprofiler.web")
_logger = logging.getLogger(__name__)
if os.environ.get('NEUTRONCLIENT_DEBUG'):
ch = logging.StreamHandler()
_logger.setLevel(logging.DEBUG)
_logger.addHandler(ch)
_requests_log_level = logging.DEBUG
else:
_requests_log_level = logging.WARNING
logging.getLogger("requests").setLevel(_requests_log_level)
MAX_URI_LEN = 8192
USER_AGENT = 'python-neutronclient'
REQ_ID_HEADER = 'X-OpenStack-Request-ID'
class HTTPClient(object):
"""Handles the REST calls and responses, include authn."""
CONTENT_TYPE = 'application/json'
@debtcollector.renames.renamed_kwarg(
'tenant_id', 'project_id', replace=True)
@debtcollector.renames.renamed_kwarg(
'tenant_name', 'project_name', replace=True)
def __init__(self, username=None, user_id=None,
project_name=None, project_id=None,
password=None, auth_url=None,
token=None, region_name=None, timeout=None,
endpoint_url=None, insecure=False,
endpoint_type='publicURL',
auth_strategy='keystone', ca_cert=None, cert=None,
log_credentials=False, service_type='network',
global_request_id=None, **kwargs):
self.username = username
self.user_id = user_id
self.project_name = project_name
self.project_id = project_id
self.password = password
self.auth_url = auth_url.rstrip('/') if auth_url else None
self.service_type = service_type
self.endpoint_type = endpoint_type
self.region_name = region_name
self.timeout = timeout
self.auth_token = token
self.auth_tenant_id = None
self.auth_user_id = None
self.endpoint_url = endpoint_url
self.auth_strategy = auth_strategy
self.log_credentials = log_credentials
self.global_request_id = global_request_id
self.cert = cert
if insecure:
self.verify_cert = False
else:
self.verify_cert = ca_cert if ca_cert else True
def _cs_request(self, *args, **kwargs):
kargs = {}
kargs.setdefault('headers', kwargs.get('headers', {}))
kargs['headers']['User-Agent'] = USER_AGENT
if 'body' in kwargs:
kargs['body'] = kwargs['body']
if self.log_credentials:
log_kargs = kargs
else:
log_kargs = self._strip_credentials(kargs)
utils.http_log_req(_logger, args, log_kargs)
try:
resp, body = self.request(*args, **kargs)
except requests.exceptions.SSLError as e:
raise exceptions.SslCertificateValidationError(reason=str(e))
except Exception as e:
# Wrap the low-level connection error (socket timeout, redirect
# limit, decompression error, etc) into our custom high-level
# connection exception (it is excepted in the upper layers of code)
_logger.debug("throwing ConnectionFailed : %s", e)
raise exceptions.ConnectionFailed(reason=str(e))
utils.http_log_resp(_logger, resp, body)
# log request-id for each api call
request_id = resp.headers.get('x-openstack-request-id')
if request_id:
_logger.debug('%(method)s call to neutron for '
'%(url)s used request id '
'%(response_request_id)s',
{'method': resp.request.method,
'url': resp.url,
'response_request_id': request_id})
if resp.status_code == 401:
raise exceptions.Unauthorized(message=body)
return resp, body
def _strip_credentials(self, kwargs):
if kwargs.get('body') and self.password:
log_kwargs = kwargs.copy()
log_kwargs['body'] = kwargs['body'].replace(self.password,
'REDACTED')
return log_kwargs
else:
return kwargs
def authenticate_and_fetch_endpoint_url(self):
if not self.auth_token:
self.authenticate()
elif not self.endpoint_url:
self.endpoint_url = self._get_endpoint_url()
def request(self, url, method, body=None, headers=None, **kwargs):
"""Request without authentication."""
content_type = kwargs.pop('content_type', None) or 'application/json'
headers = headers or {}
headers.setdefault('Accept', content_type)
if body:
headers.setdefault('Content-Type', content_type)
if self.global_request_id:
headers.setdefault(REQ_ID_HEADER, self.global_request_id)
headers['User-Agent'] = USER_AGENT
# NOTE(dbelova): osprofiler_web.get_trace_id_headers does not add any
# headers in case if osprofiler is not initialized.
if osprofiler_web:
headers.update(osprofiler_web.get_trace_id_headers())
resp = requests.request(
method,
url,
data=body,
headers=headers,
verify=self.verify_cert,
cert=self.cert,
timeout=self.timeout,
**kwargs)
return resp, resp.text
def _check_uri_length(self, action):
uri_len = len(self.endpoint_url) + len(action)
if uri_len > MAX_URI_LEN:
raise exceptions.RequestURITooLong(
excess=uri_len - MAX_URI_LEN)
def do_request(self, url, method, **kwargs):
# Ensure client always has correct uri - do not guesstimate anything
self.authenticate_and_fetch_endpoint_url()
self._check_uri_length(url)
# Perform the request once. If we get a 401 back then it
# might be because the auth token expired, so try to
# re-authenticate and try again. If it still fails, bail.
try:
kwargs['headers'] = kwargs.get('headers') or {}
if self.auth_token is None:
self.auth_token = ""
kwargs['headers']['X-Auth-Token'] = self.auth_token
resp, body = self._cs_request(self.endpoint_url + url, method,
**kwargs)
return resp, body
except exceptions.Unauthorized:
self.authenticate()
kwargs['headers'] = kwargs.get('headers') or {}
kwargs['headers']['X-Auth-Token'] = self.auth_token
resp, body = self._cs_request(
self.endpoint_url + url, method, **kwargs)
return resp, body
def _extract_service_catalog(self, body):
"""Set the client's service catalog from the response data."""
self.auth_ref = access.create(body=body)
self.service_catalog = self.auth_ref.service_catalog
self.auth_token = self.auth_ref.auth_token
self.auth_tenant_id = self.auth_ref.tenant_id
self.auth_user_id = self.auth_ref.user_id
if not self.endpoint_url:
self.endpoint_url = self.service_catalog.url_for(
region_name=self.region_name,
service_type=self.service_type,
interface=self.endpoint_type)
def _authenticate_keystone(self):
if self.user_id:
creds = {'userId': self.user_id,
'password': self.password}
else:
creds = {'username': self.username,
'password': self.password}
if self.project_id:
body = {'auth': {'passwordCredentials': creds,
'tenantId': self.project_id, }, }
else:
body = {'auth': {'passwordCredentials': creds,
'tenantName': self.project_name, }, }
if self.auth_url is None:
raise exceptions.NoAuthURLProvided()
token_url = self.auth_url + "/tokens"
resp, resp_body = self._cs_request(token_url, "POST",
body=jsonutils.dumps(body),
content_type="application/json",
allow_redirects=True)
if resp.status_code != 200:
raise exceptions.Unauthorized(message=resp_body)
if resp_body:
try:
resp_body = jsonutils.loads(resp_body)
except ValueError:
pass
else:
resp_body = None
self._extract_service_catalog(resp_body)
def _authenticate_noauth(self):
if not self.endpoint_url:
message = _('For "noauth" authentication strategy, the endpoint '
'must be specified either in the constructor or '
'using --os-url')
raise exceptions.Unauthorized(message=message)
def authenticate(self):
if self.auth_strategy == 'keystone':
self._authenticate_keystone()
elif self.auth_strategy == 'noauth':
self._authenticate_noauth()
else:
err_msg = _('Unknown auth strategy: %s') % self.auth_strategy
raise exceptions.Unauthorized(message=err_msg)
def _get_endpoint_url(self):
if self.auth_url is None:
raise exceptions.NoAuthURLProvided()
url = self.auth_url + '/tokens/%s/endpoints' % self.auth_token
try:
resp, body = self._cs_request(url, "GET")
except exceptions.Unauthorized:
# rollback to authenticate() to handle case when neutron client
# is initialized just before the token is expired
self.authenticate()
return self.endpoint_url
body = jsonutils.loads(body)
for endpoint in body.get('endpoints', []):
if (endpoint['type'] == 'network' and
endpoint.get('region') == self.region_name):
if self.endpoint_type not in endpoint:
raise exceptions.EndpointTypeNotFound(
type_=self.endpoint_type)
return endpoint[self.endpoint_type]
raise exceptions.EndpointNotFound()
def get_auth_info(self):
return {'auth_token': self.auth_token,
'auth_tenant_id': self.auth_tenant_id,
'auth_user_id': self.auth_user_id,
'endpoint_url': self.endpoint_url}
def get_auth_ref(self):
return getattr(self, 'auth_ref', None)
class SessionClient(adapter.Adapter):
def request(self, *args, **kwargs):
kwargs.setdefault('authenticated', False)
kwargs.setdefault('raise_exc', False)
content_type = kwargs.pop('content_type', None) or 'application/json'
headers = kwargs.get('headers') or {}
headers.setdefault('Accept', content_type)
# NOTE(dbelova): osprofiler_web.get_trace_id_headers does not add any
# headers in case if osprofiler is not initialized.
if osprofiler_web:
headers.update(osprofiler_web.get_trace_id_headers())
try:
kwargs.setdefault('data', kwargs.pop('body'))
except KeyError:
pass
if kwargs.get('data'):
headers.setdefault('Content-Type', content_type)
kwargs['headers'] = headers
resp = super(SessionClient, self).request(*args, **kwargs)
return resp, resp.text
def _check_uri_length(self, url):
uri_len = len(self.endpoint_url) + len(url)
if uri_len > MAX_URI_LEN:
raise exceptions.RequestURITooLong(
excess=uri_len - MAX_URI_LEN)
def do_request(self, url, method, **kwargs):
kwargs.setdefault('authenticated', True)
self._check_uri_length(url)
return self.request(url, method, **kwargs)
@property
def endpoint_url(self):
# NOTE(jamielennox): This is used purely by the CLI and should be
# removed when the CLI gets smarter.
return self.get_endpoint()
@property
def auth_token(self):
# NOTE(jamielennox): This is used purely by the CLI and should be
# removed when the CLI gets smarter.
return self.get_token()
def authenticate(self):
# NOTE(jamielennox): This is used purely by the CLI and should be
# removed when the CLI gets smarter.
self.get_token()
def get_auth_info(self):
auth_info = {'auth_token': self.auth_token,
'endpoint_url': self.endpoint_url}
# NOTE(jamielennox): This is the best we can do here. It will work
# with identity plugins which is the primary case but we should
# deprecate it's usage as much as possible.
try:
get_access = (self.auth or self.session.auth).get_access
except AttributeError:
pass
else:
auth_ref = get_access(self.session)
auth_info['auth_tenant_id'] = auth_ref.project_id
auth_info['auth_user_id'] = auth_ref.user_id
return auth_info
def get_auth_ref(self):
return self.session.auth.get_auth_ref(self.session)
# FIXME(bklei): Should refactor this to use kwargs and only
# explicitly list arguments that are not None.
@debtcollector.renames.renamed_kwarg('tenant_id', 'project_id', replace=True)
@debtcollector.renames.renamed_kwarg(
'tenant_name', 'project_name', replace=True)
def construct_http_client(username=None,
user_id=None,
project_name=None,
project_id=None,
password=None,
auth_url=None,
token=None,
region_name=None,
timeout=None,
endpoint_url=None,
insecure=False,
endpoint_type='public',
log_credentials=None,
auth_strategy='keystone',
ca_cert=None,
cert=None,
service_type='network',
session=None,
global_request_id=None,
**kwargs):
if session:
kwargs.setdefault('user_agent', USER_AGENT)
kwargs.setdefault('interface', endpoint_type)
return SessionClient(session=session,
service_type=service_type,
region_name=region_name,
global_request_id=global_request_id,
**kwargs)
else:
# FIXME(bklei): username and password are now optional. Need
# to test that they were provided in this mode. Should also
# refactor to use kwargs.
return HTTPClient(username=username,
password=password,
project_id=project_id,
project_name=project_name,
user_id=user_id,
auth_url=auth_url,
token=token,
endpoint_url=endpoint_url,
insecure=insecure,
timeout=timeout,
region_name=region_name,
endpoint_type=endpoint_type,
service_type=service_type,
ca_cert=ca_cert,
cert=cert,
log_credentials=log_credentials,
auth_strategy=auth_strategy,
global_request_id=global_request_id)
|
|
from nose.tools import assert_almost_equal, assert_raises
from mapproxy_webconf import defaults
from mapproxy_webconf.lib.geojson import ConfigGeoJSONGrid, polygons, \
point_feature, polygon_feature, features, \
InvalidGridBBoxTransformationException, InvalidTileBBoxTransformationException
GLOBAL_BBOX_4326 = [-180.0, -90.0, 180.0, 90.0]
GLOBAL_BBOX_4326_ALIGNED = [-180, -85.05112877, 180, 85.05112877]
LOCAL_BBOX_4326_1 = [-20.0, -20.0, 20.0, 20.0]
LOCAL_BBOX_4326_2 = [-40.0, -40.0, 40.0, 40.0]
GLOBAL_BBOX_3857 = [-20037508.342789236, -20037508.342789236,
20037508.342789236, 20037508.342789236]
GLOBAL_BBOX_TRANSFORMED_TO_3857 = [-20037508.342789244, -
147730762.66992167,
20037508.342789244,
147730758.19456753]
LOCAL_BBOX_3857_1 = [-2226389.8158654715, -2273030.92698769,
2226389.8158654715, 2273030.926987689]
LOCAL_BBOX_3857_2 = [-4452779.631730943, -4865942.279503176,
4452779.631730943, 4865942.279503176]
OVERGLOBAL_BBOX_4326 = [-703.125, -421.825, 703.125, 421.825]
GLOBAL_POLYGON_4326 = [
(-180.0, -90.0), (180.0, -90.0), (180.0, 90.0), (-180.0, 90.0), (-180.0, -90.0)]
GLOBAL_POLYGON_3857 = [(-20037508.342789236, -20037508.342789236),
(20037508.342789236, -20037508.342789236),
(20037508.342789236, 20037508.342789236),
(-20037508.342789236, 20037508.342789236),
(-20037508.342789236, -20037508.342789236)]
LOCAL_POLYGON_3857 = [(-2226389.8158654715, -2273030.92698769),
(2226389.8158654715, -2273030.92698769),
(2226389.8158654715, 2273030.926987689),
(-2226389.8158654715, 2273030.926987689),
(-2226389.8158654715, -2273030.92698769)]
def assert_list_almost_equal(list_a, list_b):
assert len(list_a) == len(list_b)
for i in range(len(list_a)):
assert_almost_equal(list_a[i], list_b[i])
def assert_point_list_almost_equal(list_a, list_b):
assert len(list_a) == len(list_b)
for i in range(len(list_a)):
assert_almost_equal(list_a[i][0], list_b[i][0])
assert_almost_equal(list_a[i][1], list_b[i][1])
class TestConfigGeoJSONGrid(object):
def test_without_parameters(self):
config = ConfigGeoJSONGrid()
assert config.request_bbox == None
assert config.grid_bbox == None
assert config.level == None
assert config.grid_srs == None
assert config.grid_bbox_srs == None
assert config.map_srs == None
assert config.map_bbox == None
assert config.res == None
assert config.origin == 'll'
def test_all_bboxes_in_grid_srs(self):
# global
with assert_raises(InvalidTileBBoxTransformationException) as cm:
config = ConfigGeoJSONGrid(grid_srs='EPSG:4326', map_srs='EPSG:3857',
grid_bbox_srs='EPSG:3857',
request_bbox=GLOBAL_BBOX_3857,
grid_bbox=GLOBAL_BBOX_3857)
assert cm.exception.args[0] == 'Invalid transformation for tile in level 0'
with assert_raises(InvalidTileBBoxTransformationException) as cm:
config = ConfigGeoJSONGrid(grid_srs='EPSG:4326', map_srs='EPSG:3857',
grid_bbox_srs='EPSG:4326',
request_bbox=GLOBAL_BBOX_3857,
grid_bbox=GLOBAL_BBOX_4326)
assert cm.exception.args[0] == 'Invalid transformation for tile in level 0'
with assert_raises(InvalidGridBBoxTransformationException) as cm:
config = ConfigGeoJSONGrid(grid_srs='EPSG:3857', map_srs='EPSG:4326',
grid_bbox_srs='EPSG:4326',
request_bbox=GLOBAL_BBOX_4326,
grid_bbox=[-180, -90, 180, 270])
assert cm.exception.args[0] == 'Invalid transformation for grid_bbox'
config = ConfigGeoJSONGrid(grid_srs='EPSG:3857', map_srs='EPSG:4326',
grid_bbox_srs='EPSG:3857',
request_bbox=GLOBAL_BBOX_4326,
grid_bbox=GLOBAL_BBOX_3857)
assert config.map_bbox == None
assert_list_almost_equal(config.grid_bbox, GLOBAL_BBOX_3857)
# local
config = ConfigGeoJSONGrid(grid_srs='EPSG:4326', map_srs='EPSG:3857',
grid_bbox_srs='EPSG:3857',
request_bbox=LOCAL_BBOX_3857_1,
grid_bbox=LOCAL_BBOX_3857_2)
assert_list_almost_equal(config.map_bbox, LOCAL_BBOX_4326_1)
assert_list_almost_equal(config.grid_bbox, LOCAL_BBOX_4326_2)
config = ConfigGeoJSONGrid(grid_srs='EPSG:3857', map_srs='EPSG:4326',
grid_bbox_srs='EPSG:4326',
request_bbox=LOCAL_BBOX_4326_1,
grid_bbox=LOCAL_BBOX_4326_2)
assert_list_almost_equal(config.map_bbox, LOCAL_BBOX_3857_1)
assert_list_almost_equal(config.grid_bbox, LOCAL_BBOX_3857_2)
# overglobal
with assert_raises(InvalidGridBBoxTransformationException) as cm:
config = ConfigGeoJSONGrid(grid_srs='EPSG:3857', map_srs='EPSG:4326',
grid_bbox_srs='EPSG:4326',
request_bbox=OVERGLOBAL_BBOX_4326,
grid_bbox=GLOBAL_BBOX_4326)
assert cm.exception.args[0] == 'Invalid transformation for grid_bbox'
def test_view_box(self):
    """The view bbox is the request bbox clipped to the grid bbox,
    expressed in the grid SRS.
    """
    # All SRS equal: no reprojection involved.
    grid_srs = 'EPSG:4326'
    grid_bbox_srs = 'EPSG:4326'
    map_srs = 'EPSG:4326'
    level = 0
    # Request fully inside the grid: view bbox == request bbox.
    config = ConfigGeoJSONGrid(grid_bbox=[-30, -30, 30, 30],
                               request_bbox=LOCAL_BBOX_4326_1,
                               grid_srs=grid_srs, grid_bbox_srs=grid_bbox_srs,
                               map_srs=map_srs, level=level)
    assert_list_almost_equal(config.view_bbox, LOCAL_BBOX_4326_1)
    # Request larger than the grid: clipped down to the grid bbox.
    config = ConfigGeoJSONGrid(grid_bbox=[-30, -30, 30, 30],
                               request_bbox=LOCAL_BBOX_4326_2,
                               grid_srs=grid_srs, grid_bbox_srs=grid_bbox_srs,
                               map_srs=map_srs, level=level)
    assert_list_almost_equal(config.view_bbox, [-30.0, -30.0, 30.0, 30.0])
    # Narrow grid: clipped in x, request extent kept in y.
    config = ConfigGeoJSONGrid(grid_bbox=[-10, -30, 10, 30],
                               request_bbox=LOCAL_BBOX_4326_1,
                               grid_srs=grid_srs, grid_bbox_srs=grid_bbox_srs,
                               map_srs=map_srs, level=level)
    assert_list_almost_equal(config.view_bbox, [-10.0, -20.0, 10.0, 20.0])
    # Web-mercator map SRS: requests arrive in EPSG:3857, but the view
    # bbox is still reported in the (EPSG:4326) grid SRS.
    map_srs = 'EPSG:3857'
    config = ConfigGeoJSONGrid(grid_bbox=[-30, -30, 30, 30],
                               request_bbox=LOCAL_BBOX_3857_1,
                               grid_srs=grid_srs, grid_bbox_srs=grid_bbox_srs,
                               map_srs=map_srs, level=level)
    assert_list_almost_equal(config.view_bbox, LOCAL_BBOX_4326_1)
    config = ConfigGeoJSONGrid(grid_bbox=[-30, -30, 30, 30],
                               request_bbox=LOCAL_BBOX_3857_2,
                               grid_srs=grid_srs, grid_bbox_srs=grid_bbox_srs,
                               map_srs=map_srs, level=level)
    assert_list_almost_equal(config.view_bbox, [-30.0, -30.0, 30.0, 30.0])
    config = ConfigGeoJSONGrid(grid_bbox=[-10, -30, 10, 30],
                               request_bbox=LOCAL_BBOX_3857_1,
                               grid_srs=grid_srs, grid_bbox_srs=grid_bbox_srs,
                               map_srs=map_srs, level=level)
    assert_list_almost_equal(config.view_bbox, [-10.0, -20.0, 10.0, 20.0])
    # Mercator grid SRS: the view bbox values are now mercator coordinates.
    grid_srs = 'EPSG:3857'
    map_srs = 'EPSG:4326'
    config = ConfigGeoJSONGrid(grid_bbox=[-30, -30, 30, 30],
                               request_bbox=LOCAL_BBOX_4326_1,
                               grid_srs=grid_srs, grid_bbox_srs=grid_bbox_srs,
                               map_srs=map_srs, level=level)
    assert_list_almost_equal(config.view_bbox, LOCAL_BBOX_3857_1)
    config = ConfigGeoJSONGrid(grid_bbox=[-30, -30, 30, 30],
                               request_bbox=LOCAL_BBOX_4326_2,
                               grid_srs=grid_srs, grid_bbox_srs=grid_bbox_srs,
                               map_srs=map_srs, level=level)
    assert_list_almost_equal(
        config.view_bbox, [-3339584.723798206, -3503549.8435043744,
                           3339584.723798206, 3503549.843504374])
    config = ConfigGeoJSONGrid(grid_bbox=[-10, -30, 10, 30],
                               request_bbox=LOCAL_BBOX_4326_1,
                               grid_srs=grid_srs,
                               grid_bbox_srs=grid_bbox_srs,
                               map_srs=map_srs,
                               level=level)
    assert_list_almost_equal(
        config.view_bbox, [-1113194.9079327343, -2273030.92698769,
                           1113194.9079327343, 2273030.926987689])
    # A grid bbox that cannot be transformed into the grid SRS fails early.
    with assert_raises(InvalidGridBBoxTransformationException) as cm:
        config = ConfigGeoJSONGrid(grid_srs='EPSG:3857', map_srs='EPSG:4326',
                                   grid_bbox_srs='EPSG:4326',
                                   request_bbox=OVERGLOBAL_BBOX_4326,
                                   grid_bbox=GLOBAL_BBOX_4326)
    assert cm.exception.args[0] == 'Invalid transformation for grid_bbox'
def test_global_polygon(self):
    """Polygons for a single world-covering tile in a pure EPSG:4326 setup."""
    defaults.TILE_POLYGON_POINTS = 4
    cfg = ConfigGeoJSONGrid(map_srs='EPSG:4326', grid_srs='EPSG:4326',
                            grid_bbox_srs='EPSG:4326',
                            grid_bbox=GLOBAL_BBOX_4326,
                            request_bbox=GLOBAL_BBOX_4326)
    # Without per-tile extras only the polygon ring is produced.
    first = list(polygons(cfg, [(0, 0, 0)], False))[0]
    assert_point_list_almost_equal(first[0][0], GLOBAL_POLYGON_4326)
    # With extras enabled, additional per-tile data is attached.
    first = list(polygons(cfg, [(0, 0, 0)], True))[0]
    assert_point_list_almost_equal(first[0][0], GLOBAL_POLYGON_4326)
    assert_list_almost_equal(first[1], [0.0, 0.0])
    assert first[2] == (0, 0, 0)
    # An over-global request bbox still yields the global polygon.
    cfg = ConfigGeoJSONGrid(grid_srs='EPSG:4326', map_srs='EPSG:4326',
                            grid_bbox_srs='EPSG:4326',
                            request_bbox=OVERGLOBAL_BBOX_4326,
                            grid_bbox=GLOBAL_BBOX_4326)
    first = list(polygons(cfg, [(0, 0, 0)], False))[0]
    assert_point_list_almost_equal(first[0][0], GLOBAL_POLYGON_4326)
    # ...unless the grid bbox itself cannot be transformed.
    with assert_raises(InvalidGridBBoxTransformationException) as cm:
        ConfigGeoJSONGrid(grid_srs='EPSG:3857', map_srs='EPSG:4326',
                          grid_bbox_srs='EPSG:4326',
                          request_bbox=OVERGLOBAL_BBOX_4326,
                          grid_bbox=GLOBAL_BBOX_4326)
    assert cm.exception.args[0] == 'Invalid transformation for grid_bbox'
def test_local_polygon(self):
    """Polygons for a local extent: 4326 grid rendered into a 3857 map."""
    defaults.TILE_POLYGON_POINTS = 4
    cfg = ConfigGeoJSONGrid(map_srs='EPSG:3857', grid_srs='EPSG:4326',
                            grid_bbox_srs='EPSG:4326',
                            grid_bbox=LOCAL_BBOX_4326_1,
                            request_bbox=LOCAL_BBOX_3857_1)
    # Without per-tile extras only the polygon ring is produced.
    entry = list(polygons(cfg, [(0, 0, 0)], False))[0]
    assert_point_list_almost_equal(entry[0][0], LOCAL_POLYGON_3857)
    # With extras enabled, additional per-tile data is attached.
    entry = list(polygons(cfg, [(0, 0, 0)], True))[0]
    assert_point_list_almost_equal(entry[0][0], LOCAL_POLYGON_3857)
    assert_list_almost_equal(entry[1], [0.0, 0.0])
    assert entry[2] == (0, 0, 0)
class TestFeatureCreation(object):
    """Tests for the GeoJSON feature helper functions."""

    def test_point_feature(self):
        # Without properties an empty properties dict is emitted.
        assert point_feature([4, 4]) == {
            "type": "Feature",
            "geometry": {
                "type": "Point",
                "coordinates": [4, 4]
            },
            "properties": {}
        }
        # Supplied properties are passed through unchanged.
        assert point_feature([4, 4], {"foo": "bar"}) == {
            "type": "Feature",
            "geometry": {
                "type": "Point",
                "coordinates": [4, 4]
            },
            "properties": {"foo": "bar"}
        }

    def test_polygon_feature(self):
        # Coordinates are a list of rings (GeoJSON Polygon structure).
        assert polygon_feature([[[1, 1], [2, 1], [2, 2], [1, 2], [1, 1]]]) == {
            "type": "Feature",
            "geometry": {
                "type": "Polygon",
                "coordinates": [[[1, 1], [2, 1], [2, 2], [1, 2], [1, 1]]]
            },
            "properties": {}
        }
        # Supplied properties are passed through unchanged.
        assert polygon_feature([[[1, 1], [2, 1], [2, 2],
                                 [1, 2], [1, 1]]], {"foo": "bar"}) == {
            "type": "Feature",
            "geometry": {
                "type": "Polygon",
                "coordinates": [[[1, 1], [2, 1], [2, 2], [1, 2], [1, 1]]]
            },
            "properties": {"foo": "bar"}
        }

    def test_feature_list(self):
        """Feature counts per zoom level for a global grid."""
        request_bbox = GLOBAL_BBOX_4326
        grid_bbox = GLOBAL_BBOX_4326
        level = 0
        grid_srs = 'EPSG:4326'
        grid_bbox_srs = 'EPSG:4326'
        map_srs = 'EPSG:4326'
        # Level 0 of a global 4326 grid produces 2 features.
        config = ConfigGeoJSONGrid(request_bbox=request_bbox, grid_bbox=grid_bbox,
                                   level=level, grid_srs=grid_srs,
                                   grid_bbox_srs=grid_bbox_srs, map_srs=map_srs)
        result = features(config)
        assert len(result) == 2
        # Level 1 doubles the tile count along one axis -> 4 features.
        level = 1
        config = ConfigGeoJSONGrid(request_bbox=request_bbox, grid_bbox=grid_bbox,
                                   level=level, grid_srs=grid_srs,
                                   grid_bbox_srs=grid_bbox_srs, map_srs=map_srs)
        result = features(config)
        assert len(result) == 4
        # An untransformable grid bbox raises before any feature is built.
        level = 0
        with assert_raises(InvalidGridBBoxTransformationException) as cm:
            config = ConfigGeoJSONGrid(grid_srs='EPSG:3857', map_srs='EPSG:4326',
                                       level=level,
                                       grid_bbox_srs='EPSG:4326',
                                       request_bbox=OVERGLOBAL_BBOX_4326,
                                       grid_bbox=GLOBAL_BBOX_4326)
        assert cm.exception.args[0] == 'Invalid transformation for grid_bbox'
|
|
"""I/O helper functions for pylinac."""
import os
import os.path as osp
import struct
import zipfile
from tempfile import TemporaryDirectory
from typing import Callable, List, Tuple
from urllib.error import HTTPError, URLError
from urllib.request import urlretrieve, urlopen
import numpy as np
import pydicom
from tqdm import tqdm
from pylinac.core.profile import SingleProfile
def is_dicom(file: str) -> bool:
    """Return True if *file* carries the standard DICOM preamble.

    Only the 4-byte magic value ('DICM') that follows the 128-byte preamble
    is inspected, which makes this a fast, conservative check.

    Parameters
    ----------
    file : str
        The path to the file.

    See Also
    --------
    pydicom.filereader.read_preamble
    pydicom.filereader.read_partial
    """
    with open(file, 'rb') as stream:
        # Skip the fixed-size preamble, then compare the magic bytes.
        stream.seek(0x80)
        return stream.read(4) == b"DICM"
def is_dicom_image(file: str) -> bool:
    """Boolean specifying if file is a proper DICOM file with an image.

    Parameters
    ----------
    file : str
        The path to the file.

    See Also
    --------
    pydicom.filereader.read_preamble
    pydicom.filereader.read_partial
    """
    try:
        dataset = pydicom.dcmread(file, force=True)
        if 'TransferSyntaxUID' not in dataset.file_meta:
            # Force-read files may lack a transfer syntax; patch in the
            # DICOM default so pixel data can be decoded.
            dataset.file_meta.TransferSyntaxUID = pydicom.uid.ImplicitVRLittleEndian
        dataset.pixel_array  # raises if no decodable pixel data is present
    except (AttributeError, TypeError, KeyError, struct.error):
        return False
    return True
def retrieve_dicom_file(file: str) -> pydicom.FileDataset:
    """Read *file* as DICOM and return the dataset.

    A missing transfer syntax is patched with the DICOM default (implicit VR
    little endian) so that pixel data stays decodable.

    Parameters
    ----------
    file : str
        The path to the file.
    """
    dataset = pydicom.dcmread(file, force=True)
    if 'TransferSyntaxUID' not in dataset.file_meta:
        dataset.file_meta.TransferSyntaxUID = pydicom.uid.ImplicitVRLittleEndian
    return dataset
def is_zipfile(file: str) -> bool:
    """Return whether *file* is a genuine ZIP archive.

    Delegates to the standard-library :func:`zipfile.is_zipfile` check.
    """
    return bool(zipfile.is_zipfile(file))
class TemporaryZipDirectory(TemporaryDirectory):
    """A temporary directory into which a ZIP archive is unpacked.

    Cleanup semantics are inherited from :class:`tempfile.TemporaryDirectory`:
    the directory and its extracted contents are removed on close/exit.
    """

    def __init__(self, zfile):
        """
        Parameters
        ----------
        zfile : str
            String that points to a ZIP archive.
        """
        super().__init__()
        # Use a context manager so the archive's file handle is closed
        # promptly instead of being leaked until garbage collection.
        with zipfile.ZipFile(zfile) as archive:
            archive.extractall(path=self.name)
def retrieve_filenames(directory: str, func: Callable=None, recursive: bool=True, **kwargs) -> List[str]:
    """Retrieve file names in a directory.

    Parameters
    ----------
    directory : str
        The directory to walk over recursively.
    func : function, None
        The function that validates if the file name should be kept.
        If None, no validation will be performed and all file names will be returned.
    recursive : bool
        Whether to descend into subdirectories; if False only the root
        directory is searched.
    kwargs
        Additional arguments passed to the func parameter.

    Returns
    -------
    list of str
        Paths of the files that passed validation.
    """
    if func is None:
        # The default validator must accept **kwargs too; the previous
        # ``lambda x: True`` raised TypeError whenever kwargs were supplied
        # without a custom func.
        def func(filename, **_):
            return True
    filenames = []
    for pdir, _, files in os.walk(directory):
        for file in files:
            filename = osp.join(pdir, file)
            if func(filename, **kwargs):
                filenames.append(filename)
        if not recursive:
            # os.walk yields the root first, so breaking here limits the
            # search to the top-level directory.
            break
    return filenames
def retrieve_demo_file(url: str, force: bool = False) -> str:
    """Retrieve the demo file either by getting it from file or from a URL.

    If the file is already on disk it returns the file name. If the file isn't
    on disk, get the file from the URL and put it at the expected demo file location
    on disk for lazy loading next time.

    Parameters
    ----------
    url : str
        The suffix to the url (location within the S3 bucket) pointing to the demo file.
    force : bool
        If True, download the file even when a local copy already exists.

    Returns
    -------
    str
        The local path of the demo file.
    """
    true_url = r'https://storage.googleapis.com/pylinac_demo_files/' + url
    demo_file = osp.join(osp.dirname(osp.dirname(__file__)), 'demo_files', url)
    # exist_ok removes the race between the existence check and creation
    # (the old exists()/makedirs() pair could raise FileExistsError).
    os.makedirs(osp.dirname(demo_file), exist_ok=True)
    if force or not osp.isfile(demo_file):
        get_url(true_url, destination=demo_file)
    return demo_file
def is_url(url: str) -> bool:
    """Determine whether a given string is a valid, reachable URL.

    Parameters
    ----------
    url : str
        The URL to check.

    Returns
    -------
    bool
        True when the URL responds with HTTP 200, False otherwise.
    """
    try:
        with urlopen(url) as response:
            return response.status == 200
    except Exception:
        # Malformed URLs raise ValueError, unreachable ones URLError/HTTPError.
        # A bare ``except`` here would also have swallowed KeyboardInterrupt
        # and SystemExit, which must propagate.
        return False
def get_url(url: str, destination: str=None, progress_bar: bool=True) -> str:
    """Download a URL to a local file.

    Parameters
    ----------
    url : str
        The URL to download.
    destination : str, None
        The destination of the file. If None is given the file is saved to a temporary directory.
    progress_bar : bool
        Whether to show a command-line progress bar while downloading.

    Returns
    -------
    filename : str
        The location of the downloaded file.

    Raises
    ------
    HTTPError, URLError, ValueError
        Propagated unchanged from ``urlretrieve`` on download failure.

    Notes
    -----
    Progress bar use/example adapted from tqdm documentation: https://github.com/tqdm/tqdm
    """
    def report_hook(t):
        # Translate urlretrieve's (block count, block size, total size)
        # callback into incremental tqdm updates.
        last_b = [0]

        def inner(b=1, bsize=1, tsize=None):
            if tsize is not None:
                t.total = tsize
            if b > 0:
                t.update((b - last_b[0]) * bsize)
            last_b[0] = b
        return inner

    # The previous ``except (...) as e: raise e`` wrapper added nothing;
    # let download errors propagate naturally.
    if progress_bar:
        with tqdm(unit='B', unit_scale=True, miniters=1, desc=url.split('/')[-1]) as t:
            filename, _ = urlretrieve(url, filename=destination, reporthook=report_hook(t))
    else:
        filename, _ = urlretrieve(url, filename=destination)
    return filename
# this is easier with pandas, but I don't want that as a dependency at this point
class SNCProfiler:
    """Load a file from a Sun Nuclear Profiler device. This accepts .prs files."""

    def __init__(self, path: str, detector_row: int = 106, bias_row: int = 107, calibration_row: int = 108,
                 data_row: int = -1, data_columns: slice = slice(5, 259)):
        """
        Parameters
        ----------
        path : str
            Path to the .prs file.
        detector_row : int
            Line of the file holding the detector identifiers.
        bias_row : int
            Line holding the per-detector bias values; column 2 of this
            line is also read as the ``timetic`` value.
        calibration_row : int
            Line holding the per-detector calibration factors.
        data_row : int
            Line holding the measured data (default: last line of the file).
        data_columns : slice
            The range of columns that the data is in. Usually, there are some columns before and after the real data.
        """
        # .prs files are tab-separated text; cp437 decodes the vendor's
        # extended byte values without errors.
        with open(path, encoding='cp437') as f:
            raw_data = f.read().splitlines()
        self.detectors = raw_data[detector_row].split('\t')[data_columns]
        self.bias = np.array(raw_data[bias_row].split('\t')[data_columns]).astype(float)
        self.calibration = np.array(raw_data[calibration_row].split('\t')[data_columns]).astype(float)
        self.data = np.array(raw_data[data_row].split('\t')[data_columns]).astype(float)
        # NOTE(review): timetic appears to be a timing/normalization factor
        # stored in column 2 of the bias row -- confirm against the .prs spec.
        self.timetic = float(raw_data[bias_row].split('\t')[2])
        # Bias-correct the raw readings, then apply per-detector calibration.
        self.integrated_dose = self.calibration * (self.data - self.bias * self.timetic)

    def to_profiles(self, n_detectors_row: int = 63, **kwargs) -> Tuple[SingleProfile, SingleProfile, SingleProfile, SingleProfile]:
        """Convert the SNC data to SingleProfiles. These can be analyzed directly or passed to other modules like flat/sym.

        Parameters
        ----------
        n_detectors_row : int
            The number of detectors in a given row. Note that they Y profile includes 2 extra detectors from the other 3.
        """
        # Detectors are stored consecutively: the X row, then the Y row
        # (which carries 2 extra detectors), then the two remaining rows.
        x_prof = SingleProfile(self.integrated_dose[:n_detectors_row], **kwargs)
        y_prof = SingleProfile(self.integrated_dose[n_detectors_row:2*n_detectors_row+2], **kwargs)
        pos_prof = SingleProfile(self.integrated_dose[2*n_detectors_row+2:3*n_detectors_row+2], **kwargs)
        neg_prof = SingleProfile(self.integrated_dose[3*n_detectors_row+2:4*n_detectors_row+2], **kwargs)
        return x_prof, y_prof, pos_prof, neg_prof
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South schema migration: adds the ``FormBlock`` model.

    The ``models`` dict below is South's frozen ORM snapshot and should not
    be edited by hand.
    """

    def forwards(self, orm):
        """Create the ``fancypages_formblock`` table."""
        # Adding model 'FormBlock'
        db.create_table(u'fancypages_formblock', (
            (u'contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)),
            ('form_selection', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
        ))
        db.send_create_signal(u'fancypages', ['FormBlock'])

    def backwards(self, orm):
        """Drop the ``fancypages_formblock`` table (reverse of forwards)."""
        # Deleting model 'FormBlock'
        db.delete_table(u'fancypages_formblock')

    # Frozen ORM definitions used to build the ``orm`` object passed to
    # forwards()/backwards() above.
    models = {
        u'assets.imageasset': {
            'Meta': {'object_name': 'ImageAsset'},
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'default': "''"}),
            'height': ('django.db.models.fields.IntegerField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'uuid': ('shortuuidfield.fields.ShortUUIDField', [], {'db_index': 'True', 'max_length': '22', 'blank': 'True'}),
            'width': ('django.db.models.fields.IntegerField', [], {'blank': 'True'})
        },
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'fancypages.carouselblock': {
            'Meta': {'ordering': "[u'display_order']", 'object_name': 'CarouselBlock', '_ormbases': [u'fancypages.ContentBlock']},
            u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
            'image_1': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['assets.ImageAsset']"}),
            'image_10': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['assets.ImageAsset']"}),
            'image_2': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['assets.ImageAsset']"}),
            'image_3': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['assets.ImageAsset']"}),
            'image_4': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['assets.ImageAsset']"}),
            'image_5': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['assets.ImageAsset']"}),
            'image_6': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['assets.ImageAsset']"}),
            'image_7': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['assets.ImageAsset']"}),
            'image_8': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['assets.ImageAsset']"}),
            'image_9': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['assets.ImageAsset']"}),
            'link_url_1': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            'link_url_10': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            'link_url_2': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            'link_url_3': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            'link_url_4': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            'link_url_5': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            'link_url_6': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            'link_url_7': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            'link_url_8': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            'link_url_9': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'})
        },
        'fancypages.container': {
            'Meta': {'unique_together': "(('name', 'content_type', 'object_id', 'language_code'),)", 'object_name': 'Container'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language_code': ('django.db.models.fields.CharField', [], {'default': "u'en-us'", 'max_length': '7'}),
            'name': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'uuid': ('shortuuidfield.fields.ShortUUIDField', [], {'db_index': 'True', 'max_length': '22', 'blank': 'True'})
        },
        u'fancypages.contentblock': {
            'Meta': {'ordering': "[u'display_order']", 'object_name': 'ContentBlock'},
            'container': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'blocks'", 'to': "orm['fancypages.Container']"}),
            'display_order': ('django.db.models.fields.PositiveIntegerField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'uuid': ('shortuuidfield.fields.ShortUUIDField', [], {'db_index': 'True', 'max_length': '22', 'blank': 'True'})
        },
        'fancypages.fancypage': {
            'Meta': {'object_name': 'FancyPage'},
            'date_visible_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'date_visible_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'pages'", 'symmetrical': 'False', 'to': "orm['fancypages.PageGroup']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'keywords': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'node': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'page'", 'unique': 'True', 'null': 'True', 'to': "orm['fancypages.PageNode']"}),
            'page_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'pages'", 'null': 'True', 'to': "orm['fancypages.PageType']"}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
            'uuid': ('shortuuidfield.fields.ShortUUIDField', [], {'db_index': 'True', 'max_length': '22', 'blank': 'True'})
        },
        u'fancypages.formblock': {
            'Meta': {'ordering': "[u'display_order']", 'object_name': 'FormBlock', '_ormbases': [u'fancypages.ContentBlock']},
            u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
            'form_selection': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
        },
        'fancypages.fourcolumnlayoutblock': {
            'Meta': {'object_name': 'FourColumnLayoutBlock'},
            u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'})
        },
        'fancypages.horizontalseparatorblock': {
            'Meta': {'ordering': "[u'display_order']", 'object_name': 'HorizontalSeparatorBlock', '_ormbases': [u'fancypages.ContentBlock']},
            u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'})
        },
        u'fancypages.imageandtextblock': {
            'Meta': {'object_name': 'ImageAndTextBlock', '_ormbases': [u'fancypages.ContentBlock']},
            'alt_text': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
            'image_asset': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "u'image_text_blocks'", 'null': 'True', 'to': u"orm['assets.ImageAsset']"}),
            'link': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            'text': ('django.db.models.fields.TextField', [], {'default': "u'Your text goes here.'"}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
        },
        u'fancypages.imageblock': {
            'Meta': {'object_name': 'ImageBlock', '_ormbases': [u'fancypages.ContentBlock']},
            'alt_text': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
            'image_asset': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "u'image_blocks'", 'null': 'True', 'to': u"orm['assets.ImageAsset']"}),
            'link': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
        },
        'fancypages.orderedcontainer': {
            'Meta': {'object_name': 'OrderedContainer', '_ormbases': ['fancypages.Container']},
            u'container_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.Container']", 'unique': 'True', 'primary_key': 'True'}),
            'display_order': ('django.db.models.fields.PositiveIntegerField', [], {})
        },
        'fancypages.pagegroup': {
            'Meta': {'object_name': 'PageGroup'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'uuid': ('shortuuidfield.fields.ShortUUIDField', [], {'db_index': 'True', 'max_length': '22', 'blank': 'True'})
        },
        u'fancypages.pagenavigationblock': {
            'Meta': {'ordering': "[u'display_order']", 'object_name': 'PageNavigationBlock', '_ormbases': [u'fancypages.ContentBlock']},
            u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
            'depth': ('django.db.models.fields.PositiveIntegerField', [], {'default': '2'}),
            'origin': ('django.db.models.fields.CharField', [], {'default': "u'absolute'", 'max_length': '50'})
        },
        'fancypages.pagenode': {
            'Meta': {'object_name': 'PageNode'},
            'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
        },
        'fancypages.pagetype': {
            'Meta': {'object_name': 'PageType'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128'}),
            'template_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'uuid': ('shortuuidfield.fields.ShortUUIDField', [], {'db_index': 'True', 'max_length': '22', 'blank': 'True'})
        },
        'fancypages.tabblock': {
            'Meta': {'ordering': "[u'display_order']", 'object_name': 'TabBlock', '_ormbases': [u'fancypages.ContentBlock']},
            u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'})
        },
        u'fancypages.textblock': {
            'Meta': {'ordering': "[u'display_order']", 'object_name': 'TextBlock', '_ormbases': [u'fancypages.ContentBlock']},
            u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
            'text': ('django.db.models.fields.TextField', [], {'default': "u'Your text goes here.'"})
        },
        'fancypages.threecolumnlayoutblock': {
            'Meta': {'object_name': 'ThreeColumnLayoutBlock'},
            u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'})
        },
        u'fancypages.titletextblock': {
            'Meta': {'ordering': "[u'display_order']", 'object_name': 'TitleTextBlock', '_ormbases': [u'fancypages.ContentBlock']},
            u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
            'text': ('django.db.models.fields.TextField', [], {'default': "u'Your text goes here.'"}),
            'title': ('django.db.models.fields.CharField', [], {'default': "u'Your title goes here.'", 'max_length': '100'})
        },
        'fancypages.twitterblock': {
            'Meta': {'ordering': "[u'display_order']", 'object_name': 'TwitterBlock', '_ormbases': [u'fancypages.ContentBlock']},
            u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
            'max_tweets': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5'}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'fancypages.twocolumnlayoutblock': {
            'Meta': {'object_name': 'TwoColumnLayoutBlock'},
            u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
            'left_width': ('django.db.models.fields.PositiveIntegerField', [], {'default': '6', 'max_length': '3'})
        },
        'fancypages.videoblock': {
            'Meta': {'ordering': "[u'display_order']", 'object_name': 'VideoBlock', '_ormbases': [u'fancypages.ContentBlock']},
            u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
            'source': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'video_code': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        }
    }

    complete_apps = ['fancypages']
|
|
from __future__ import absolute_import, division, print_function, with_statement
import traceback
from tornado.concurrent import Future
from tornado import gen
from tornado.httpclient import HTTPError, HTTPRequest
from tornado.log import gen_log, app_log
from tornado.testing import AsyncHTTPTestCase, gen_test, bind_unused_port, ExpectLog
from tornado.test.util import unittest
from tornado.web import Application, RequestHandler
from tornado.util import u
try:
import tornado.websocket # noqa
from tornado.util import _websocket_mask_python
except ImportError:
# The unittest module presents misleading errors on ImportError
# (it acts as if websocket_test could not be found, hiding the underlying
# error). If we get an ImportError here (which could happen due to
# TORNADO_EXTENSION=1), print some extra information before failing.
traceback.print_exc()
raise
from tornado.websocket import WebSocketHandler, websocket_connect, WebSocketError
try:
from tornado import speedups
except ImportError:
speedups = None
class TestWebSocketHandler(WebSocketHandler):
    """Base class for testing handlers that exposes the on_close event.
    This allows for deterministic cleanup of the associated socket.
    """
    def initialize(self, close_future, compression_options=None):
        # close_future is resolved from on_close so tests can wait for the
        # server side of the connection to be torn down.
        self.close_future = close_future
        self.compression_options = compression_options
    def get_compression_options(self):
        # Passed through to the websocket protocol layer by tornado.
        return self.compression_options
    def on_close(self):
        # Resolve the future with the close code/reason seen by this side.
        self.close_future.set_result((self.close_code, self.close_reason))
class EchoHandler(TestWebSocketHandler):
    """Echoes each received frame back to the client, preserving the
    text/binary distinction of the incoming message."""
    def on_message(self, message):
        is_binary = isinstance(message, bytes)
        self.write_message(message, is_binary)
class ErrorInOnMessageHandler(TestWebSocketHandler):
    """Handler whose on_message always raises, used to test server-side
    exception logging during message dispatch."""
    def on_message(self, message):
        # Deliberate ZeroDivisionError to trigger the "Uncaught exception"
        # log path in the websocket machinery.
        1 / 0
class HeaderHandler(TestWebSocketHandler):
    """Echoes back the X-Test request header, after verifying that
    plain-HTTP RequestHandler methods are rejected on a websocket."""
    def open(self):
        try:
            # In a websocket context, many RequestHandler methods
            # raise RuntimeErrors.
            self.set_status(503)
            raise Exception("did not get expected exception")
        except RuntimeError:
            pass
        self.write_message(self.request.headers.get('X-Test', ''))
class NonWebSocketHandler(RequestHandler):
    """Plain HTTP handler used to test websocket_connect against a
    non-websocket endpoint."""
    def get(self):
        self.write('ok')
class CloseReasonHandler(TestWebSocketHandler):
    """Closes the connection immediately with a specific code and reason
    so the client side can verify what it receives."""
    def open(self):
        self.on_close_called = False
        self.close(1001, "goodbye")
class AsyncPrepareHandler(TestWebSocketHandler):
    """Test handler for the websocket handshake when prepare() is a
    coroutine."""
    @gen.coroutine
    def prepare(self):
        # Yielding gen.moment makes prepare genuinely asynchronous.
        yield gen.moment
    def on_message(self, message):
        self.write_message(message)
class WebSocketBaseTestCase(AsyncHTTPTestCase):
    # Shared helpers for opening and deterministically closing websocket
    # client connections in tests.
    @gen.coroutine
    def ws_connect(self, path, compression_options=None):
        """Open a websocket client connection to *path* on the test server."""
        ws = yield websocket_connect(
            'ws://127.0.0.1:%d%s' % (self.get_http_port(), path),
            compression_options=compression_options)
        raise gen.Return(ws)
    @gen.coroutine
    def close(self, ws):
        """Close a websocket connection and wait for the server side.
        If we don't wait here, there are sometimes leak warnings in the
        tests.
        """
        ws.close()
        yield self.close_future
class WebSocketTest(WebSocketBaseTestCase):
    """End-to-end tests of the websocket handshake, messaging, close
    semantics and Origin checking, using a real client and server."""
    def get_app(self):
        # close_future is shared with every handler so each test can wait
        # for the server-side connection to finish closing.
        self.close_future = Future()
        return Application([
            ('/echo', EchoHandler, dict(close_future=self.close_future)),
            ('/non_ws', NonWebSocketHandler),
            ('/header', HeaderHandler, dict(close_future=self.close_future)),
            ('/close_reason', CloseReasonHandler,
             dict(close_future=self.close_future)),
            ('/error_in_on_message', ErrorInOnMessageHandler,
             dict(close_future=self.close_future)),
            ('/async_prepare', AsyncPrepareHandler,
             dict(close_future=self.close_future)),
        ])
    def test_http_request(self):
        # WS server, HTTP client.
        response = self.fetch('/echo')
        self.assertEqual(response.code, 400)
    @gen_test
    def test_websocket_gen(self):
        # Round-trip a message using the coroutine interface.
        ws = yield self.ws_connect('/echo')
        ws.write_message('hello')
        response = yield ws.read_message()
        self.assertEqual(response, 'hello')
        yield self.close(ws)
    def test_websocket_callbacks(self):
        # Same round trip via the callback interface (no coroutines).
        websocket_connect(
            'ws://127.0.0.1:%d/echo' % self.get_http_port(),
            io_loop=self.io_loop, callback=self.stop)
        ws = self.wait().result()
        ws.write_message('hello')
        ws.read_message(self.stop)
        response = self.wait().result()
        self.assertEqual(response, 'hello')
        self.close_future.add_done_callback(lambda f: self.stop())
        ws.close()
        self.wait()
    @gen_test
    def test_binary_message(self):
        ws = yield self.ws_connect('/echo')
        ws.write_message(b'hello \xe9', binary=True)
        response = yield ws.read_message()
        self.assertEqual(response, b'hello \xe9')
        yield self.close(ws)
    @gen_test
    def test_unicode_message(self):
        ws = yield self.ws_connect('/echo')
        ws.write_message(u('hello \u00e9'))
        response = yield ws.read_message()
        self.assertEqual(response, u('hello \u00e9'))
        yield self.close(ws)
    @gen_test
    def test_error_in_on_message(self):
        # A server-side exception closes the connection; the client sees
        # a None message (EOF) rather than the exception.
        ws = yield self.ws_connect('/error_in_on_message')
        ws.write_message('hello')
        with ExpectLog(app_log, "Uncaught exception"):
            response = yield ws.read_message()
        self.assertIs(response, None)
        yield self.close(ws)
    @gen_test
    def test_websocket_http_fail(self):
        with self.assertRaises(HTTPError) as cm:
            yield self.ws_connect('/notfound')
        self.assertEqual(cm.exception.code, 404)
    @gen_test
    def test_websocket_http_success(self):
        # A 200 response where an upgrade was expected is a WebSocketError.
        with self.assertRaises(WebSocketError):
            yield self.ws_connect('/non_ws')
    @gen_test
    def test_websocket_network_fail(self):
        # Bind then close a port so the connect is refused immediately.
        sock, port = bind_unused_port()
        sock.close()
        with self.assertRaises(IOError):
            with ExpectLog(gen_log, ".*"):
                yield websocket_connect(
                    'ws://127.0.0.1:%d/' % port,
                    io_loop=self.io_loop,
                    connect_timeout=3600)
    @gen_test
    def test_websocket_close_buffered_data(self):
        ws = yield websocket_connect(
            'ws://127.0.0.1:%d/echo' % self.get_http_port())
        ws.write_message('hello')
        ws.write_message('world')
        # Close the underlying stream.
        ws.stream.close()
        yield self.close_future
    @gen_test
    def test_websocket_headers(self):
        # Ensure that arbitrary headers can be passed through websocket_connect.
        ws = yield websocket_connect(
            HTTPRequest('ws://127.0.0.1:%d/header' % self.get_http_port(),
                        headers={'X-Test': 'hello'}))
        response = yield ws.read_message()
        self.assertEqual(response, 'hello')
        yield self.close(ws)
    @gen_test
    def test_server_close_reason(self):
        ws = yield self.ws_connect('/close_reason')
        msg = yield ws.read_message()
        # A message of None means the other side closed the connection.
        self.assertIs(msg, None)
        self.assertEqual(ws.close_code, 1001)
        self.assertEqual(ws.close_reason, "goodbye")
        # The on_close callback is called no matter which side closed.
        code, reason = yield self.close_future
        # The client echoed the close code it received to the server,
        # so the server's close code (returned via close_future) is
        # the same.
        self.assertEqual(code, 1001)
    @gen_test
    def test_client_close_reason(self):
        ws = yield self.ws_connect('/echo')
        ws.close(1001, 'goodbye')
        code, reason = yield self.close_future
        self.assertEqual(code, 1001)
        self.assertEqual(reason, 'goodbye')
    @gen_test
    def test_async_prepare(self):
        # Previously, an async prepare method triggered a bug that would
        # result in a timeout on test shutdown (and a memory leak).
        ws = yield self.ws_connect('/async_prepare')
        ws.write_message('hello')
        res = yield ws.read_message()
        self.assertEqual(res, 'hello')
    @gen_test
    def test_check_origin_valid_no_path(self):
        port = self.get_http_port()
        url = 'ws://127.0.0.1:%d/echo' % port
        headers = {'Origin': 'http://127.0.0.1:%d' % port}
        ws = yield websocket_connect(HTTPRequest(url, headers=headers),
                                     io_loop=self.io_loop)
        ws.write_message('hello')
        response = yield ws.read_message()
        self.assertEqual(response, 'hello')
        yield self.close(ws)
    @gen_test
    def test_check_origin_valid_with_path(self):
        port = self.get_http_port()
        url = 'ws://127.0.0.1:%d/echo' % port
        headers = {'Origin': 'http://127.0.0.1:%d/something' % port}
        ws = yield websocket_connect(HTTPRequest(url, headers=headers),
                                     io_loop=self.io_loop)
        ws.write_message('hello')
        response = yield ws.read_message()
        self.assertEqual(response, 'hello')
        yield self.close(ws)
    @gen_test
    def test_check_origin_invalid_partial_url(self):
        # An Origin without a scheme must be rejected.
        port = self.get_http_port()
        url = 'ws://127.0.0.1:%d/echo' % port
        headers = {'Origin': '127.0.0.1:%d' % port}
        with self.assertRaises(HTTPError) as cm:
            yield websocket_connect(HTTPRequest(url, headers=headers),
                                    io_loop=self.io_loop)
        self.assertEqual(cm.exception.code, 403)
    @gen_test
    def test_check_origin_invalid(self):
        port = self.get_http_port()
        url = 'ws://127.0.0.1:%d/echo' % port
        # Host is 127.0.0.1, which should not be accessible from some other
        # domain
        headers = {'Origin': 'http://somewhereelse.com'}
        with self.assertRaises(HTTPError) as cm:
            yield websocket_connect(HTTPRequest(url, headers=headers),
                                    io_loop=self.io_loop)
        self.assertEqual(cm.exception.code, 403)
    @gen_test
    def test_check_origin_invalid_subdomains(self):
        port = self.get_http_port()
        url = 'ws://localhost:%d/echo' % port
        # Subdomains should be disallowed by default. If we could pass a
        # resolver to websocket_connect we could test sibling domains as well.
        headers = {'Origin': 'http://subtenant.localhost'}
        with self.assertRaises(HTTPError) as cm:
            yield websocket_connect(HTTPRequest(url, headers=headers),
                                    io_loop=self.io_loop)
        self.assertEqual(cm.exception.code, 403)
class CompressionTestMixin(object):
    """Mixin that echoes a message three times and measures the bytes on
    the wire; subclasses configure client/server compression options and
    implement verify_wire_bytes to check the byte counts."""
    MESSAGE = 'Hello world. Testing 123 123'
    def get_app(self):
        self.close_future = Future()
        return Application([
            ('/echo', EchoHandler, dict(
                close_future=self.close_future,
                compression_options=self.get_server_compression_options())),
        ])
    def get_server_compression_options(self):
        # None disables the permessage-deflate extension on the server.
        return None
    def get_client_compression_options(self):
        # None disables the permessage-deflate extension on the client.
        return None
    @gen_test
    def test_message_sizes(self):
        ws = yield self.ws_connect(
            '/echo',
            compression_options=self.get_client_compression_options())
        # Send the same message three times so we can measure the
        # effect of the context_takeover options.
        for i in range(3):
            ws.write_message(self.MESSAGE)
            response = yield ws.read_message()
            self.assertEqual(response, self.MESSAGE)
        self.assertEqual(ws.protocol._message_bytes_out, len(self.MESSAGE) * 3)
        self.assertEqual(ws.protocol._message_bytes_in, len(self.MESSAGE) * 3)
        self.verify_wire_bytes(ws.protocol._wire_bytes_in,
                               ws.protocol._wire_bytes_out)
        yield self.close(ws)
class UncompressedTestMixin(CompressionTestMixin):
    """Specialization of CompressionTestMixin when we expect no compression."""
    def verify_wire_bytes(self, bytes_in, bytes_out):
        # Bytes out includes the 4-byte mask key per message.
        self.assertEqual(bytes_out, 3 * (len(self.MESSAGE) + 6))
        self.assertEqual(bytes_in, 3 * (len(self.MESSAGE) + 2))
class NoCompressionTest(UncompressedTestMixin, WebSocketBaseTestCase):
    # Neither side requests compression, so no bytes should be saved.
    pass
# If only one side tries to compress, the extension is not negotiated.
class ServerOnlyCompressionTest(UncompressedTestMixin, WebSocketBaseTestCase):
    # Server offers compression but the client does not, so the
    # uncompressed byte counts are still expected.
    def get_server_compression_options(self):
        return {}
class ClientOnlyCompressionTest(UncompressedTestMixin, WebSocketBaseTestCase):
    # Client offers compression but the server does not, so the
    # uncompressed byte counts are still expected.
    def get_client_compression_options(self):
        return {}
class DefaultCompressionTest(CompressionTestMixin, WebSocketBaseTestCase):
    """Both sides advertise permessage-deflate with default options, so
    the extension is negotiated and the payload shrinks."""
    def get_server_compression_options(self):
        return {}
    def get_client_compression_options(self):
        return {}
    def verify_wire_bytes(self, bytes_in, bytes_out):
        self.assertLess(bytes_out, 3 * (len(self.MESSAGE) + 6))
        self.assertLess(bytes_in, 3 * (len(self.MESSAGE) + 2))
        # Bytes out includes the 4 bytes mask key per message.
        self.assertEqual(bytes_out, bytes_in + 12)
class MaskFunctionMixin(object):
    """Shared test cases for websocket frame masking.

    Subclasses must define self.mask(mask, data) to select the
    implementation under test."""
    def test_mask(self):
        cases = [
            (b'abcd', b'', b''),
            (b'abcd', b'b', b'\x03'),
            (b'abcd', b'54321', b'TVPVP'),
            (b'ZXCV', b'98765432', b'c`t`olpd'),
            # Include test cases with \x00 bytes (to ensure that the C
            # extension isn't depending on null-terminated strings) and
            # bytes with the high bit set (to smoke out signedness issues).
            (b'\x00\x01\x02\x03', b'\xff\xfb\xfd\xfc\xfe\xfa',
             b'\xff\xfa\xff\xff\xfe\xfb'),
            (b'\xff\xfb\xfd\xfc', b'\x00\x01\x02\x03\x04\x05',
             b'\xff\xfa\xff\xff\xfb\xfe'),
        ]
        for key, data, expected in cases:
            self.assertEqual(self.mask(key, data), expected)
class PythonMaskFunctionTest(MaskFunctionMixin, unittest.TestCase):
    """Exercises the pure-python masking implementation."""
    def mask(self, mask, data):
        return _websocket_mask_python(mask, data)
@unittest.skipIf(speedups is None, "tornado.speedups module not present")
class CythonMaskFunctionTest(MaskFunctionMixin, unittest.TestCase):
    """Exercises the compiled speedups masking implementation, when the
    extension module is available."""
    def mask(self, mask, data):
        return speedups.websocket_mask(mask, data)
|
|
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.atom
~~~~~~~~~~~~~~~~~~~~~
This module provides a class called :class:`AtomFeed` which can be
used to generate feeds in the Atom syndication format (see :rfc:`4287`).
Example::
def atom_feed(request):
feed = AtomFeed("My Blog", feed_url=request.url,
url=request.host_url,
subtitle="My example blog for a feed test.")
for post in Post.query.limit(10).all():
feed.add(post.title, post.body, content_type='html',
author=post.author, url=post.url, id=post.uid,
updated=post.last_update, published=post.pub_date)
return feed.get_response()
:copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from datetime import datetime
from werkzeug.utils import escape
from werkzeug.wrappers import BaseResponse
XHTML_NAMESPACE = 'http://www.w3.org/1999/xhtml'
def _make_text_block(name, content, content_type=None):
    """Helper function for the builder that creates an XML text block."""
    if content_type == 'xhtml':
        # XHTML content is embedded as real XML, not escaped.
        return u'<%s type="xhtml"><div xmlns="%s">%s</div></%s>\n' % \
            (name, XHTML_NAMESPACE, content, name)
    elif content_type:
        return u'<%s type="%s">%s</%s>\n' % (name, content_type,
                                             escape(content), name)
    else:
        return u'<%s>%s</%s>\n' % (name, escape(content), name)
def format_iso8601(obj):
    """Format a datetime object as an ISO 8601 timestamp (assumes the
    value is already in UTC; a literal 'Z' suffix is appended)."""
    return '{0:%Y-%m-%dT%H:%M:%SZ}'.format(obj)
class AtomFeed(object):
    """A helper class that creates Atom feeds.
    :param title: the title of the feed. Required.
    :param title_type: the type attribute for the title element. One of
                       ``'html'``, ``'text'`` or ``'xhtml'``.
    :param url: the url for the feed (not the url *of* the feed)
    :param id: a globally unique id for the feed. Must be an URI. If
               not present the `feed_url` is used, but one of both is
               required.
    :param updated: the time the feed was modified the last time. Must
                    be a :class:`datetime.datetime` object. If not
                    present the latest entry's `updated` is used.
    :param feed_url: the URL to the feed. Should be the URL that was
                     requested.
    :param author: the author of the feed. Must be either a string (the
                   name) or a dict with name (required) and uri or
                   email (both optional). Can be a list of (may be
                   mixed, too) strings and dicts, too, if there are
                   multiple authors. Required if not every entry has an
                   author element.
    :param icon: an icon for the feed.
    :param logo: a logo for the feed.
    :param rights: copyright information for the feed.
    :param rights_type: the type attribute for the rights element. One of
                        ``'html'``, ``'text'`` or ``'xhtml'``. Default is
                        ``'text'``.
    :param subtitle: a short description of the feed.
    :param subtitle_type: the type attribute for the subtitle element.
                          One of ``'text'``, ``'html'`` or ``'xhtml'``.
                          Default is ``'text'``.
    :param links: additional links. Must be a list of dictionaries with
                  href (required) and rel, type, hreflang, title, length
                  (all optional)
    :param generator: the software that generated this feed. This must be
                      a tuple in the form ``(name, url, version)``. If
                      you don't want to specify one of them, set the item
                      to `None`.
    :param entries: a list with the entries for the feed. Entries can also
                    be added later with :meth:`add`.
    For more information on the elements see
    http://www.atomenabled.org/developers/syndication/
    Everywhere where a list is demanded, any iterable can be used.
    """
    # (name, url, version) advertised in the <generator> element by default.
    default_generator = ('Werkzeug', None, None)
    def __init__(self, title=None, entries=None, **kwargs):
        self.title = title
        self.title_type = kwargs.get('title_type', 'text')
        self.url = kwargs.get('url')
        self.feed_url = kwargs.get('feed_url', self.url)
        self.id = kwargs.get('id', self.feed_url)
        self.updated = kwargs.get('updated')
        self.author = kwargs.get('author', ())
        self.icon = kwargs.get('icon')
        self.logo = kwargs.get('logo')
        self.rights = kwargs.get('rights')
        self.rights_type = kwargs.get('rights_type')
        self.subtitle = kwargs.get('subtitle')
        self.subtitle_type = kwargs.get('subtitle_type', 'text')
        self.generator = kwargs.get('generator')
        if self.generator is None:
            self.generator = self.default_generator
        self.links = kwargs.get('links', [])
        self.entries = entries and list(entries) or []
        # Normalize the author argument: a single string or dict becomes a
        # one-element list and bare strings become {'name': ...} dicts.
        # NOTE: `basestring` means this module targets Python 2.
        if not hasattr(self.author, '__iter__') \
           or isinstance(self.author, (basestring, dict)):
            self.author = [self.author]
        for i, author in enumerate(self.author):
            if not isinstance(author, dict):
                self.author[i] = {'name': author}
        if not self.title:
            raise ValueError('title is required')
        if not self.id:
            raise ValueError('id is required')
        for author in self.author:
            if 'name' not in author:
                raise TypeError('author must contain at least a name')
    def add(self, *args, **kwargs):
        """Add a new entry to the feed. This function can either be called
        with a :class:`FeedEntry` or some keyword and positional arguments
        that are forwarded to the :class:`FeedEntry` constructor.
        """
        if len(args) == 1 and not kwargs and isinstance(args[0], FeedEntry):
            self.entries.append(args[0])
        else:
            kwargs['feed_url'] = self.feed_url
            self.entries.append(FeedEntry(*args, **kwargs))
    def __repr__(self):
        return '<%s %r (%d entries)>' % (
            self.__class__.__name__,
            self.title,
            len(self.entries)
        )
    def generate(self):
        """Return a generator that yields pieces of XML."""
        # atom demands either an author element in every entry or a global one
        if not self.author:
            if False in map(lambda e: bool(e.author), self.entries):
                # German for "unknown author" - kept for compatibility.
                self.author = ({'name': u'unbekannter Autor'},)
        if not self.updated:
            # Fall back to the newest entry's timestamp, or "now".
            dates = sorted([entry.updated for entry in self.entries])
            self.updated = dates and dates[-1] or datetime.utcnow()
        yield u'<?xml version="1.0" encoding="utf-8"?>\n'
        yield u'<feed xmlns="http://www.w3.org/2005/Atom">\n'
        yield '  ' + _make_text_block('title', self.title, self.title_type)
        yield u'  <id>%s</id>\n' % escape(self.id)
        yield u'  <updated>%s</updated>\n' % format_iso8601(self.updated)
        if self.url:
            yield u'  <link href="%s" />\n' % escape(self.url, True)
        if self.feed_url:
            yield u'  <link href="%s" rel="self" />\n' % \
                escape(self.feed_url, True)
        for link in self.links:
            yield u'  <link %s/>\n' % ''.join('%s="%s" ' % \
                (k, escape(link[k], True)) for k in link)
        for author in self.author:
            yield u'  <author>\n'
            yield u'    <name>%s</name>\n' % escape(author['name'])
            if 'uri' in author:
                yield u'    <uri>%s</uri>\n' % escape(author['uri'])
            if 'email' in author:
                yield '    <email>%s</email>\n' % escape(author['email'])
            yield '  </author>\n'
        if self.subtitle:
            yield '  ' + _make_text_block('subtitle', self.subtitle,
                                          self.subtitle_type)
        if self.icon:
            yield u'  <icon>%s</icon>\n' % escape(self.icon)
        if self.logo:
            yield u'  <logo>%s</logo>\n' % escape(self.logo)
        if self.rights:
            yield '  ' + _make_text_block('rights', self.rights,
                                          self.rights_type)
        generator_name, generator_url, generator_version = self.generator
        if generator_name or generator_url or generator_version:
            tmp = [u'  <generator']
            if generator_url:
                tmp.append(u' uri="%s"' % escape(generator_url, True))
            if generator_version:
                tmp.append(u' version="%s"' % escape(generator_version, True))
            tmp.append(u'>%s</generator>\n' % escape(generator_name))
            yield u''.join(tmp)
        for entry in self.entries:
            for line in entry.generate():
                yield u'  ' + line
        yield u'</feed>\n'
    def to_string(self):
        """Convert the feed into a string."""
        return u''.join(self.generate())
    def get_response(self):
        """Return a response object for the feed."""
        return BaseResponse(self.to_string(), mimetype='application/atom+xml')
    def __call__(self, environ, start_response):
        """Use the class as WSGI response object."""
        return self.get_response()(environ, start_response)
    def __unicode__(self):
        return self.to_string()
    def __str__(self):
        # Python 2 str(): encode the unicode document as UTF-8 bytes.
        return self.to_string().encode('utf-8')
class FeedEntry(object):
    """Represents a single entry in a feed.
    :param title: the title of the entry. Required.
    :param title_type: the type attribute for the title element. One of
                       ``'html'``, ``'text'`` or ``'xhtml'``.
    :param content: the content of the entry.
    :param content_type: the type attribute for the content element. One
                         of ``'html'``, ``'text'`` or ``'xhtml'``.
    :param summary: a summary of the entry's content.
    :param summary_type: the type attribute for the summary element. One
                         of ``'html'``, ``'text'`` or ``'xhtml'``.
    :param url: the url for the entry.
    :param id: a globally unique id for the entry. Must be an URI. If
               not present the URL is used, but one of both is required.
    :param updated: the time the entry was modified the last time. Must
                    be a :class:`datetime.datetime` object. Required.
    :param author: the author of the feed. Must be either a string (the
                   name) or a dict with name (required) and uri or
                   email (both optional). Can be a list of (may be
                   mixed, too) strings and dicts, too, if there are
                   multiple authors. Required if not every entry has an
                   author element.
    :param published: the time the entry was initially published. Must
                      be a :class:`datetime.datetime` object.
    :param rights: copyright information for the entry.
    :param rights_type: the type attribute for the rights element. One of
                        ``'html'``, ``'text'`` or ``'xhtml'``. Default is
                        ``'text'``.
    :param links: additional links. Must be a list of dictionaries with
                  href (required) and rel, type, hreflang, title, length
                  (all optional)
    :param xml_base: The xml base (url) for this feed item. If not provided
                     it will default to the item url.
    For more information on the elements see
    http://www.atomenabled.org/developers/syndication/
    Everywhere where a list is demanded, any iterable can be used.
    """
    def __init__(self, title=None, content=None, feed_url=None, **kwargs):
        self.title = title
        self.title_type = kwargs.get('title_type', 'text')
        self.content = content
        self.content_type = kwargs.get('content_type', 'html')
        self.url = kwargs.get('url')
        self.id = kwargs.get('id', self.url)
        self.updated = kwargs.get('updated')
        self.summary = kwargs.get('summary')
        self.summary_type = kwargs.get('summary_type', 'html')
        self.author = kwargs.get('author')
        self.published = kwargs.get('published')
        self.rights = kwargs.get('rights')
        self.links = kwargs.get('links', [])
        self.xml_base = kwargs.get('xml_base', feed_url)
        # Normalize author exactly as AtomFeed does: single string/dict to
        # a list, bare strings to {'name': ...} dicts.
        # NOTE: `basestring` means this module targets Python 2.
        if not hasattr(self.author, '__iter__') \
           or isinstance(self.author, (basestring, dict)):
            self.author = [self.author]
        for i, author in enumerate(self.author):
            if not isinstance(author, dict):
                self.author[i] = {'name': author}
        if not self.title:
            raise ValueError('title is required')
        if not self.id:
            raise ValueError('id is required')
        if not self.updated:
            raise ValueError('updated is required')
    def __repr__(self):
        return '<%s %r>' % (
            self.__class__.__name__,
            self.title
        )
    def generate(self):
        """Yields pieces of ATOM XML."""
        base = ''
        if self.xml_base:
            base = ' xml:base="%s"' % escape(self.xml_base, True)
        yield u'<entry%s>\n' % base
        yield u'  ' + _make_text_block('title', self.title, self.title_type)
        yield u'  <id>%s</id>\n' % escape(self.id)
        yield u'  <updated>%s</updated>\n' % format_iso8601(self.updated)
        if self.published:
            yield u'  <published>%s</published>\n' % \
                format_iso8601(self.published)
        if self.url:
            yield u'  <link href="%s" />\n' % escape(self.url)
        for author in self.author:
            yield u'  <author>\n'
            yield u'    <name>%s</name>\n' % escape(author['name'])
            if 'uri' in author:
                yield u'    <uri>%s</uri>\n' % escape(author['uri'])
            if 'email' in author:
                yield u'    <email>%s</email>\n' % escape(author['email'])
            yield u'  </author>\n'
        for link in self.links:
            yield u'  <link %s/>\n' % ''.join('%s="%s" ' % \
                (k, escape(link[k], True)) for k in link)
        if self.summary:
            yield u'  ' + _make_text_block('summary', self.summary,
                                           self.summary_type)
        if self.content:
            yield u'  ' + _make_text_block('content', self.content,
                                           self.content_type)
        yield u'</entry>\n'
    def to_string(self):
        """Convert the feed item into a unicode object."""
        return u''.join(self.generate())
    def __unicode__(self):
        return self.to_string()
    def __str__(self):
        # Python 2 str(): encode the unicode document as UTF-8 bytes.
        return self.to_string().encode('utf-8')
|
|
#!/usr/bin/env python
"""Replace the old Makefile system for testing and installing """
import os
import sys
from PDielec.preader import main as main_preader
from PDielec.pdgui import main as main_pdgui
from PDielec.checkcsv import main as main_checkcsv
from PDielec.checkexcel import main as main_checkexcel
from PDielec.p2cif import main as main_p2cif
from PDielec.vibanalysis import main as main_vibanalysis
import numpy as np
import contextlib
from shutil import copyfile
import subprocess
import time
# Global run-time options for the test driver.
settings = {}
settings['padding'] = 50        # width of the title column in test output
settings['title'] = 'title'     # placeholder title, overwritten per test
debug = False                   # verbose output from the test runners
viewing = False                 # run pdgui with windowing switched on
useLocal = True                 # prefer executables from the local checkout
# Directories (relative to the repository root) containing preader tests.
test_preader = [
    'Castep/preader',
    'Vasp/preader',
    'Phonopy/preader',
    'Gulp/preader',
    'Crystal/preader',
    'AbInit/preader',
    'QE/preader',
]
# Directories containing p2cif tests.
test_p2cif = [
    'P2Cif/',
]
# Directories containing pdgui tests.
test_pdgui = [
    'Castep/AsparticAcid',
    'Castep/Isoleucine',
    'Castep/MgO',
    'Castep/Bubbles',
    'Castep/Na2SO42',
    'Castep/Castep17',
    'Vasp/F-Apatite',
    'Vasp/ZnO',
    'Vasp/Na2SO42',
    'Gulp/calcite',
    'Gulp/Na2SO42',
    'Crystal/Na2SO42',
    'Crystal/Leucine',
    'Crystal/Quartz',
    'Crystal/ZnO/Default',
    'Crystal/ZnO/CPHF',
    'Crystal/ZnO/NoEckart',
    'AbInit/AlAs',
    'AbInit/BaTiO3',
    'AbInit/Na2SO42',
    'QE/ZnO',
    'QE/Na2SO42',
    'QE/Cocaine',
    'Phonopy/ZnO',
    'Phonopy/Na2SO42',
    'SizeEffects/BaTiO3',
    'SizeEffects/MgO',
    'SizeEffects/ZnO',
    'Mie/MgO',
    'Mie/MgO_lognormal',
    'ATR/AlAs',
    'ATR/Na2SO42',
    'ATR/Na2SO42_fit',
    'Experiment/constant',
    'Experiment/fpsq',
    'Experiment/drude-lorentz',
    'Experiment/interpolation',
    'Experiment/Mayerhofer',
    'SingleCrystal/Bi2Se3',
    'SingleCrystal/Bi2Se3_film',
]
# Directories containing vibanalysis tests.
test_vibanalysis = [
    'VibAnalysis/AsparticAcid',
    'VibAnalysis/BaTiO3',
    'VibAnalysis/Cocaine',
    'VibAnalysis/F-Apatite',
    'VibAnalysis/Isoleucine',
    'VibAnalysis/Leucine',
]
# Subset of the pdgui tests that is used for benchmarking.
benchmarks = [
    'Castep/AsparticAcid',
    'Castep/Isoleucine',
    'Castep/MgO',
    'Castep/Bubbles',
    'Castep/Na2SO42',
    'Vasp/F-Apatite',
    'Vasp/ZnO',
    'Vasp/Na2SO42',
    'Gulp/calcite',
    'Gulp/Na2SO42',
    'Crystal/Na2SO42',
    'Crystal/Leucine',
    'Crystal/Quartz',
    'Crystal/ZnO/Default',
    'Crystal/ZnO/CPHF',
    'Crystal/ZnO/NoEckart',
    'AbInit/AlAs',
    'AbInit/BaTiO3',
    'AbInit/Na2SO42',
    'QE/ZnO',
    'QE/Na2SO42',
    'QE/Cocaine',
    'Phonopy/ZnO',
    'Phonopy/Na2SO42',
    'SizeEffects/BaTiO3',
    'SizeEffects/MgO',
    'SizeEffects/ZnO',
    'Mie/MgO',
    'Mie/MgO_lognormal',
    'ATR/AlAs',
    'ATR/Na2SO42',
]
def usage():
    '''Print the pdmake command line help to stderr and exit.'''
    print('pdmake:', file=sys.stderr)
    print('  test' , file=sys.stderr)
    print('  tests' , file=sys.stderr)
    print('       run all the tests ' , file=sys.stderr)
    print('  test-preader' , file=sys.stderr)
    print('       run all the preader tests' , file=sys.stderr)
    print('  test-pdgui' , file=sys.stderr)
    print('       run all the pdgui tests' , file=sys.stderr)
    print('  test-p2cif' , file=sys.stderr)
    print('       run all the p2cif tests' , file=sys.stderr)
    print('  test-vibanalysis' , file=sys.stderr)
    print('       run all the vibanalysis tests' , file=sys.stderr)
    print('  benchmarks ' , file=sys.stderr)
    print('       run the benchmarks ' , file=sys.stderr)
    print('  --regenerate ' , file=sys.stderr)
    print('       regenerate all the test reference data' , file=sys.stderr)
    print('  --root directory ' , file=sys.stderr)
    print('       sets the root directory for the installation' , file=sys.stderr)
    print('  --directory', file=sys.stderr)
    print('       Use the directory when listing the tests' , file=sys.stderr)
    print('  --padding 60', file=sys.stderr)
    print('       change the width of the output title field' , file=sys.stderr)
    print('  --view ' , file=sys.stderr)
    print('       run pdgui with windowing on' , file=sys.stderr)
    print('  --usesystem ' , file=sys.stderr)
    print('       Force the use of system executables' , file=sys.stderr)
    print('  --debug ' , file=sys.stderr)
    print('       run with debug options switched on' , file=sys.stderr)
    # The remaining commands manipulate the file system and are only
    # offered on non-Windows platforms.
    if os.name != 'nt':
        print('  pypi ' , file=sys.stderr)
        print('       Only use on linux installations... ' , file=sys.stderr)
        print('       prepare for pypi uploading' , file=sys.stderr)
        print('  clean' , file=sys.stderr)
        print('       Only use on linux installations... ' , file=sys.stderr)
        print('       clean up' , file=sys.stderr)
        print('  install' , file=sys.stderr)
        print('       Only use on linux installations... ' , file=sys.stderr)
        print('       install the executables to the scripts directory' , file=sys.stderr)
        print('  --scripts directory' , file=sys.stderr)
        print('       Only use on linux installations... ' , file=sys.stderr)
        print('       Set the scripts directory to be used for installation' , file=sys.stderr)
        print('       the default is ~/bin ' , file=sys.stderr)
    exit()
def checkLocalExecutables():
    '''Return True if the rootDirectory checkout contains every
    executable the test suite needs (preader, p2cif, pdgui and
    vibanalysis); changes the working directory to rootDirectory.'''
    os.chdir(rootDirectory)
    required = ('preader', 'p2cif', 'pdgui', 'vibanalysis')
    return all(os.path.isfile(name) for name in required)
def install(scripts):
    '''Copy the command line entry points and the PDielec package into
    the *scripts* directory.  Unix only: relies on cp/mkdir via the
    shell and on `mychdir` (defined elsewhere in this file).'''
    print('Performing installation of scripts into ',scripts)
    global rootDirectory
    mychdir(rootDirectory)
    # NOTE(review): shell=True with an interpolated path - assumes
    # `scripts` comes from a trusted command line, not untrusted input.
    print('Installing preader',end='')
    subprocess.run('cp -P preader {}'.format(scripts),shell=True)
    print(',p1reader',end='')
    subprocess.run('cp -P p1reader {}'.format(scripts),shell=True)
    print(',pdgui',end='')
    subprocess.run('cp -P pdgui {}'.format(scripts),shell=True)
    print(',p2cif',end='')
    subprocess.run('cp -P p2cif {}'.format(scripts),shell=True)
    print(',pdcompare',end='')
    subprocess.run('cp -P pdcompare {}'.format(scripts),shell=True)
    print(',graphdatagenerator',end='')
    subprocess.run('cp -P graphdatagenerator {}'.format(scripts),shell=True)
    print(',vibanalysis',end='')
    subprocess.run('cp -P vibanalysis {}'.format(scripts),shell=True)
    print(',pdmake',end='')
    subprocess.run('cp -P pdmake {}'.format(scripts),shell=True)
    print(',python scripts')
    # Copy the python package (and GUI resources) next to the scripts.
    subprocess.run('mkdir -p {}/PDielec'.format(scripts),shell=True)
    subprocess.run('mkdir -p {}/PDielec/GUI'.format(scripts),shell=True)
    subprocess.run('cp -r PDielec/*.py {}/PDielec'.format(scripts),shell=True)
    subprocess.run('cp -r PDielec/GUI/*.py {}/PDielec/GUI/'.format(scripts),shell=True)
    subprocess.run('cp -r PDielec/GUI/*.png {}/PDielec/GUI/'.format(scripts),shell=True)
    print('Finished installation of scripts into ',scripts)
@contextlib.contextmanager
def redirect(file):
    '''Context manager that captures stdout into *file*.

    While the context is active sys.stdout is replaced by a tee object
    that writes everything to *file* and, when the global ``debug`` flag
    is set, also to the real terminal.  When ``debug`` is off stderr is
    silenced to os.devnull.  Yields the open log file object.

    Fixes over the previous version: the log file and the devnull handle
    are now closed on exit, and stdout/stderr are restored to whatever
    they were on entry (not blindly to sys.__stdout__), so nested
    redirects behave correctly.
    '''
    class Logger:
        def __init__(self, file):
            self.terminal = sys.stdout
            self.log = open(file, 'w')
        def write(self, message):
            # Echo to the terminal only in debug mode; always log.
            if debug:
                self.terminal.write(message)
            self.log.write(message)
        def __getattr__(self, attr):
            # Delegate flush(), isatty(), etc. to the real stream.
            return getattr(self.terminal, attr)
    previous_stdout = sys.stdout
    previous_stderr = sys.stderr
    logger = Logger(file)
    sys.stdout = logger
    devnull = None
    if not debug:
        devnull = open(os.devnull, 'w')
        sys.stderr = devnull
    try:
        yield logger.log
    finally:
        # Restore the streams that were active on entry and release
        # the file handles we opened.
        sys.stdout = previous_stdout
        sys.stderr = previous_stderr
        logger.log.close()
        if devnull is not None:
            devnull.close()
def readNmaFile(file):
    '''Parse a vibAnalysis .nma file and summarize it.

    Collects the mode frequencies, the R**2 values and the "Explained"
    percentages, drops the first three entries of each list (presumably
    the rigid-body modes - the original code did the same) and returns
    the tuple (sum of frequencies, sum of R**2, sum of explained).'''
    frequencies = []
    rsquared = []
    explained = []
    with open(file, 'r') as fd:
        for line in fd:
            splits = line.split()
            if not splits:
                continue
            key = splits[0]
            if key == 'Mode' and splits[1] != 'too':
                frequencies.append(float(splits[2]))
            elif key == 'R**2':
                rsquared.append(float(splits[2]))
            elif key == 'Explained':
                explained.append(float(splits[3]))
    return (np.sum(np.array(frequencies[3:])),
            np.sum(np.array(rsquared[3:])),
            np.sum(np.array(explained[3:])))
def compareNmaFiles(file1, file2):
    '''Compare the summary statistics of two .nma files.

    Returns the number of summary quantities (frequency sum, R**2 sum,
    explained sum) that differ by more than 1.0e-6.'''
    stats1 = readNmaFile(file1)
    stats2 = readNmaFile(file2)
    return sum(1 for a, b in zip(stats1, stats2) if abs(a - b) > 1.0e-6)
def compareFiles(file1, file2, verbose=None):
    '''Compare two text files token by token and return the number of
    differing tokens.

    Numeric tokens are compared with a relative tolerance of 1.0e-4;
    non-numeric tokens must match exactly.  Parentheses and '%' signs
    are stripped before comparison.  Only overlapping lines and tokens
    are compared; extra lines or tokens in the longer file are ignored
    (unchanged from the original behaviour).

    :param file1: path of the first file
    :param file2: path of the second file
    :param verbose: print details of each mismatch; defaults to the
                    module-level ``debug`` flag when None
    :return: the number of differing tokens
    '''
    nerrors = 0
    # Context managers so the file handles are always closed (the
    # previous version leaked both descriptors).
    with open(file1, 'r') as fd1, open(file2, 'r') as fd2:
        lines1 = fd1.readlines()
        lines2 = fd2.readlines()
    show = debug if verbose is None else verbose
    for line1, line2 in zip(lines1, lines2):
        for word1, word2 in zip(line1.split(), line2.split()):
            # Strip decorations that should not affect the comparison.
            for ch in ('(', ')', '%'):
                word1 = word1.replace(ch, '')
                word2 = word2.replace(ch, '')
            try:
                float1 = float(word1)
                float2 = float(word2)
                if abs(float1) + abs(float2) > 1.e-12:
                    if 2 * abs(float1 - float2) / (abs(float1) + abs(float2)) > 1.0e-4:
                        if show:
                            print('Float difference', float1, float2)
                            print('Line 1', line1)
                            print('Line 2', line2)
                        nerrors += 1
            except ValueError:
                # Not numbers - fall back to an exact string comparison.
                # (Narrowed from a bare except, which hid real errors.)
                if word1 != word2:
                    if show:
                        print('Word difference', word1, word2)
                        print('Line 1', line1)
                        print('Line 2', line2)
                    nerrors += 1
    return nerrors
def runP2CifTest(title, instructions, regenerate):
    '''Run a test on p2cif.

    title is the title printed for the test
    instructions are the command line parameters for p2cif
    regenerate, when True, overwrites the reference file instead of
    comparing against it.'''
    global rootDirectory
    global useLocal
    if useLocal:
        # Run the checked-out script through the interpreter.
        sys.argv = ['python']
        sys.argv.append(os.path.join(rootDirectory,'p2cif'))
    else:
        sys.argv = ['p2cif']
    sys.argv.extend(instructions)
    outputfile = 'all.cif'
    if regenerate:
        outputfile = 'all.ref.cif'
    with open(outputfile,'w') as stdout:
        if debug:
            result = subprocess.run(sys.argv,stdout=stdout)
        else:
            result = subprocess.run(sys.argv,stdout=stdout,stderr=subprocess.DEVNULL)
        # end if debug
    # end with open
    # If not doing a regeneration perform a check
    if not regenerate:
        nerrors = compareFiles('all.cif', 'all.ref.cif')
        if nerrors > 0:
            print(title+' {} ERRORS:'.format(nerrors))
        else:
            print(title+' OK:')
        # end if
    else:
        print(title+' Regenerated:')
    # end if
    return
def runVibAnalysis(title, instructions, regenerate):
    '''Run a vibanalysis test.

    title        -- the title in the pdmake file
    instructions -- the command line parameters
    regenerate   -- set to True if the reference file is overwritten
    The analysed file is the first non-option argument; its .nma output
    is compared against (or copied to) the .nma.ref reference file.
    '''
    global rootDirectory
    global useLocal
    if useLocal:
        sys.argv = ['python']
        sys.argv.append(os.path.join(rootDirectory,'vibanalysis'))
    else:
        sys.argv = ['vibanalysis']
    sys.argv.extend(instructions)
    # The file to analyse is the last non-option argument
    filename = None
    for option in instructions:
        if not option.startswith('-'):
            filename = option
        # end if
    # end for
    if filename is None:
        # Bug fix: the original printed this message but carried on and
        # crashed in os.path.splitext(None); bail out instead.
        print('Error in runVibAnalysis: there is no filename specified')
        return
    header = os.path.splitext(filename)[0]
    nmafile = header+'.nma'
    reffile = header+'.nma.ref'
    if debug:
        result = subprocess.run(sys.argv)
    else:
        result = subprocess.run(sys.argv,stdout=subprocess.DEVNULL,stderr=subprocess.DEVNULL)
    # If not doing a regeneration perform a check
    if not regenerate:
        nerrors = compareNmaFiles(nmafile, reffile)
        if nerrors > 0:
            print(title+' {} ERRORS:'.format(nerrors))
        else:
            print(title+' OK:')
        # end if
    else:
        copyfile(nmafile,reffile)
        print(title+' Regenerated:')
    # end if
    return
def runPreaderTest(title, instructions, regenerate):
    '''Run a preader test.

    title        -- the title in the pdmake file
    instructions -- the command line parameters
    regenerate   -- set to True if the reference file is overwritten
    preader's stdout is captured to command.csv (or command.ref.csv when
    regenerating) and checked with checkcsv against the reference.
    '''
    global debug
    global rootDirectory
    global useLocal
    if debug:
        print('runPreaderTest:',title,instructions)
    if useLocal:
        sys.argv = ['python']
        sys.argv.append(os.path.join(rootDirectory,'preader'))
    else:
        sys.argv = ['preader']
    sys.argv.extend(instructions)
    outputfile = 'command.csv'
    if regenerate:
        outputfile = 'command.ref.csv'
    # preader writes its report to stdout, so capture it in the file
    with open(outputfile,'w') as fd:
        if debug:
            result = subprocess.run(sys.argv,stdout=fd)
        else:
            result = subprocess.run(sys.argv,stdout=fd,stderr=subprocess.DEVNULL)
    # end with
    # If not doing a regeneration perform a check
    if not regenerate:
        sys.argv = ['checkcsv', 'command.ref.csv','command.csv']
        with redirect(os.devnull):
            result = main_checkcsv()
        nerrors,keep_line_number,keep_word1,keep_word2,max_percentage_error = result
        if nerrors > 0:
            # Bug fix: the original passed (nerrors, keep_line_number,
            # max_percentage_error) to a two-placeholder format, so the error
            # count was printed as the line number and the maximum percentage
            # error was silently dropped.
            print(title+' {} ERRORS:'.format(nerrors)+"LARGEST ON LINE {} - max %error={}".format(keep_line_number, max_percentage_error))
        else:
            print(title+' OK:'+" - max %error={}" .format(max_percentage_error))
        # end if
    else:
        print(title+' Regenerated:')
    # end if
    return
def runPDGuiTest(title, instructions, regenerate, benchmarks=False):
    '''Run a pdgui test.

    title        -- the title in the pdmake file
    instructions -- the command line parameters
    regenerate   -- set to True if the reference file is overwritten
    benchmarks   -- when True only the elapsed time is reported; no
                    spreadsheet check or regeneration is performed
    When the module-level viewing flag is set the GUI is left open and no
    checking at all takes place.
    '''
    global viewing
    global start_time
    global debug
    global rootDirectory
    global useLocal
    if useLocal:
        sys.argv = ['python']
        sys.argv.append(os.path.join(rootDirectory,'pdgui'))
    else:
        sys.argv = ['pdgui']
    sys.argv.extend(instructions)
    if debug:
        result = subprocess.run(sys.argv)
    else:
        result = subprocess.run(sys.argv,stdout=subprocess.DEVNULL,stderr=subprocess.DEVNULL)
    if not viewing:
        # If not doing a regeneration perform a check
        if not regenerate and not benchmarks:
            # checkexcel compares the generated spreadsheet with the reference
            sys.argv = ['checkexcel', 'results.ref.xlsx','results.xlsx']
            with redirect(os.devnull):
                result = main_checkexcel()
            nerrors,row,col,sheet,file1,value1,file2,value2,max_percentage_error = result
            if nerrors > 0:
                print(title+'{} ERRORS:'.format(nerrors)+"{}@{},{} - max %error={}".format(sheet,row,col, max_percentage_error))
            else:
                print(title+' OK:'+" - max %error={}" .format(max_percentage_error))
            # end if
        elif benchmarks:
            # Benchmark runs just report the wall-clock time since the
            # previous test; start_time is a module-level accumulator
            end_time = time.time()
            elapsed_time = end_time - start_time
            start_time = end_time
            print(title+' OK:'+" - elapsed time {:.3f}s" .format(elapsed_time))
        elif not benchmarks:
            # If we asked for a benchmarking then don't do a regenerate
            copyfile('results.xlsx','results.ref.xlsx')
            print(title+' Regenerated:')
        # end if not regenerate
    #end if not viewing
    return
def changePadding(all):
    """Return the title padding width.

    The result is the length of the longest entry in *all*, but never less
    than the configured settings['padding'].
    """
    widest = max((len(entry) for entry in all), default=0)
    return max(widest, settings['padding'])
def runTests(testlist, testType, regenerate):
    '''Run the tests given in the directories stored in testlist.
       testType can be pdgui, preader, p2cif, vibanalysis or benchmarks
       regenerate causes the test data to be regenerated'''
    global start_time
    global debug
    # All test directories live under Examples/ in the root directory
    mychdir(os.path.join(rootDirectory,'Examples'))
    homedir = os.getcwd()
    # Map the internal test type onto a human readable banner
    convertTestType={}
    convertTestType['pdgui'] = 'PDGui tests'
    convertTestType['vibanalysis'] = 'VibAnalysis tests'
    convertTestType['preader'] = 'PReader tests'
    convertTestType['p2cif'] = 'P2Cif tests'
    convertTestType['benchmarks'] = 'BenchMarks'
    print('')
    print('--------------------------------------------------')
    print(convertTestType[testType],'starting' )
    # start_time is module-level: runPDGuiTest uses it for benchmark timing
    start_time = time.time()
    test_start_time = start_time
    if debug:
        print('RunTests: homedir=',homedir)
    pdmakefile = 'command.pdmake'
    for directory in testlist:
        if debug:
            print('RunTest: directory=',directory)
        if not os.path.isdir(directory):
            print('Error: command needs to be executed in the PDielec home directory')
            print('       current directory is {}'.format(homedir))
            print('       required directory is {}'.format(directory))
            exit()
        if testType == 'benchmarks':
            runPdMakefile(directory,pdmakefile,regenerate,benchmarks=True)
        else:
            runPdMakefile(directory,pdmakefile,regenerate)
    # end for
    elapsed_time = time.time() - test_start_time
    print('--------------------------------------------------')
    print(convertTestType[testType],'completed in {:.3f}s'.format(elapsed_time))
def readPdMakefile(directory,filename):
    '''Parse a pdmake file and return (title, instructions).

    The first line of the file is the title; the remaining non-comment
    lines are split into a dictionary: the first word is the key and the
    remaining words are its value (a list).  A trailing backslash
    continues an instruction onto the next line.
    '''
    global settings
    global debug
    instructions = {}
    if debug:
        print('ReadPdMakefile:',directory,filename)
    with open(filename,'r') as fd:
        # NOTE(review): readline()[:-1] chops the final character even when
        # the last line has no trailing newline — confirm pdmake files always
        # end with a newline.
        line = fd.readline()[:-1]
        if settings['title'] == 'title':
            # Use the file's own first line as the padded title
            title = line.ljust(settings['padding'])
        else:
            # Otherwise use the directory (or cwd) name as the title
            if len(directory)>1:
                title = directory.ljust(settings['padding'])
            else:
                title = os.getcwd().ljust(settings['padding'])
        line = fd.readline()[:-1]
        while line:
            if len(line)>0 and not line.startswith('#'):
                # Gather backslash-continued lines into one instruction
                full_line = line.replace('\\','')
                while line.endswith('\\'):
                    line = fd.readline()[:-1]
                    full_line += line.replace('\\','')
                splits = full_line.split()
                key = splits[0]
                value = splits[1:]
                instructions[key] = value
            # end if
            line = fd.readline()[:-1]
        # end while
    #end open
    return title,instructions
def mychdir(directory):
    '''Change the working directory to *directory*, tolerating failures.

    An empty string is a no-op.  If the directory does not exist a
    diagnostic is printed and the working directory is left unchanged.
    '''
    if directory != '':
        if os.path.isdir(directory):
            # (removed unused locals `result` and `newdir` from the original)
            os.chdir(directory)
        else:
            print('mychdir: Error directory does not exist',directory)
            print('mychdir: Current directory is ',os.getcwd())
        # end if
    # end if
    return
def runPdMakefile(directory,pdmakefile,regenerate,benchmarks=False):
    '''Run a specific pdmake file.

    Changes into *directory*, parses *pdmakefile* and dispatches each
    instruction to the matching test runner (preader / vibanalysis /
    p2cif / pdgui).  Restores the original working directory on exit.
    '''
    global debug
    homedir = os.getcwd()
    if debug:
        print('RunPdMakefile: directory  =',directory)
        print('RunPdMakefile: pdmakefile =',pdmakefile)
        print('RunPdMakefile: cwd        =',homedir)
    #directory_,filename = os.path.split(pdmakefile)
    #if debug:
    #    print('RunPdMakefile: directory_ =',directory_)
    #    print('RunPdMakefile: filename  =',filename)
    #mychdir(directory_)
    mychdir(directory)
    title,instructions = readPdMakefile(directory,pdmakefile)
    if debug:
        print('RunPdMakefile: title =',title)
        print('RunPdMakefile: instructions =',instructions)
    # Dispatch each instruction to the runner for that tool
    for key in instructions:
        parameters = instructions[key]
        if key == 'preader':
            runPreaderTest(title,parameters,regenerate)
        elif key == 'vibanalysis':
            runVibAnalysis(title,parameters,regenerate)
        elif key == 'p2cif':
            runP2CifTest(title,parameters,regenerate)
        elif key == 'pdgui':
            # pdgui needs extra switches depending on the run mode
            if viewing:
                parameters.extend(['-script', 'script.py'])
            elif benchmarks:
                parameters.extend(['-nosplash', '-exit', '-script', 'script.py'])
            else:
                parameters.extend(['-nosplash', 'results.xlsx', '-exit', '-script', 'script.py'])
            if debug:
                parameters.append('-d')
            runPDGuiTest(title,parameters,regenerate,benchmarks=benchmarks)
    mychdir(homedir)
def runClean():
    '''Remove generated result files (results.xlsx/csv, all.cif, *.nma)
    from the Examples tree.  Not supported on Windows.'''
    if os.name == 'nt':
        print('Unable to clean installation in Windows')
        return
    print('Cleaning old results from the Examples directory')
    os.chdir(rootDirectory)
    # Raw strings: '\;' and '\*' are invalid escape sequences in a normal
    # literal (DeprecationWarning, an error in future Pythons); the raw form
    # produces byte-identical commands.
    subprocess.run(r'find . -name results.xlsx -exec rm -f {} \;',shell=True)
    subprocess.run(r'find . -name results.csv -exec rm -f {} \;',shell=True)
    subprocess.run(r'find . -name all.cif -exec rm -f {} \;',shell=True)
    subprocess.run(r'find . -name \*.nma -exec rm -f {} \;',shell=True)
    print('Cleaning complete')
def runPyInstaller():
    """Build the pyinstaller bundle under dist/pdgui (not supported on Windows)."""
    if os.name == 'nt':
        print('Unable to create pyInstaller installation in Windows')
        return
    print('Creating pyinstaller files in dis/pdgui ')
    os.chdir(rootDirectory)
    # Build the bundle, then copy the Qt platform plugins next to the binary
    for command in ('pyinstaller pdgui.spec -y',
                    'cp -r dist/pdgui/PyQt5/Qt/plugins/platforms dist/pdgui'):
        subprocess.run(command, shell=True)
def runPyPi():
    """Create the PyPi sdist and wheel distributions (not supported on Windows)."""
    if os.name == 'nt':
        print('Unable to create PyPi installation in Windows')
        return
    print('Creating PyPi distribution files')
    os.chdir(rootDirectory)
    build_command = 'rm -rf build dist PDielec.egg-info; python setup.py sdist bdist_wheel'
    subprocess.run(build_command, shell=True)
def testForRootDirectory(path):
test = os.path.join(path,'Examples')
if not os.path.isdir(test):
return False
# test = os.path.join(path,'PDielec')
# if not os.path.isdir(test):
# return False
# test = os.path.join(path,'Sphinx')
# if not os.path.isdir(test):
# return False
return True
def findRootDirectory(start):
    """Walk upwards from *start* until a PDielec root directory is found.

    Returns the first ancestor that satisfies testForRootDirectory();
    prints an error and exits if the filesystem root is reached first.
    """
    previous = start
    candidate = os.path.join(start, 'Examples')
    # os.path.split stops changing the path once the filesystem root is hit
    while candidate != previous:
        previous = candidate
        candidate = os.path.split(candidate)[0]
        if testForRootDirectory(candidate):
            return candidate
    print('Unable to locate root directory')
    exit()
def main():
    '''Parse the pdmake command line, then perform the requested test,
    regeneration, cleaning or packaging actions.

    Mutates the module globals debug, viewing, settings, rootDirectory
    and useLocal according to the command line switches.
    '''
    global debug
    global viewing
    global settings
    global rootDirectory
    global useLocal
    # Start processing the directories
    originalDirectory = os.getcwd()
    if len(sys.argv) <= 1 :
        usage()
    tokens = sys.argv[1:]
    ntokens = len(tokens)-1
    itoken = -1
    regenerate = False
    scriptsDirectory='~/bin'
    command = sys.argv[0]
    #rootDirectory,command = os.path.split(command)
    #if rootDirectory == '':
    #    rootDirectory = originalDirectory
    rootDirectory = originalDirectory
    rootDirectory = findRootDirectory(rootDirectory)
    actions = []
    pdmakefiles = []
    # Loop over the tokens on the command line
    while itoken < ntokens:
        itoken += 1
        token = tokens[itoken]
        if token == 'test' or token == 'tests':
            actions.append('test all')
        elif token == 'test-preader':
            actions.append('test preader')
        elif token == 'test-p2cif':
            actions.append('test p2cif')
        elif token == 'test-pdgui':
            actions.append('test pdgui')
        elif token == 'test-vibanalysis':
            actions.append('test vibanalysis')
        elif token == 'benchmarks':
            actions.append('run benchmarks')
        elif token == '-root' or token == '--root':
            itoken += 1
            rootDirectory = tokens[itoken]
        elif token == '-debug' or token == '--debug' or token == '-d':
            debug = True
        elif token == '-usesystem':
            useLocal = False
        elif token == '-view' or token == '--view' or token == '-v':
            viewing = True
        elif token == '-regenerate' or token == '--regenerate':
            regenerate = True
        elif token == '-padding' or token == '--padding':
            itoken += 1
            settings['padding'] = int(tokens[itoken])
        elif token == '-directory' or token == '--directory':
            # NOTE(review): this branch consumes the following token without
            # using it (itoken += 1) — confirm whether -directory should take
            # an argument or whether the increment is a copy/paste slip.
            itoken += 1
            settings['title'] = 'directory'
        elif token == 'pypi':
            actions.append('run pypi')
        # elif token == 'pyinstaller':
        #     actions.append('run pyinstaller')
        elif token == 'clean':
            actions.append('run clean')
        elif token == 'install':
            actions.append('install')
        elif token == 'scripts' or token == '-scripts':
            itoken += 1
            scriptsDirectory = tokens[itoken]
        elif token.endswith('.pdmake'):
            pdmakefiles.append(token)
        else:
            usage()
            exit();
        # endif
    # end while
    #
    # Change to the rootDirectory
    if not os.path.isdir(rootDirectory):
        print('Error: The root directory is not valid',rootDirectory)
        exit()
    mychdir(rootDirectory)
    #
    # Check to see if the required executables are present
    # If they are then use them
    #
    if useLocal:
        if checkLocalExecutables():
            useLocal = True
        else:
            useLocal = False
    # Change directory to the Examples directory
    if not os.path.isdir('Examples'):
        print('Error: command needs to be executed in the PDielec home directory')
        exit()
    #
    # Handle any pdmake files there might be
    #
    for pdmakefile in pdmakefiles:
        directory = originalDirectory
        runPdMakefile(directory,pdmakefile,regenerate)
    #
    # Now loop over the actions
    #
    mychdir('Examples')
    if settings['title'] != 'title':
        # Titles come from directory names, so widen the padding to fit
        test_all = []
        test_all.extend(test_p2cif)
        test_all.extend(test_preader)
        test_all.extend(test_pdgui)
        test_all.extend(test_vibanalysis)
        settings['padding'] = changePadding(test_all)
    #
    for action in actions:
        if action == 'test all':
            runTests(test_p2cif      ,'p2cif'      ,regenerate)
            runTests(test_preader    ,'preader'    ,regenerate)
            runTests(test_pdgui      ,'pdgui'      ,regenerate)
            runTests(test_vibanalysis,'vibanalysis',regenerate)
        elif action == 'test preader':
            runTests(test_preader    ,'preader'    ,regenerate)
        elif action == 'test p2cif':
            runTests(test_p2cif      ,'p2cif'      ,regenerate)
        elif action == 'test pdgui':
            runTests(test_pdgui      ,'pdgui'      ,regenerate)
        elif action == 'test vibanalysis':
            runTests(test_vibanalysis,'vibanalysis',regenerate)
        elif action == 'run benchmarks':
            runTests(benchmarks,'benchmarks',regenerate)
        elif action == 'run pypi':
            runPyPi()
        elif action == 'run pyinstaller':
            runPyInstaller()
        elif action == 'run clean':
            runClean()
        elif action == 'install':
            install(scriptsDirectory)
        else:
            usage()
            exit();
        #end if
    #end for
# end of def main
if __name__ == '__main__':
    # Bug fix: main() takes no arguments; the original called main(sys),
    # which raised TypeError on every run.
    main()
|
|
"""Tests for the seqls script (seqparse.cli.seqls)."""
# "Future" Libraries
from __future__ import print_function
# Standard Libraries
import copy
import os
import shlex
import time
import unittest
# Third Party Libraries
import mock
from builtins import range
from . import (DirEntry, generate_entries, initialise_mock_scandir_data,
mock_scandir_deep)
from .. import get_version
from ..cli import seqls
from ..sequences import FileSequence, FrameChunk
###############################################################################
# class: TestFrameSequences
class TestSeqls(unittest.TestCase):
    """Test basic functionality on the seqls script."""
    # Fixture constants shared by the tests below
    _test_ext = "exr"
    _test_file_name = "TEST_DIR"
    _test_root = "test_dir"
    _singletons = ["singleton0.jpg", "singleton1.jpg"]
    def setUp(self):
        """Set up the test instance."""
        pass
    def test_parse_args(self):
        """Seqls: Test seqls argument parsing."""
        # Options produced when seqls is invoked with no arguments at all
        defaults = dict(
            all=False,
            human_readable=False,
            long_format=False,
            max_levels=[-1],
            min_levels=[-1],
            missing=False,
            search_path=["."],
            seqs_only=False,
            version=False)
        args = vars(seqls.parse_args([]))
        self.assertEqual(args, defaults)
        # Each entry pairs a raw argv with the options it should override
        # yapf: disable
        data = [
            (["-laH"], dict(all=True, human_readable=True, long_format=True)),
            (shlex.split("test_dir -l"), dict(
                search_path=["test_dir"], long_format=True)),
            (shlex.split("--maxdepth 0"), dict(max_levels=[0])),
            (["test_dir"], dict(search_path=["test_dir"])),
            (shlex.split("--maxdepth 0 --mindepth 2"), dict(
                max_levels=[0], min_levels=[2])),
            (shlex.split("--maxdepth 1 -S"), dict(
                max_levels=[1], seqs_only=True)),
            (shlex.split("-m test_dir"), dict(
                missing=True, search_path=["test_dir"], seqs_only=True))]
        # yapf: enable
        for input_args, updated_options in data:
            expected_options = copy.deepcopy(defaults)
            expected_options.update(updated_options)
            self.assertEqual(expected_options,
                             vars(seqls.parse_args(input_args)))
    @mock.patch("seqparse.seqparse.scandir")
    def test_seqls_with_arguments(self, mock_api_call):
        """Seqls: Test seqls with supplied arguments."""
        # Replace disk scanning with the canned deep directory structure
        mock_api_call.side_effect = mock_scandir_deep
        args = seqls.parse_args(["--version"])
        version = seqls.main(args, _debug=True)
        self.assertEqual(version, get_version(pretty=True))
        print("\n  SEQUENCES\n  ---------")
        initialise_mock_scandir_data(
            os.path.join(os.getcwd(), self._test_root))
        args = seqls.parse_args(["test_dir"])
        seqs = list(seqls.main(args, _debug=True))
        for seq in seqs:
            print("  ", seq)
        # --maxdepth: each extra level of depth exposes one more sequence
        print("\n  MAX LEVELS\n  ----------")
        for max_levels in range(-1, 4):
            initialise_mock_scandir_data(
                os.path.join(os.getcwd(), self._test_root))
            args = seqls.parse_args(
                shlex.split("test_dir --maxdepth {:d}".format(max_levels)))
            seqs = list(seqls.main(args, _debug=True))
            expected_seqs = max_levels + 2
            if max_levels == -1:
                expected_seqs = 5
            blurb = "  o max_levels == {:d}: {:d} ({:d} expected) entries"
            print(blurb.format(max_levels, len(seqs), expected_seqs))
            for seq in seqs:
                print("    -", seq)
            self.assertEqual(len(seqs), expected_seqs)
        # --mindepth: each extra level of minimum depth hides one sequence
        print("\n  MIN LEVELS\n  ----------")
        for min_levels in range(-1, 4):
            initialise_mock_scandir_data(
                os.path.join(os.getcwd(), self._test_root))
            args = seqls.parse_args(
                shlex.split("test_dir --mindepth {:d}".format(min_levels)))
            seqs = list(seqls.main(args, _debug=True))
            expected_seqs = 3 - min_levels
            if min_levels == -1:
                expected_seqs = 5
            blurb = "  o min_levels == {:d}: {:d} ({:d} expected) entries"
            print(blurb.format(min_levels, len(seqs), expected_seqs))
            for seq in seqs:
                print("    -", seq)
            self.assertEqual(len(seqs), expected_seqs)
        print("")
    @mock.patch("seqparse.seqparse.scandir")
    def test_singletons(self, mock_api_call):
        """Seqls: Test file singleton discovery from disk location."""
        # Expected outputs ...
        output = [os.path.join(self._test_root, x) for x in self._singletons]
        entries = list()
        for file_name in output:
            entries.append(DirEntry(file_name))
        mock_api_call.return_value = iter(entries)
        args = seqls.parse_args(["test_dir"])
        file_names = list(seqls.main(args, _debug=True))
        self.assertEqual(sorted(file_names), output)
        # Test seqs_only option: singletons are not sequences, so -S hides them
        args = seqls.parse_args(["test_dir", "-S"])
        file_names = list(seqls.main(args, _debug=True))
        self.assertEqual(file_names, [])
    @mock.patch("seqparse.seqparse.scandir")
    def test_missing_option(self, mock_api_call):
        """Seqls: Test missing option."""
        file_path = os.path.join(self._test_root, self._test_file_name)
        # Frames 1,3,5,7,9,11 exist on "disk" ...
        chunk_in = FrameChunk(first=1, last=11, step=2, pad=4)
        fseq = FileSequence(
            name=file_path, ext=self._test_ext, frames=chunk_in)
        input_entries = list(map(DirEntry, fseq))
        mock_api_call.return_value = iter(input_entries)
        # ... so the missing frames are 2,4,6,8,10
        chunk_out = FrameChunk(first=2, last=10, step=2, pad=4)
        expected = FileSequence(
            name=file_path, ext=self._test_ext, frames=chunk_out)
        args = seqls.parse_args(["test_dir", "-m"])
        inverted = seqls.main(args, _debug=True)
        self.assertEqual(len(inverted), 1)
        print("\n\n  SEQUENCE\n  --------")
        print("  input files:   ", fseq)
        print("  expected files:", expected)
        print("  inverted files:", inverted[0])
        self.assertEqual(inverted[0], str(expected))
    @mock.patch("seqparse.seqparse.scandir")
    def test_long_format_option(self, mock_api_call):
        """Seqls: Test the long-format option."""
        frames = {4: (1, 2, 3, 4, 6)}
        root_dir = os.path.join(os.getcwd(), self._test_root)
        # A visible sequence, a hidden (dot-file) sequence and a singleton
        input_entries = generate_entries(
            name="test", ext="py", frames=frames, root=root_dir)
        input_entries.extend(
            generate_entries(
                name=".test", ext="py", frames=frames, root=self._test_root))
        input_entries.append(DirEntry(os.path.join(root_dir, "pony.py")))
        # Fixed mtimes baked into the mocked DirEntry stat data
        file_date = time.strftime('%Y/%m/%d %H:%M', time.localtime(1490997828))
        fseq_date = time.strftime('%Y/%m/%d %H:%M', time.localtime(1490908305))
        opts = dict(file_date=file_date, fseq_date=fseq_date, root=root_dir)
        mock_api_call.return_value = input_entries
        args = seqls.parse_args(["test_dir", "-l"])
        output = seqls.main(args, _debug=True)
        expected = [
            "36520 {fseq_date} {root}/test.0001-0004,0006.py",
            "9436 {file_date} {root}/pony.py"
        ]
        expected = [x.replace("/", os.sep).format(**opts) for x in expected]
        self.assertEqual(len(output), 2)
        self.assertEqual(output, expected)
        # -H switches the size column to human readable units
        args = seqls.parse_args(["test_dir", "-l", "-H"])
        output = seqls.main(args, _debug=True)
        expected = [
            "35.7K {fseq_date} {root}/test.0001-0004,0006.py",
            "9.2K {file_date} {root}/pony.py"
        ]
        expected = [x.replace("/", os.sep).format(**opts) for x in expected]
        self.assertEqual(len(output), 2)
        self.assertEqual(output, expected)
        # Missing frames have no size and report the current time
        args = seqls.parse_args(["test_dir", "-l", "-m"])
        output = seqls.main(args, _debug=True)
        fseq_date = time.strftime('%Y/%m/%d %H:%M', time.localtime(None))
        opts = dict(fseq_date=fseq_date, root=root_dir)
        expected = ["---- {fseq_date} {root}/test.0005.py"]
        expected = [x.replace("/", os.sep).format(**opts) for x in expected]
        self.assertEqual(len(output), 1)
        self.assertEqual(output, expected)
    @mock.patch("seqparse.seqparse.scandir")
    def test_all_option(self, mock_api_call):
        """Seqls: Test the all option."""
        frames = {4: (1, 2, 3, 4, 6)}
        root_dir = os.path.join(os.getcwd(), self._test_root)
        input_entries = generate_entries(
            name="test", ext="py", frames=frames, root=root_dir)
        input_entries.extend(
            generate_entries(
                name=".test", ext="py", frames=frames, root=root_dir))
        mock_api_call.return_value = input_entries
        # By default the hidden (dot-file) sequence is excluded ...
        args = seqls.parse_args(["test_dir"])
        output = seqls.main(args, _debug=True)
        expected = [
            os.path.join(root_dir, ".test.0001-0004,0006.py"),
            os.path.join(root_dir, "test.0001-0004,0006.py")
        ]
        self.assertEqual(len(output), 1)
        self.assertEqual(output[0], expected[1])
        # ... and -a includes it
        args = seqls.parse_args(["test_dir", "-a"])
        output = seqls.main(args, _debug=True)
        self.assertEqual(len(output), 2)
        self.assertEqual(output, expected)
|
|
# The MIT License
#
# Copyright (c) 2015 Scriber (http://scriber.io)
# Copyright (c) 2010-2011 Stripe (http://stripe.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import sys
import textwrap
import warnings
from scriber import error, util
# - Requests is the preferred HTTP library
# - Google App Engine has urlfetch
# - Use Pycurl if it's there (at least it verifies SSL certs)
# - Fall back to urllib2 with a warning if needed
try:
    import urllib2
except ImportError:
    # NOTE(review): on Python 3 this import fails and is silently ignored,
    # leaving the name `urllib2` undefined for Urllib2Client below — confirm
    # a Python 3 alias (urllib.request) is wired in before relying on it.
    pass
try:
    import pycurl
except ImportError:
    pycurl = None
try:
    import requests
except ImportError:
    requests = None
else:
    try:
        # Require version 0.8.8, but don't want to depend on distutils
        version = requests.__version__
        major, minor, patch = [int(i) for i in version.split('.')]
    except Exception:
        # Probably some new-fangled version, so it should support verify
        pass
    else:
        # Versions before 0.8.8 lack the `verify` kwarg; disable requests
        if (major, minor, patch) < (0, 8, 8):
            sys.stderr.write(
                'Warning: the Scriber library requires that your Python '
                '"requests" library be newer than version 0.8.8, but your '
                '"requests" library is version %s. Scriber will fall back to '
                'an alternate HTTP library so everything should work. We '
                'recommend upgrading your "requests" library. If you have any '
                'questions, please contact support@scriber.io. (HINT: running '
                '"pip install -U requests" should upgrade your requests '
                'library to the latest version.)' % (version,))
            requests = None
try:
    from google.appengine.api import urlfetch
except ImportError:
    urlfetch = None
def new_default_http_client(*args, **kwargs):
    """Instantiate the best available HTTP client implementation.

    Preference order: urlfetch (App Engine), requests, pycurl, and finally
    urllib2 — with a warning, since urllib2 does not verify certificates.
    All arguments are forwarded to the chosen client's constructor.
    """
    if urlfetch:
        client_class = UrlFetchClient
    elif requests:
        client_class = RequestsClient
    elif pycurl:
        client_class = PycurlClient
    else:
        client_class = Urllib2Client
        warnings.warn(
            "Warning: the Scriber library is falling back to urllib2/urllib "
            "because neither requests nor pycurl are installed. "
            "urllib2's SSL implementation doesn't verify server "
            "certificates. For improved security, we suggest installing "
            "requests.")
    return client_class(*args, **kwargs)
class HTTPClient(object):
    """Abstract base class for the pluggable HTTP transport implementations."""
    def __init__(self, verify_ssl_certs=True):
        # Subclasses consult this flag to decide whether to verify the
        # server's SSL certificate.
        self._verify_ssl_certs = verify_ssl_certs
    def request(self, method, url, headers, post_data=None):
        """Perform an HTTP request; concrete subclasses must override."""
        raise NotImplementedError(
            'HTTPClient subclasses must implement `request`')
class RequestsClient(HTTPClient):
    """HTTP transport backed by the third-party `requests` library."""
    name = 'requests'
    def request(self, method, url, headers, post_data=None):
        """Perform *method* on *url* and return (content, status_code).

        Certificates are checked against the bundled CA file unless the
        client was constructed with verify_ssl_certs=False.
        """
        kwargs = {}
        if self._verify_ssl_certs:
            kwargs['verify'] = os.path.join(
                os.path.dirname(__file__), 'data/ca-certificates.crt')
        else:
            kwargs['verify'] = False
        try:
            try:
                result = requests.request(method,
                                          url,
                                          headers=headers,
                                          data=post_data,
                                          timeout=80,
                                          **kwargs)
            except TypeError as e:
                # Old requests versions reject the `verify` kwarg
                raise TypeError(
                    'Warning: It looks like your installed version of the '
                    '"requests" library is not compatible with Scriber\'s '
                    'usage thereof. (HINT: The most likely cause is that '
                    'your "requests" library is out of date. You can fix '
                    'that by running "pip install -U requests".) The '
                    'underlying error was: %s' % (e,))
            # This causes the content to actually be read, which could cause
            # e.g. a socket timeout. TODO: The other fetch methods probably
            # are susceptible to the same and should be updated.
            content = result.content
            status_code = result.status_code
        except Exception as e:
            # Would catch just requests.exceptions.RequestException, but can
            # also raise ValueError, RuntimeError, etc.
            self._handle_request_error(e)
        return content, status_code
    def _handle_request_error(self, e):
        """Wrap any transport failure in an error.APIConnectionError."""
        if isinstance(e, requests.exceptions.RequestException):
            msg = ("Unexpected error communicating with Scriber. "
                   "If this problem persists, let us know at "
                   "support@scriber.io.")
            err = "%s: %s" % (type(e).__name__, str(e))
        else:
            msg = ("Unexpected error communicating with Scriber. "
                   "It looks like there's probably a configuration "
                   "issue locally. If this problem persists, let us "
                   "know at support@scriber.io.")
            err = "A %s was raised" % (type(e).__name__,)
            if str(e):
                err += " with error message %s" % (str(e),)
            else:
                err += " with no error message"
        msg = textwrap.fill(msg) + "\n\n(Network error: %s)" % (err,)
        raise error.APIConnectionError(msg)
class UrlFetchClient(HTTPClient):
    """HTTP transport backed by Google App Engine's urlfetch service."""
    name = 'urlfetch'
    def request(self, method, url, headers, post_data=None):
        """Perform *method* on *url* and return (content, status_code)."""
        try:
            result = urlfetch.fetch(
                url=url,
                method=method,
                headers=headers,
                # Google App Engine doesn't let us specify our own cert bundle.
                # However, that's ok because the CA bundle they use recognizes
                # scriber.io.
                validate_certificate=self._verify_ssl_certs,
                # GAE requests time out after 60 seconds, so make sure we leave
                # some time for the application to handle a slow Scriber
                deadline=55,
                payload=post_data
            )
        except urlfetch.Error as e:
            self._handle_request_error(e, url)
        return result.content, result.status_code
    def _handle_request_error(self, e, url):
        """Translate a urlfetch.Error into an error.APIConnectionError."""
        if isinstance(e, urlfetch.InvalidURLError):
            msg = ("The Scriber library attempted to fetch an "
                   "invalid URL (%r). This is likely due to a bug "
                   "in the Scriber Python bindings. Please let us know "
                   "at support@scriber.io." % (url,))
        elif isinstance(e, urlfetch.DownloadError):
            msg = "There was a problem retrieving data from Scriber."
        elif isinstance(e, urlfetch.ResponseTooLargeError):
            msg = ("There was a problem receiving all of your data from "
                   "Scriber. This is likely due to a bug in Scriber. "
                   "Please let us know at support@scriber.io.")
        else:
            msg = ("Unexpected error communicating with Scriber. If this "
                   "problem persists, let us know at support@scriber.io.")
        msg = textwrap.fill(msg) + "\n\n(Network error: " + str(e) + ")"
        raise error.APIConnectionError(msg)
class PycurlClient(HTTPClient):
    """HTTP transport backed by pycurl; verifies SSL certs via the bundled CAs.

    Fixed for Python 2/3 compatibility: dict.iteritems() and subscripting
    exceptions directly (e[0]) only work on Python 2.
    """
    name = 'pycurl'
    def request(self, method, url, headers, post_data=None):
        """Perform *method* on *url* and return (body, status_code)."""
        s = util.StringIO.StringIO()
        curl = pycurl.Curl()
        if method == 'get':
            curl.setopt(pycurl.HTTPGET, 1)
        elif method == 'post':
            curl.setopt(pycurl.POST, 1)
            curl.setopt(pycurl.POSTFIELDS, post_data)
        else:
            curl.setopt(pycurl.CUSTOMREQUEST, method.upper())
        # pycurl doesn't like unicode URLs
        curl.setopt(pycurl.URL, util.utf8(url))
        curl.setopt(pycurl.WRITEFUNCTION, s.write)
        curl.setopt(pycurl.NOSIGNAL, 1)
        curl.setopt(pycurl.CONNECTTIMEOUT, 30)
        curl.setopt(pycurl.TIMEOUT, 80)
        # Bug fix: items() works on both Python 2 and 3; the original used
        # iteritems(), which raises AttributeError on Python 3.
        curl.setopt(pycurl.HTTPHEADER, ['%s: %s' % (k, v)
                                        for k, v in headers.items()])
        if self._verify_ssl_certs:
            curl.setopt(pycurl.CAINFO, os.path.join(
                os.path.dirname(__file__), 'data/ca-certificates.crt'))
        else:
            curl.setopt(pycurl.SSL_VERIFYHOST, False)
        try:
            curl.perform()
        except pycurl.error as e:
            self._handle_request_error(e)
        rbody = s.getvalue()
        rcode = curl.getinfo(pycurl.RESPONSE_CODE)
        return rbody, rcode
    def _handle_request_error(self, e):
        """Translate a pycurl.error into an error.APIConnectionError."""
        # Bug fix: pycurl.error carries (errno, message); exceptions are not
        # subscriptable on Python 3, so unpack via e.args instead of e[0]/e[1].
        errno = e.args[0]
        errstr = e.args[1]
        if errno in [pycurl.E_COULDNT_CONNECT,
                     pycurl.E_COULDNT_RESOLVE_HOST,
                     pycurl.E_OPERATION_TIMEOUTED]:
            msg = ("Could not connect to Scriber. Please check your "
                   "internet connection and try again. If this problem "
                   "persists, you should check Scriber's service status at "
                   "https://twitter.com/scriber, or let us know at "
                   "support@scriber.io.")
        elif errno in [pycurl.E_SSL_CACERT,
                       pycurl.E_SSL_PEER_CERTIFICATE]:
            msg = ("Could not verify Scriber's SSL certificate. Please make "
                   "sure that your network is not intercepting certificates. "
                   "If this problem persists, let us know at "
                   "support@scriber.io.")
        else:
            msg = ("Unexpected error communicating with Scriber. If this "
                   "problem persists, let us know at support@scriber.io.")
        msg = textwrap.fill(msg) + "\n\n(Network error: " + errstr + ")"
        raise error.APIConnectionError(msg)
class Urllib2Client(HTTPClient):
    """Fallback HTTP transport using urllib2 (does NOT verify certificates).

    NOTE(review): on Python 3 the module-level `import urllib2` fails and is
    silently swallowed, so this client presumes a Python 3 alias for
    `urllib2` (e.g. urllib.request) is provided — confirm before relying
    on it there.
    """
    if sys.version_info >= (3, 0):
        name = 'urllib.request'
    else:
        name = 'urllib2'
    def request(self, method, url, headers, post_data=None):
        """Perform *method* on *url* and return (body, status_code)."""
        # Bug fix: the original tested isinstance(post_data, basestring), but
        # `basestring` does not exist on Python 3 and raised NameError; on
        # Python 3 a str body must be encoded to bytes for urllib.
        if sys.version_info >= (3, 0) and isinstance(post_data, str):
            post_data = post_data.encode('utf-8')
        req = urllib2.Request(url, post_data, headers)
        # urllib2 only knows GET/POST natively; force other verbs
        if method not in ('get', 'post'):
            req.get_method = lambda: method.upper()
        try:
            response = urllib2.urlopen(req)
            rbody = response.read()
            rcode = response.code
        except urllib2.HTTPError as e:
            # HTTP-level errors still carry a usable status and body
            rcode = e.code
            rbody = e.read()
        except (urllib2.URLError, ValueError) as e:
            self._handle_request_error(e)
        return rbody, rcode
    def _handle_request_error(self, e):
        """Raise an error.APIConnectionError describing the network failure."""
        msg = ("Unexpected error communicating with Scriber. "
               "If this problem persists, let us know at support@scriber.io.")
        msg = textwrap.fill(msg) + "\n\n(Network error: " + str(e) + ")"
        raise error.APIConnectionError(msg)
|
|
import yaml
import socket
import argparse
import datetime
import setproctitle
import gevent
import gevent.hub
import signal
import subprocess
import powerpool
import time
import logging
import sys
from gevent_helpers import BlockingDetector
from gevent import sleep
from gevent.monkey import patch_all
from gevent.server import DatagramServer
# NOTE(review): gevent recommends calling patch_all() before importing any
# module that uses sockets/threads; here it runs after the imports above —
# confirm this ordering is intentional.
patch_all()
from .utils import import_helper
from .lib import MinuteStatManager, SecondStatManager, Component
from .jobmanagers import Jobmanager
from .reporters import Reporter
from .stratum_server import StratumServer
def main():
    """Entry point: parse the command line, load the YAML configuration and
    start the PowerPool server (or just dump the parsed config with -d)."""
    parser = argparse.ArgumentParser(description='Run powerpool!')
    parser.add_argument('config', type=argparse.FileType('r'),
                        help='yaml configuration file to run with')
    parser.add_argument('-d', '--dump-config', action="store_true",
                        help='print the result of the YAML configuration file and exit')
    parser.add_argument('-s', '--server-number', type=int, default=0,
                        help='increase the configued server_number by this much')
    args = parser.parse_args()
    # override those defaults with a loaded yaml config
    # NOTE(review): yaml.load without an explicit Loader can construct
    # arbitrary Python objects; if the config file is ever untrusted this
    # should be yaml.safe_load.
    raw_config = yaml.load(args.config) or {}
    if args.dump_config:
        import pprint
        pprint.pprint(raw_config)
        exit(0)
    PowerPool.from_raw_config(raw_config, vars(args)).start()
class PowerPool(Component, DatagramServer):
    """Singleton manager component for the whole server.

    Handles starting/stopping of the server and rotation of all statistical
    counters. It takes the raw config and distributes it to each module, as
    well as loading dynamic modules.

    It also acts as the central logging registry: each module can "register"
    a logger with the main object, which attaches it to the configured
    handlers.
    """
    # The manager of the manager; left as None (this object IS the manager).
    manager = None
    # Greenlet methods spawned by Component.start().
    gl_methods = ['_tick_stats']
    # Default configuration, overridden per-key by the YAML config.
    defaults = dict(procname="powerpool",
                    term_timeout=10,
                    extranonce_serv_size=4,
                    extranonce_size=4,
                    default_component_log_level='INFO',
                    loggers=[{'type': 'StreamHandler', 'level': 'NOTSET'}],
                    events=dict(enabled=False, port=8125, host="127.0.0.1"),
                    datagram=dict(enabled=False, port=6855, host="127.0.0.1"),
                    server_number=0,
                    algorithms=dict(
                        x11={"module": "drk_hash.getPoWHash",
                             "hashes_per_share": 4294967296},  # 2^32
                        scrypt={"module": "ltc_scrypt.getPoWHash",
                                "hashes_per_share": 65536},  # 2^16
                        scryptn={"module": "vtc_scrypt.getPoWHash",
                                 "hashes_per_share": 65536},
                        blake256={"module": "blake_hash.getPoWHash",
                                  "hashes_per_share": 65536},
                        sha256={"module": "cryptokit.sha256d",
                                "hashes_per_share": 4294967296},
                        lyra2re={"module": "lyra2re_hash.getPoWHash",
                                 "hashes_per_share": 33554432}  # 2^25
                    ))

    @classmethod
    def from_raw_config(self, raw_config, args):
        """Instantiate every component from the raw YAML config and return
        the (single) PowerPool instance wired up with all of them.

        NOTE(review): despite being a classmethod, the first parameter is
        named ``self`` — it is actually the class.
        """
        components = {}
        types = [PowerPool, Reporter, Jobmanager, StratumServer]
        component_types = {cls.__name__: [] for cls in types}
        component_types['other'] = []
        for key, config in raw_config.iteritems():
            typ = import_helper(config['type'])

            # Pass the commandline arguments to the manager component
            if issubclass(typ, PowerPool):
                config['args'] = args

            obj = typ(config)
            obj.key = key
            # Bucket each instance under the first base type it matches;
            # anything that matches none goes into 'other'.
            for typ in types:
                if isinstance(obj, typ):
                    component_types[typ.__name__].append(obj)
                    break
            else:
                component_types['other'].append(obj)
            components[key] = obj

        # Exactly one PowerPool component must be configured.
        pp = component_types['PowerPool'][0]
        assert len(component_types['PowerPool']) == 1
        pp.components = components
        pp.component_types = component_types
        return pp

    def __init__(self, config):
        self._configure(config)
        self._log_handlers = []
        # Parse command line args: the CLI offset is added to the configured
        # server_number and baked into the process name.
        self.config['server_number'] += self.config['args']['server_number']
        self.config['procname'] += "_{}".format(self.config['server_number'])
        # setup all our log handlers
        for log_cfg in self.config['loggers']:
            if log_cfg['type'] == "StreamHandler":
                kwargs = dict(stream=sys.stdout)
            else:
                kwargs = dict()
            handler = getattr(logging, log_cfg['type'])(**kwargs)
            log_level = getattr(logging, log_cfg['level'].upper())
            handler.setLevel(log_level)
            fmt = log_cfg.get('format', '%(asctime)s [%(name)s] [%(levelname)s] %(message)s')
            formatter = logging.Formatter(fmt)
            handler.setFormatter(formatter)
            # 'listen' restricts which logger names this handler attaches to
            # (see register_logger); None/blank means "all loggers".
            self._log_handlers.append((log_cfg.get('listen'), handler))
        self.logger = self.register_logger(self.__class__.__name__)

        setproctitle.setproctitle(self.config['procname'])
        self.version = powerpool.__version__
        self.version_info = powerpool.__version_info__
        self.sha = getattr(powerpool, '__sha__', "unknown")
        self.rev_date = getattr(powerpool, '__rev_date__', "unknown")
        if self.sha == "unknown":
            # try and fetch the git version information
            try:
                output = subprocess.check_output("git show -s --format='%ci %h'",
                                                 shell=True).strip().rsplit(" ", 1)
                self.sha = output[1]
                self.rev_date = output[0]
            # celery won't work with this, so set some default
            except Exception as e:
                self.logger.info("Unable to fetch git hash info: {}".format(e))

        self.algos = {}
        self.server_start = datetime.datetime.utcnow()
        self.logger.info("=" * 80)
        self.logger.info("PowerPool stratum server ({}) starting up..."
                         .format(self.config['procname']))
        if __debug__:
            self.logger.warn(
                "Python not running in optimized mode. For better performance "
                "set enviroment variable PYTHONOPTIMIZE=2")
            # Only try to detect blocking if running in debug mode.
            # NOTE: BlockingDetector can cause (rare) PowerPool crashes
            gevent.spawn(BlockingDetector(raise_exc=False))

        # Detect and load all the hash functions we can find; algorithms
        # whose hashing module is not installed get module=None.
        for name, algo_data in self.config['algorithms'].iteritems():
            self.algos[name] = algo_data.copy()
            self.algos[name]['name'] = name
            mod = algo_data['module']
            try:
                self.algos[name]['module'] = import_helper(mod)
            except ImportError:
                self.algos[name]['module'] = None
            else:
                self.logger.info("Enabling {} hashing algorithm from module {}"
                                 .format(name, mod))

        # UDP socket used to emit statsd-formatted events (see log_event).
        self.event_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.events_enabled = self.config['events']['enabled']
        if self.events_enabled:
            self.logger.info("Transmitting statsd formatted stats to {}:{}".format(
                self.config['events']['host'], self.config['events']['port']))
        self.events_address = (self.config['events']['host'].encode('utf8'),
                               self.config['events']['port'])

        # Setup all our stat managers
        self._min_stat_counters = []
        self._sec_stat_counters = []

        if self.config['datagram']['enabled']:
            # Each server_number gets its own control port offset.
            listener = (self.config['datagram']['host'],
                        self.config['datagram']['port'] +
                        self.config['server_number'])
            self.logger.info("Turning on UDP control server on {}"
                             .format(listener))
            DatagramServer.__init__(self, listener, spawn=None)

    def handle(self, data, address):
        """UDP control-server entry point.

        Command format: ``<component_key> <method> [arg ...] [k=v ...]``;
        pass ``__spawn=<truthy>`` to run the method in its own greenlet.
        """
        self.logger.info("Recieved new command {}".format(data))
        parts = data.split(" ")
        try:
            component = self.components[parts[0]]
            func = getattr(component, parts[1])
            kwargs = {}
            args = []
            # Split remaining tokens into positional and keyword arguments.
            for arg in parts[2:]:
                if "=" in arg:
                    k, v = arg.split("=", 1)
                    kwargs[k] = v
                else:
                    args.append(arg)

            if kwargs.pop('__spawn', False):
                gevent.spawn(func, *args, **kwargs)
            else:
                func(*args, **kwargs)
        except AttributeError:
            self.logger.warn("Component {} doesn't have a method {}"
                             .format(*parts))
        except KeyError:
            self.logger.warn("Component {} doesn't exist".format(*parts))
        except Exception:
            self.logger.warn("Error in called function {}!".format(data),
                             exc_info=True)

    def log_event(self, event):
        # Fire-and-forget UDP send; silently skipped when events are disabled.
        if self.events_enabled:
            self.event_socket.sendto(event, self.events_address)

    def start(self):
        """Start every component, register shutdown signals, then block until
        an exit is requested; finally give components ``term_timeout`` seconds
        to stop cleanly."""
        self.register_logger("gevent_helpers")
        for comp in self.components.itervalues():
            comp.manager = self
            comp.counters = self.register_stat_counters(comp, comp.one_min_stats, comp.one_sec_stats)

            if comp is not self:
                comp.logger = self.register_logger(comp.name)
                comp.start()

        # Starts the greenlet
        Component.start(self)

        # Start the datagram control server if it's been inited
        if self.config['datagram']['enabled']:
            DatagramServer.start(self, )

        # This is the main thread of execution, so just continue here waiting
        # for exit signals
        ######
        # Register shutdown signals
        # NOTE(review): SIGHUP/SIGINT/SIGTERM are bound to the *builtin*
        # ``exit`` (SystemExit), not ``self.exit`` defined below — confirm
        # whether ``self.exit`` was intended here.
        gevent.signal(signal.SIGUSR1, self.dump_objgraph)
        gevent.signal(signal.SIGHUP, exit, "SIGHUP")
        gevent.signal(signal.SIGINT, exit, "SIGINT")
        gevent.signal(signal.SIGTERM, exit, "SIGTERM")
        try:
            gevent.wait()
        # Allow a force exit from multiple exit signals
        finally:
            self.logger.info("Exiting requested, allowing {} seconds for cleanup."
                             .format(self.config['term_timeout']))
            try:
                for comp in self.components.itervalues():
                    self.logger.debug("Calling stop on component {}".format(comp))
                    comp.stop()
                if gevent.wait(timeout=self.config['term_timeout']):
                    self.logger.info("All threads exited normally")
                else:
                    self.logger.info("Timeout reached, shutting down forcefully")
            except gevent.GreenletExit:
                self.logger.info("Shutdown requested again by system, "
                                 "exiting without cleanup")

        self.logger.info("Exit")
        self.logger.info("=" * 80)

    def dump_objgraph(self):
        # SIGUSR1 handler: print object growth stats for leak hunting.
        # Requires the third-party 'objgraph' package at call time.
        import gc
        gc.collect()
        import objgraph
        print "Dumping object growth ****"
        objgraph.show_growth(limit=100)
        print "****"

    def exit(self, signal=None):
        """ Handle an exit request """
        self.logger.info("{} {}".format(signal, "*" * 80))
        # Kill the top level greenlet
        gevent.kill(gevent.hub.get_hub().parent)

    @property
    def status(self):
        """ For display in the http monitor """
        return dict(uptime=str(datetime.datetime.utcnow() - self.server_start),
                    server_start=str(self.server_start),
                    version=dict(
                        version=self.version,
                        version_info=self.version_info,
                        sha=self.sha,
                        rev_date=self.rev_date)
                    )

    def _tick_stats(self):
        """ A greenlet that handles rotation of statistics """
        last_tick = int(time.time())
        # Align minute rotation to wall-clock minute boundaries.
        last_send = (last_tick // 60) * 60
        while True:
            now = time.time()
            # time to rotate minutes?
            if now > (last_send + 60):
                for manager in self._min_stat_counters:
                    manager.tock()
                for manager in self._sec_stat_counters:
                    manager.tock()
                last_send += 60

            # time to tick?
            if now > (last_tick + 1):
                for manager in self._sec_stat_counters:
                    manager.tick()
                last_tick += 1

            # Sleep until roughly the next one-second boundary.
            sleep(last_tick - time.time() + 1.0)

    def register_logger(self, name):
        """Create a logger for *name* and attach the configured handlers.

        Handlers whose 'listen' key is blank attach to every logger;
        otherwise only to loggers whose name appears in the key list.
        """
        logger = logging.getLogger(name)
        logger.setLevel(getattr(logging, self.config['default_component_log_level']))
        for keys, handler in self._log_handlers:
            # If the keys are blank then we assume it wants all loggers
            # registered
            if not keys or name in keys:
                logger.addHandler(handler)

        return logger

    def register_stat_counters(self, comp, min_counters, sec_counters=None):
        """ Creates and adds the stat counters to internal tracking dictionaries.
        These dictionaries are iterated to perform stat rotation, as well
        as accessed to perform stat logging """
        counters = {}
        for key in min_counters:
            new = MinuteStatManager()
            new.owner = comp
            new.key = key
            counters[key] = new
            self._min_stat_counters.append(new)

        for key in sec_counters or []:
            new = SecondStatManager()
            new.owner = comp
            new.key = key
            counters[key] = new
            self._sec_stat_counters.append(new)
        return counters
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add ``ArtefactRepresentation.photographer``.

    Auto-generated; the ``models`` dict below is a frozen snapshot of the
    ORM at the time this migration was created and must not be edited by
    hand.
    """

    def forwards(self, orm):
        """Apply the migration."""
        # Adding field 'ArtefactRepresentation.photographer'
        db.add_column('mediaman_artefactrepresentation', 'photographer',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=200, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        """Reverse the migration."""
        # Deleting field 'ArtefactRepresentation.photographer'
        db.delete_column('mediaman_artefactrepresentation', 'photographer')

    # Frozen ORM snapshot used by South to build the fake ORM passed to
    # forwards()/backwards(). Do not edit manually.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'cat.accessstatus': {
            'Meta': {'object_name': 'AccessStatus'},
            'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
        },
        'cat.acquisitionmethod': {
            'Meta': {'object_name': 'AcquisitionMethod'},
            'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'method': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
            'preposition': ('django.db.models.fields.CharField', [], {'max_length': '30'})
        },
        'cat.artefacttype': {
            'Meta': {'ordering': "['name']", 'object_name': 'ArtefactType'},
            'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '150'}),
            'see_also': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'})
        },
        'cat.category': {
            'Meta': {'ordering': "['parent__name', 'name']", 'unique_together': "(('slug', 'parent'), ('name', 'parent'))", 'object_name': 'Category'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'icon_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'icon_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cat.Category']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
            'suggested_artefact_types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'categories'", 'null': 'True', 'to': "orm['cat.ArtefactType']"})
        },
        'cat.culturalbloc': {
            'Meta': {'ordering': "['name']", 'object_name': 'CulturalBloc'},
            'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30', 'db_index': 'True'})
        },
        'cat.functionalcategory': {
            'Meta': {'ordering': "['name']", 'object_name': 'FunctionalCategory'},
            'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'cat.loanstatus': {
            'Meta': {'object_name': 'LoanStatus'},
            'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
        },
        'cat.museumobject': {
            'Meta': {'ordering': "['registration_number']", 'object_name': 'MuseumObject'},
            'access_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cat.AccessStatus']", 'null': 'True'}),
            'acquisition_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'acquisition_method': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cat.AcquisitionMethod']", 'null': 'True'}),
            'artefact_illustrated': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'artefact_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cat.ArtefactType']"}),
            'assoc_cultural_group': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'category': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cat.Category']", 'symmetrical': 'False', 'blank': 'True'}),
            'category_illustrated': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'circumference': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'collector': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'collected_objects'", 'null': 'True', 'to': "orm['parties.Person']"}),
            'collector_2': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'collected_objects_2'", 'null': 'True', 'to': "orm['parties.Person']"}),
            'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'country': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': "orm['location.Country']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'creation_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'cultural_bloc': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cat.CulturalBloc']", 'null': 'True', 'blank': 'True'}),
            'depth': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'donor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'donated_objects'", 'null': 'True', 'to': "orm['parties.Person']"}),
            'donor_2': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'donated_objects_2'", 'null': 'True', 'to': "orm['parties.Person']"}),
            'exhibition_history': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'functional_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cat.FunctionalCategory']", 'null': 'True', 'blank': 'True'}),
            'global_region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['location.GlobalRegion']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'how_collector_obtained': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'collector_obtained'", 'null': 'True', 'to': "orm['cat.Obtained']"}),
            'how_donor_obtained': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'donor_obtained'", 'null': 'True', 'to': "orm['cat.Obtained']"}),
            'how_source_obtained': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'source_obtained'", 'null': 'True', 'to': "orm['cat.Obtained']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indigenous_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'is_public_comment': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'length': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'loan_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cat.LoanStatus']", 'null': 'True'}),
            'locality': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': "orm['location.Locality']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'maker': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'created_items'", 'null': 'True', 'to': "orm['parties.Person']"}),
            'manufacture_technique': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'old_maker': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'old_maker'", 'null': 'True', 'to': "orm['parties.Maker']"}),
            'old_registration_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'other_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'photographer': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'place': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['location.Place']", 'null': 'True'}),
            'private_comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'raw_material': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
            'record_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cat.RecordStatus']", 'null': 'True', 'blank': 'True'}),
            'recorded_use': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
            'reg_counter': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'reg_info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'region_district': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': "orm['location.RegionDistrict']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'registered_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['parties.MuseumStaff']", 'null': 'True', 'blank': 'True'}),
            'registration_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'registration_number': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'db_index': 'True'}),
            'related_documents': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['mediaman.Document']", 'null': 'True', 'blank': 'True'}),
            'significance': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'site_name_number': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
            'source': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'state_province': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': "orm['location.StateProvince']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'storage_bay': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'}),
            'storage_section': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'}),
            'storage_shelf_box_drawer': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'}),
            'storage_unit': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'}),
            'when_collector_obtained': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'when_donor_obtained': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        'cat.obtained': {
            'Meta': {'object_name': 'Obtained'},
            'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'how': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'cat.recordstatus': {
            'Meta': {'object_name': 'RecordStatus'},
            'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'location.country': {
            'Meta': {'ordering': "['name']", 'unique_together': "(('parent', 'slug'),)", 'object_name': 'Country'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'gn_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
            'gn_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'on_delete': 'models.PROTECT', 'to': "orm['location.GlobalRegion']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
        },
        'location.globalregion': {
            'Meta': {'ordering': "['name']", 'object_name': 'GlobalRegion'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'gn_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
            'gn_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'icon_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'icon_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
        },
        'location.locality': {
            'Meta': {'ordering': "['name']", 'unique_together': "(('parent', 'slug'),)", 'object_name': 'Locality'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'gn_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
            'gn_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'on_delete': 'models.PROTECT', 'to': "orm['location.RegionDistrict']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
        },
        'location.place': {
            'Meta': {'ordering': "['id']", 'object_name': 'Place'},
            'australian_state': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
            'country': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'gn_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
            'gn_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_corrected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
            'region': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'})
        },
        'location.regiondistrict': {
            'Meta': {'ordering': "['name']", 'unique_together': "(('parent', 'slug'),)", 'object_name': 'RegionDistrict'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'gn_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
            'gn_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'on_delete': 'models.PROTECT', 'to': "orm['location.StateProvince']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
        },
        'location.stateprovince': {
            'Meta': {'ordering': "['name']", 'unique_together': "(('parent', 'slug'),)", 'object_name': 'StateProvince'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'gn_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
            'gn_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'on_delete': 'models.PROTECT', 'to': "orm['location.Country']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
        },
        'mediaman.artefactrepresentation': {
            'Meta': {'ordering': "['position']", 'unique_together': "(('artefact', 'md5sum'),)", 'object_name': 'ArtefactRepresentation'},
            'artefact': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cat.MuseumObject']"}),
            'filesize': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'md5sum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'mime_type': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'original_filedate': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'original_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'photographer': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
            'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'upload_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'uploaded_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"})
        },
        'mediaman.document': {
            'Meta': {'object_name': 'Document'},
            'document': ('django.db.models.fields.files.FileField', [], {'max_length': '255'}),
            'document_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'filesize': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'md5sum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
            'mime_type': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'original_filedate': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'original_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'upload_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'uploaded_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"})
        },
        'parties.maker': {
            'Meta': {'ordering': "['name']", 'object_name': 'Maker'},
            'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
        },
        'parties.museumstaff': {
            'Meta': {'ordering': "['name']", 'object_name': 'MuseumStaff'},
            'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
        },
        'parties.person': {
            'Meta': {'ordering': "['name']", 'object_name': 'Person'},
            'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'display_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
            'related_documents': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'related_people'", 'blank': 'True', 'to': "orm['mediaman.Document']"})
        }
    }

    complete_apps = ['mediaman']
|
|
#!/usr/bin/env python
import numpy as np
from astropy.io import fits
import os
def logHeader(logfilename):
    """Create (or overwrite) the log file and write the catalogue header line.

    Parameters
    ----------
    logfilename : str
        Path of the log file to (re)create.
    """
    # 'with' guarantees the handle is closed even if the write raises,
    # unlike the open()/close() pair it replaces.
    with open(logfilename, 'w') as logfile:
        logfile.write('# filename tile ob ra dec mjdobs expTime airmass seeing_prhdu seeing_median esograde ext1count ext2count ext3count ext4count ext5count ext6count ext7count ext8count ext9count ext10count ext11count ext12count ext13count ext14count ext15count ext16count\n')
def makecat(infile, outfile, fitstype='ext', logfilename=None):
    """Convert a CASU-style multi-extension FITS catalogue into an output
    FITS table file, and optionally append a one-line summary to a log.

    infile      -- path to the input multi-extension FITS catalogue
    outfile     -- path for the output FITS file
    fitstype    -- 'ext' (default): one output table per input extension;
                   'all': concatenate every extension into a single table
    logfilename -- optional log file path; created with a header via
                   logHeader() if it does not exist yet
    """
    # by default will save each array extension to a different output table
    # to concatenate all array extensions into one output table use the keyword
    # fitstype='all'
    # by default does not keep a log (i.e. generate a catalogue of catalogues)
    # to keep a log give a log file path as a keyword, e.g.
    # logfilename='/path/to/logfile.dat'
    # add a header to log file if log file does not exist
    if logfilename:
        if not os.path.isfile(logfilename):
            logHeader(logfilename)
    # needed if we want all array extensions as one output table
    # (12 rows: one per output column stacked by np.vstack below)
    if fitstype=='all':
        output = np.array([]).reshape(12,0)
    print infile
    # verify infile is fits file
    try:
        # try opening the FITS file
        datafile = fits.open(infile)
    except IOError:
        # if not, print a warning to screen and log
        print infile, 'is likely not a complete FITS file'
        if logfilename:
            # write message to log file
            logfile = open(logfilename, 'a')
            logfile.write(infile + ' IOError:' + ' is likely not a complete FITS file' + '\n')
            logfile.close()
        return
    # grab some info from the headers
    Object = datafile[0].header['object']
    mjdobs = datafile[0].header['MJD-OBS']
    RA = datafile[0].header['RA']
    DEC = datafile[0].header['DEC']
    # ESO hierarchical keywords: airmass/seeing at exposure start and end
    airmassB = datafile[0].header['HIERARCH ESO TEL AIRM START']
    airmassE = datafile[0].header['HIERARCH ESO TEL AIRM END']
    seeingB = datafile[0].header['HIERARCH ESO TEL AMBI FWHM START']
    seeingE = datafile[0].header['HIERARCH ESO TEL AMBI FWHM END']
    ESOgrade = datafile[0].header['ESOGRADE']
    expTime = datafile[0].header['EXPTIME']
    OBname = datafile[0].header['HIERARCH ESO OBS NAME']
    # calculate mean airmass and seeing
    airmass = np.mean([airmassB,airmassE])
    seeing = np.mean([seeingB,seeingE])
    # add an eso grade if one is not present
    # L stands for Leigh, aww yiss
    if (len(ESOgrade) < 1):
        ESOgrade = 'L'
    if logfilename:
        # first half of the log line; the seeing median/grade/counts are
        # appended after all extensions have been processed
        dataString = '%(file)s %(tile)s %(obname)s %(ra)s %(de)s %(mjdobs)s %(exp)s %(airmass)s %(seeing)s ' % \
            {"file": infile,
             "tile": Object,
             "obname": OBname,
             "ra": RA,
             "de": DEC,
             "mjdobs": mjdobs,
             "exp": expTime,
             "airmass": airmass,
             "seeing": seeing}
    # make fits header
    prihdr = fits.Header()
    prihdr['tile'] = Object
    prihdr['mjdobs'] = mjdobs
    prihdr['RA'] = RA
    prihdr['Dec'] = DEC
    prihdr['seeing'] = seeing
    prihdr['airmass'] = airmass
    prihdr['ESOgrade'] = ESOgrade
    prihdr['expTime'] = expTime
    # add header to fits file
    prihdu = fits.PrimaryHDU(header=prihdr)
    thdulist = fits.HDUList([prihdu])
    # open seeing values array
    seeingVals = np.array([])
    # open chipcounts string
    chipcounts = ''
    # process each table
    for ext in range(1,len(datafile)):
        # make sure the table extension is readable (i.e. not corrupted)
        try:
            data = datafile[ext].data
        except ValueError:
            # if corrupted print message
            print 'Extension', ext, 'is corrupted'
            # save 0 rows to log file and move to next extension
            chipcounts = chipcounts + str(0) + ' '
            continue
        else:
            # if data was read save the number of sources to log file and carry on
            chipcounts = chipcounts + str(datafile[ext].header['NAXIS2']) + ' '
        # grab seeing data for this extension
        seeing = datafile[ext].header['SEEING']
        #################################################################################
        # XY to RADEC conversion
        # lifted from fitsio_cat_list:
        #http://casu.ast.cam.ac.uk/surveys-projects/software-release/fitsio_cat_list.f/view
        #################################################################################
        tpa = datafile[ext].header['TCRVL3']
        tpd = datafile[ext].header['TCRVL5']
        a = datafile[ext].header['TC3_3']
        b = datafile[ext].header['TC3_5']
        c = datafile[ext].header['TCRPX3']
        d = datafile[ext].header['TC5_3']
        e = datafile[ext].header['TC5_5']
        f = datafile[ext].header['TCRPX5']
        projp1 = 1.0
        projp3 = datafile[ext].header['TV5_3']
        projp5 = datafile[ext].header['TV5_5']
        tpa, tpd = np.radians([tpa, tpd])
        tand = np.tan(tpd)
        secd = 1.0/np.cos(tpd)
        a, e, b, d = np.radians([a, e, b, d])
        #pixel scale (deg/px)
        pltscl = np.degrees(np.sqrt(0.5*(a**2+b**2+d**2+e**2)))
        #seeing = seeing * pltscl * 3.6E3
        # seeing converted from pixels to arcsec (pltscl deg/px * 3600)
        seeingVals = np.append(seeingVals, (seeing * pltscl * 3.6E3))
        x = np.float_(data['X_coordinate'])
        y = np.float_(data['Y_coordinate'])
        x = x-c
        y = y-f
        xi = a*x+b*y
        xn = d*x+e*y
        rp = np.hypot(xi,xn)
        r = rp
        r = rp / (projp1 + projp3*r**2 + projp5*r**4) #! NB 1st order approx
        r = rp / (projp1 + projp3*r**2 + projp5*r**4) #! 2nd order correction
        r = rp / (projp1 + projp3*r**2 + projp5*r**4) #! 3rd order correction
        rfac = np.tan(r) / rp
        xi = xi*rfac
        xn = xn*rfac
        aa = np.arctan(xi*secd/(1.0-xn*tand))
        alpha = np.degrees((aa+tpa) % (2*np.pi))
        delta = np.degrees(np.arctan((xn+tand)*np.sin(aa)/(xi*secd)))
        #################################################################################
        #################################################################################
        #################################################################################
        # converts x,y coordinates to standard coordinates and works out flux distortion factor
        # lifted from fitsio_cat_list:
        #http://casu.ast.cam.ac.uk/surveys-projects/software-release/fitsio_cat_list.f/view
        #################################################################################
        x = np.float_(data['X_coordinate'])
        y = np.float_(data['Y_coordinate'])
        xi = a*(x-c) + b*(y-f)
        xn = d*(x-c) + e*(y-f)
        r = np.hypot(xi,xn)
        distortcorr = 1.0+3.0*projp3*r**2/projp1+5.0*projp5*r**4/projp1
        distortcorr = distortcorr*(1.0+(projp3*r**2+projp5*r**4)/projp1)
        distortcorr = 1.0/distortcorr
        apcor = datafile[ext].header['APCOR2']
        delapcor = 10.0**(0.4*(datafile[ext].header['APCOR3']-datafile[ext].header['APCOR5']))
        apcor = 10.0**(0.4*apcor)
        percorr = 0.0
        percorr = 10.0**(0.4*percorr)
        # NOTE(review): the zero point uses the END-of-exposure airmass
        # (airmassE), not the mean airmass computed above — confirm intended.
        magzpt = (datafile[ext].header['MAGZPT']-
                  (airmassE-1.0)*datafile[ext].header['EXTINCT']
                  +2.5*np.log10(datafile[ext].header['EXPTIME']))
        flux = np.maximum(0.1,np.float_(data['Aper_flux_2']))
        fluxerr = np.maximum(1.0,np.float_(data['Aper_flux_2_err']))
        #################################################################################
        #################################################################################
        #################################################################################
        # flags saturated or bad pixel sources
        # fixes saturated fluxes
        # lifted from fitsio_cat_list:
        #http://casu.ast.cam.ac.uk/surveys-projects/software-release/fitsio_cat_list.f/view
        #################################################################################
        saturate=0.9*(datafile[ext].header['SATURATE']-datafile[ext].header['SKYLEVEL'])
        clss = data['Classification']
        clss[np.float_(data['Peak_height'])>saturate] = -9
        aperflux3 = np.float_(data['Aper_flux_3'])
        aperflux5 = np.float_(data['Aper_flux_5'])
        # NOTE(review): 'fixed' aliases 'flux' (no copy), so the assignment
        # below mutates 'flux' too and the following np.maximum compares the
        # array with itself — confirm whether flux.copy() was intended.
        fixed = flux
        fixed[clss==-9] = np.maximum(0.1,(
            (aperflux5[clss==-9]-aperflux3[clss==-9])
            /(delapcor-1.0)))
        flux = np.maximum(fixed,flux)
        clss[data['Error_bit_flag']>0.0] = -7
        #################################################################################
        #################################################################################
        #################################################################################
        # finish off mags
        # lifted from fitsio_cat_list:
        #http://casu.ast.cam.ac.uk/surveys-projects/software-release/fitsio_cat_list.f/view
        #################################################################################
        flux = flux*apcor*percorr*distortcorr
        fluxerr = fluxerr*apcor*percorr*distortcorr
        magobj = magzpt-2.5*np.log10(flux)
        magerr = 2.5*np.log10(1.0+fluxerr/flux)
        magerr = np.maximum(0.01,magerr)
        #################################################################################
        #################################################################################
        # generate extNum array
        # NOTE(review): int8 caps the extension number at 127, and the int8
        # mjdobs template is upcast on multiplication by the float mjdobs —
        # verify the resulting dtypes are as intended.
        extNums = np.ones(datafile[ext].header['NAXIS2'], dtype=np.int8) * ext
        mjdobs_col = np.ones(datafile[ext].header['NAXIS2'], dtype=np.int8) * mjdobs
        if fitstype=='ext':
            # append data to fits table
            tbhdu = fits.BinTableHDU.from_columns(
                [fits.Column(name='RA', format='D', array=alpha, unit='degrees'),
                 fits.Column(name='Dec', format='D', array=delta, unit='degrees'),
                 fits.Column(name='X', format='D', array=data['X_coordinate']),
                 fits.Column(name='Y', format='D', array=data['Y_coordinate']),
                 fits.Column(name='e_X', format='D', array=data['X_coordinate_err']),
                 fits.Column(name='e_Y', format='D', array=data['Y_coordinate_err']),
                 fits.Column(name='mag', format='E', array=magobj, unit='mag'),
                 fits.Column(name='e_mag', format='E', array=magerr, unit='mag'),
                 fits.Column(name='ext', format='I2', array=extNums),
                 fits.Column(name='class', format='I1', array=clss),
                 fits.Column(name='ell', format='E', array=data['Ellipticity']),
                 fits.Column(name='mjdobs', format='D', array=mjdobs_col)])
            thdulist.append(tbhdu)
        elif fitstype=='all':
            extData = np.vstack((alpha,delta,
                                 data['X_coordinate'],data['Y_coordinate'],
                                 data['X_coordinate_err'],data['Y_coordinate_err'],
                                 magobj,magerr,
                                 extNums,clss,
                                 data['Ellipticity'], mjdobs_col))
            output = np.hstack((output, extData))
        else:
            print 'WHAT DO YOU WANT ME TO DO??'
            print 'unrecognized fits type'
    if fitstype=='all':
        tbhdu = fits.BinTableHDU.from_columns(
            [fits.Column(name='RA', format='D', array=output[0], unit='degrees'),
             fits.Column(name='Dec', format='D', array=output[1], unit='degrees'),
             fits.Column(name='X', format='D', array=output[2]),
             fits.Column(name='Y', format='D', array=output[3]),
             fits.Column(name='e_X', format='D', array=output[4]),
             fits.Column(name='e_Y', format='D', array=output[5]),
             fits.Column(name='mag', format='E', array=output[6], unit='mag'),
             fits.Column(name='e_mag', format='E', array=output[7], unit='mag'),
             fits.Column(name='ext', format='I2', array=output[8]),
             fits.Column(name='class', format='I1', array=output[9]),
             fits.Column(name='ell', format='E', array=output[10]),
             fits.Column(name='mjdobs', format='D', array=output[11])])
        thdulist.append(tbhdu)
    # write to file
    # NOTE(review): writeto raises if outfile already exists — presumably
    # callers guarantee a fresh output path; confirm.
    thdulist.writeto(outfile)
    if logfilename:
        # write data to log file
        logfile = open(logfilename, 'a')
        logfile.write(dataString + str(np.median(seeingVals)) + ' ' + str(ESOgrade) + ' ' + chipcounts + '\n')
        logfile.close()
|
|
# BridgeDB by Nick Mathewson.
# Copyright (c) 2007-2009, The Tor Project, Inc.
# See LICENSE for licensing information
"""
This module has low-level functionality for parsing bridges and arranging
them in rings.
"""
import binascii
import bisect
import hmac
import logging
import re
import sha
import socket
import time
import bridgedb.Storage
# Sizes and digest used throughout this module.
HEX_FP_LEN = 40  # length of a hex-encoded bridge fingerprint
ID_LEN = 20  # length of a raw (binary) bridge identity
DIGESTMOD = sha  # digest module used for every HMAC in this file
HEX_DIGEST_LEN = 40  # hex-encoded digest length
DIGEST_LEN = 20  # raw digest length in bytes
def is_valid_ip(ip):
    """Return True if ip is the string encoding of a valid IPv4 address,
    and False otherwise.

    >>> is_valid_ip('1.2.3.4')
    True
    >>> is_valid_ip('1.2.3.255')
    True
    >>> is_valid_ip('1.2.3.256')
    False
    >>> is_valid_ip('1')
    False
    >>> is_valid_ip('1.2.3')
    False
    >>> is_valid_ip('xyzzy')
    False
    """
    # Require the full dotted-quad shape up front: inet_aton alone would
    # accept shorthand such as "1.2" (a synonym for "0.0.1.2"), which we
    # do not want.
    if re.match(r'(\d+)\.(\d+)\.(\d+)\.(\d+)', ip) is None:
        return False
    # Let the OS resolver decide whether the quads are in range.
    try:
        socket.inet_aton(ip)
    except socket.error:
        return False
    return True
def is_valid_fingerprint(fp):
    """Return true iff fp is in the right format to be a hex fingerprint
    of a Tor server: exactly HEX_FP_LEN characters of valid hexadecimal.
    """
    if len(fp) != HEX_FP_LEN:
        return False
    try:
        fromHex(fp)
    except (TypeError, ValueError):
        # Python 2's binascii raises TypeError on non-hex input, but
        # Python 3 raises binascii.Error (a ValueError subclass); catch
        # both so invalid input yields False instead of an exception.
        return False
    else:
        return True
# Convenience aliases: raw 20-byte identity digest <-> hex string.
toHex = binascii.b2a_hex
fromHex = binascii.a2b_hex
def get_hmac(k,v):
    """Compute and return the raw HMAC digest of v, keyed with k."""
    return hmac.new(k, v, digestmod=DIGESTMOD).digest()
def get_hmac_fn(k, hex=True):
    """Return a closure that computes the HMAC of its single argument
    using the key k.

    The key is folded into a template HMAC object once; each call copies
    the template instead of re-keying.  If 'hex' is true the closure
    returns a hex-encoded string, otherwise the raw digest.
    """
    template = hmac.new(k, digestmod=DIGESTMOD)
    def hmac_fn(v):
        mac = template.copy()
        mac.update(v)
        return mac.hexdigest() if hex else mac.digest()
    return hmac_fn
def chopString(s, size):
    """Generator. Given a string and a length, divide the string into
    pieces of no more than that length and yield them in order.

    The final piece is shorter when len(s) is not a multiple of size;
    an empty string yields nothing.
    """
    # range() replaces the Python-2-only xrange(): iteration behaviour is
    # identical on Python 2 (where range returns a list) and the function
    # stays importable on Python 3.
    for pos in range(0, len(s), size):
        yield s[pos:pos+size]
class Bridge:
    """A single bridge and its status.

    Attributes:
      nickname    -- the bridge's nickname.  Not currently used.
      ip          -- the bridge's IP address, as a dotted quad.
      orport      -- the bridge's OR port.
      fingerprint -- the bridge's identity digest, in lowercase hex with
                     no spaces.
      running, stable -- status flags; None until setStatus() is called.
    """
    def __init__(self, nickname, ip, orport, fingerprint=None, id_digest=None):
        """Create a new Bridge.  One of fingerprint and id_digest must be
        set; a raw id_digest takes precedence and may not be combined with
        a fingerprint."""
        self.nickname = nickname
        self.ip = ip
        self.orport = orport
        self.running = self.stable = None
        if id_digest is not None:
            assert fingerprint is None
            if len(id_digest) != DIGEST_LEN:
                raise TypeError("Bridge with invalid ID")
            self.fingerprint = toHex(id_digest)
        elif fingerprint is not None:
            if not is_valid_fingerprint(fingerprint):
                raise TypeError("Bridge with invalid fingerprint (%r)"%
                                fingerprint)
            self.fingerprint = fingerprint.lower()
        else:
            raise TypeError("Bridge with no ID")

    def getID(self):
        """Return the bridge's identity digest as raw bytes."""
        return fromHex(self.fingerprint)

    def __repr__(self):
        """Return a piece of python that evaluates to this bridge."""
        return "Bridge(%r,%r,%d,%r)"%(
            self.nickname, self.ip, self.orport, self.fingerprint)

    def getConfigLine(self,includeFingerprint=False):
        """Return a line describing this bridge for inclusion in a torrc."""
        if not includeFingerprint:
            return "bridge %s:%d" % (self.ip, self.orport)
        return "bridge %s:%d %s" % (self.ip, self.orport, self.fingerprint)

    def assertOK(self):
        """Sanity-check the address, fingerprint and port range."""
        assert is_valid_ip(self.ip)
        assert is_valid_fingerprint(self.fingerprint)
        assert 1 <= self.orport <= 65535

    def setStatus(self, running=None, stable=None):
        """Update the status flags; arguments left as None are unchanged."""
        if running is not None:
            self.running = running
        if stable is not None:
            self.stable = stable
def parseDescFile(f, bridge_purpose='bridge'):
    """Generator. Parse a cached-descriptors file 'f' and yield a Bridge
    object for every complete entry whose purpose matches bridge_purpose
    (a bridge_purpose of None matches every entry).
    """
    nickname = ip = orport = fingerprint = purpose = None
    for line in f:
        line = line.strip()
        # "opt " is a compatibility prefix; drop it before dispatching.
        if line.startswith("opt "):
            line = line[4:]
        if line.startswith("@purpose "):
            purpose = line.split()[1]
        elif line.startswith("router "):
            fields = line.split()
            if len(fields) >= 4:
                nickname, ip = fields[1], fields[2]
                orport = int(fields[3])
        elif line.startswith("fingerprint "):
            fingerprint = line[12:].replace(" ", "")
        elif line.startswith("router-signature"):
            # End of an entry: emit it if complete and purpose-matched,
            # then reset for the next one.
            if ((purpose == bridge_purpose or bridge_purpose is None)
                    and nickname and ip and orport and fingerprint):
                b = Bridge(nickname, ip, orport, fingerprint)
                b.assertOK()
                yield b
            nickname = ip = orport = fingerprint = purpose = None
def parseStatusFile(f):
    """Generator. Parse a network-status style file 'f' and, for every
    router's "s" line, yield (identity digest, has-Running-flag,
    has-Stable-flag)."""
    ID = None
    for line in f:
        line = line.strip()
        if line.startswith("opt "):
            line = line[4:]
        if line.startswith("r "):
            # The second field of an "r" line is the base64 identity digest
            # with its trailing padding stripped; restore it before decoding.
            encoded = line.split()[2]
            try:
                ID = binascii.a2b_base64(encoded+"=")
            except binascii.Error:
                logging.warn("Unparseable base64 ID %r", encoded)
        elif ID and line.startswith("s "):
            flags = line.split()
            yield ID, ("Running" in flags), ("Stable" in flags)
class BridgeHolder:
    """Abstract base class for all classes that hold bridges."""
    def insert(self, bridge):
        """Add a bridge to this holder.  Subclasses must override."""
        # Bug fix: the original "raise NotImplemented()" actually raised
        # "TypeError: 'NotImplementedType' object is not callable" --
        # NotImplementedError is the exception intended for abstract
        # methods.
        raise NotImplementedError()
    def clear(self):
        """Forget all held bridges.  Default: nothing to forget."""
        pass
    def assignmentsArePersistent(self):
        """Return True if bridge-to-holder assignments survive restarts."""
        return True
class BridgeRingParameters:
    """Validated answer constraints for a BridgeRing.

    needPorts holds (port, count) pairs; needFlags holds (flag, count)
    pairs with the flag name stored in lowercase.
    """
    def __init__(self, needPorts=(), needFlags=()):
        """Validate every (port, count) and (flag, count) pair, then keep
        copies of both sequences.  Raises TypeError on any bad entry."""
        for port, count in needPorts:
            if not (1 <= port <= 65535):
                raise TypeError("Port %s out of range."%port)
            if count <= 0:
                raise TypeError("Count %s out of range."%count)
        for flag, count in needFlags:
            flag = flag.lower()
            # "stable" is the only flag we know how to constrain on.
            if flag not in [ "stable" ]:
                raise TypeError("Unsupported flag %s"%flag)
            if count <= 0:
                raise TypeError("Count %s out of range."%count)
        self.needPorts = needPorts[:]
        self.needFlags = [(flag.lower(), count) for flag, count in needFlags[:]]
class BridgeRing(BridgeHolder):
    """Arranges bridges in a ring based on an hmac function."""
    ## Fields:
    ##   bridges: a map from hmac value to Bridge.
    ##   bridgesByID: a map from bridge ID Digest to Bridge.
    ##   isSorted: true iff sortedKeys is currently sorted.
    ##   sortedKeys: a list of all the hmacs, in order.
    ##   name: a string to represent this ring in the logs.
    def __init__(self, key, answerParameters=None):
        """Create a new BridgeRing, using key as its hmac key.

        answerParameters -- a BridgeRingParameters giving the port/flag
        constraints answers must satisfy; one subring is created per
        constraint."""
        self.bridges = {}
        self.bridgesByID = {}
        self.hmac = get_hmac_fn(key, hex=False)
        self.isSorted = False
        self.sortedKeys = []
        if answerParameters is None:
            answerParameters = BridgeRingParameters()
        self.answerParameters = answerParameters
        self.subrings = []  # list of (type, value, count, BridgeRing)
        for port,count in self.answerParameters.needPorts:
            #note that we really need to use the same key here, so that
            # the mapping is in the same order for all subrings.
            self.subrings.append( ('port',port,count,BridgeRing(key,None)) )
        for flag,count in self.answerParameters.needFlags:
            self.subrings.append( ('flag',flag,count,BridgeRing(key,None)) )
        self.setName("Ring")
    def setName(self, name):
        """Set this ring's log name and rename its subrings to match."""
        self.name = name
        for tp,val,_,subring in self.subrings:
            if tp == 'port':
                subring.setName("%s (port-%s subring)"%(name, val))
            else:
                subring.setName("%s (%s subring)"%(name, val))
    def __len__(self):
        """Return the number of bridges held in the main ring."""
        return len(self.bridges)
    def clear(self):
        """Forget every bridge, in this ring and all subrings."""
        self.bridges = {}
        self.bridgesByID = {}
        self.isSorted = False
        self.sortedKeys = []
        for tp, val, count, subring in self.subrings:
            subring.clear()
    def insert(self, bridge):
        """Add a bridge to the ring. If the bridge is already there,
        replace the old one."""
        # Forward to any subring whose constraint the bridge satisfies.
        for tp,val,_,subring in self.subrings:
            if tp == 'port':
                if val == bridge.orport:
                    subring.insert(bridge)
            else:
                assert tp == 'flag' and val == 'stable'
                if val == 'stable' and bridge.stable:
                    subring.insert(bridge)
        ident = bridge.getID()
        pos = self.hmac(ident)
        # Bug fix: dict.has_key() was removed in Python 3; the 'in'
        # operator is the equivalent test on Python 2 as well.
        if pos not in self.bridges:
            self.sortedKeys.append(pos)
            self.isSorted = False
        self.bridges[pos] = bridge
        self.bridgesByID[ident] = bridge
        logging.debug("Adding %s to %s", bridge.getConfigLine(True), self.name)
    def _sort(self):
        """Helper: put the keys in sorted order."""
        if not self.isSorted:
            self.sortedKeys.sort()
            self.isSorted = True
    def _getBridgeKeysAt(self, pos, N=1):
        """Helper: return the N keys appearing in the ring after position
        pos"""
        assert len(pos) == DIGEST_LEN
        # NOTE(review): when N covers the whole ring, the key list is
        # returned without forcing a sort; getBridges() re-sorts its final
        # key selection, so callers still see deterministic output.
        if N >= len(self.sortedKeys):
            return self.sortedKeys
        if not self.isSorted:
            self._sort()
        idx = bisect.bisect_left(self.sortedKeys, pos)
        r = self.sortedKeys[idx:idx+N]
        if len(r) < N:
            # wrap around as needed.
            r.extend(self.sortedKeys[:N - len(r)])
        assert len(r) == N
        return r
    def getBridges(self, pos, N=1):
        """Return the N bridges appearing in the ring after position pos"""
        # First gather the keys each constrained subring must contribute
        # (clamped to the subring's size) ...
        forced = []
        for _,_,count,subring in self.subrings:
            if len(subring) < count:
                count = len(subring)
            forced.extend(subring._getBridgeKeysAt(pos, count))
        # ... then top up from the main ring, de-duplicating, capping at N.
        keys = [ ]
        for k in forced + self._getBridgeKeysAt(pos, N):
            if k not in keys:
                keys.append(k)
        keys = keys[:N]
        keys.sort()
        return [ self.bridges[k] for k in keys ]
    def getBridgeByID(self, fp):
        """Return the bridge whose identity digest is fp, or None if no such
        bridge exists."""
        for _,_,_,subring in self.subrings:
            b = subring.getBridgeByID(fp)
            if b is not None:
                return b
        return self.bridgesByID.get(fp)
class FixedBridgeSplitter(BridgeHolder):
    """A bridgeholder that splits bridges up based on an hmac and assigns
    them to several sub-bridgeholders with equal probability.
    """
    def __init__(self, key, rings):
        self.hmac = get_hmac_fn(key, hex=True)
        self.rings = rings[:]
        for r in self.rings:
            assert(isinstance(r, BridgeHolder))
    def insert(self, bridge):
        """Assign bridge to one sub-ring, chosen by hashing its identity."""
        # Use the first 8 hex digits (4 bytes) of the hmac as an integer.
        # Bug fix: int() replaces the Python-2-only long(); Python 2 ints
        # auto-promote, so the value is unchanged there.
        digest = self.hmac(bridge.getID())
        pos = int( digest[:8], 16 )
        which = pos % len(self.rings)
        self.rings[which].insert(bridge)
    def clear(self):
        """Clear every sub-ring."""
        for r in self.rings:
            r.clear()
    def __len__(self):
        """Return the total number of bridges across all sub-rings."""
        n = 0
        for r in self.rings:
            n += len(r)
        return n
class UnallocatedHolder(BridgeHolder):
    """A pseudo-bridgeholder that ignores its bridges and leaves them
    unassigned.
    """
    def insert(self, bridge):
        """Log the bridge and deliberately store nothing."""
        logging.debug("Leaving %s unallocated", bridge.getConfigLine(True))

    def assignmentsArePersistent(self):
        """Unallocated bridges may be reassigned after a restart."""
        return False

    def __len__(self):
        """Nothing is ever held, so the count is always zero."""
        return 0
class BridgeSplitter(BridgeHolder):
    """A BridgeHolder that splits incoming bridges up based on an hmac,
    and assigns them to sub-bridgeholders with different probabilities.
    Bridge-to-bridgeholder associations are recorded in a store.
    """
    def __init__(self, key):
        # hex=True: hmac values are hex strings so insert() can take an
        # integer prefix of them below.
        self.hmac = get_hmac_fn(key, hex=True)
        # ringsByName: ring name -> BridgeHolder
        self.ringsByName = {}
        # totalP: sum of all ring weights added so far
        self.totalP = 0
        # pValues[i] is the cumulative weight before rings[i]; used with
        # bisect in insert() to pick a ring proportionally to its weight.
        self.pValues = []
        # rings: ring names, parallel to pValues
        self.rings = []
        # statsHolders: trackers told about every bridge seen
        self.statsHolders = []
    def __len__(self):
        # Total bridges across all sub-holders.
        n = 0
        for r in self.ringsByName.values():
            n += len(r)
        return n
    def addRing(self, ring, ringname, p=1):
        """Add a new bridgeholder.
           ring -- the bridgeholder to add.
           ringname -- a string representing the bridgeholder.  This is used
               to record which bridges have been assigned where in the store.
           p -- the relative proportion of bridges to assign to this
               bridgeholder.
        """
        assert isinstance(ring, BridgeHolder)
        self.ringsByName[ringname] = ring
        self.pValues.append(self.totalP)
        self.rings.append(ringname)
        self.totalP += p
    def addTracker(self, t):
        """Adds a statistics tracker that gets told about every bridge we see.
        """
        self.statsHolders.append(t)
    def clear(self):
        # Clears the sub-holders only; recorded store assignments are kept.
        for r in self.ringsByName.values():
            r.clear()
    def insert(self, bridge):
        assert self.rings
        db = bridgedb.Storage.getDB()
        # Trackers see every bridge, running or not.
        for s in self.statsHolders:
            s.insert(bridge)
        # Only running bridges get assigned to a ring.
        if not bridge.running:
            return
        bridgeID = bridge.getID()
        # Determine which ring to put this bridge in if we haven't seen it
        # before.
        # Weighted choice: hash the ID, reduce mod totalP, and find which
        # cumulative-weight bucket it lands in.
        pos = self.hmac(bridgeID)
        n = int(pos[:8], 16) % self.totalP
        pos = bisect.bisect_right(self.pValues, n) - 1
        assert 0 <= pos < len(self.rings)
        ringname = self.rings[pos]
        # NOTE(review): the store presumably returns the previously-recorded
        # ring name for a known bridge, overriding the hashed choice --
        # confirm against bridgedb.Storage.  If it returned a name not in
        # ringsByName, ring would be None and the insert below would fail.
        ringname = db.insertBridgeAndGetRing(bridge, ringname, time.time())
        db.commit()
        ring = self.ringsByName.get(ringname)
        ring.insert(bridge)
|
|
#!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This test covers a resharding scenario of an already sharded keyspace.
We start with shards -80 and 80-. We then split 80- into 80-c0 and c0-.
This test is the main resharding test. It not only tests the regular resharding
workflow for a horizontal split, but also a lot of error cases and side
effects, like:
- migrating the traffic one cell at a time.
- migrating rdonly traffic back and forth.
- making sure we can't migrate the master until replica and rdonly are migrated.
- has a background thread to insert data during migration.
- tests a destination shard master failover while replication is running.
- tests a filtered replication source replacement while filtered replication
is running.
- tests 'vtctl SourceShardAdd' and 'vtctl SourceShardDelete'.
- makes sure the key range rules are properly enforced on masters.
"""
import threading
import time
import logging
import unittest
import base_sharding
import environment
import tablet
import utils
from vtproto import topodata_pb2
from vtdb import keyrange_constants
# Module-level tablet fixtures, grouped by the keyspace range they serve.
# initial shards
# range '' - 80
shard_0_master = tablet.Tablet()
shard_0_replica = tablet.Tablet()
shard_0_ny_rdonly = tablet.Tablet(cell='ny')
# range 80 - ''
shard_1_master = tablet.Tablet()
shard_1_slave1 = tablet.Tablet()
shard_1_slave2 = tablet.Tablet()
shard_1_ny_rdonly = tablet.Tablet(cell='ny')
shard_1_rdonly1 = tablet.Tablet()
# split shards
# range 80 - c0
shard_2_master = tablet.Tablet()
shard_2_replica1 = tablet.Tablet()
shard_2_replica2 = tablet.Tablet()
shard_2_rdonly1 = tablet.Tablet()
# range c0 - ''
shard_3_master = tablet.Tablet()
shard_3_replica = tablet.Tablet()
shard_3_rdonly1 = tablet.Tablet()
# convenience groupings used by setup/teardown and the value checks
shard_2_tablets = [shard_2_master, shard_2_replica1, shard_2_replica2,
                   shard_2_rdonly1]
shard_3_tablets = [shard_3_master, shard_3_replica, shard_3_rdonly1]
all_tablets = ([shard_0_master, shard_0_replica, shard_0_ny_rdonly,
                shard_1_master, shard_1_slave1, shard_1_slave2,
                shard_1_ny_rdonly, shard_1_rdonly1] +
               shard_2_tablets + shard_3_tablets)
def setUpModule():
  """Bring up the topo server, vtctld, and one MySQL per tablet.

  On any failure the partially-created environment is torn down before the
  exception is re-raised, so a broken setup never leaks processes.
  """
  try:
    environment.topo_server().setup()
    setup_procs = []
    for t in all_tablets:
      setup_procs.append(t.init_mysql(use_rbr=base_sharding.use_rbr))
    utils.Vtctld().start()
    utils.wait_procs(setup_procs)
  except:
    tearDownModule()
    raise
def tearDownModule():
  """Shut down all MySQL instances, the topo server and helper processes,
  then remove temporary state — unless teardown is being skipped."""
  utils.required_teardown()
  if utils.options.skip_teardown:
    return
  utils.wait_procs([t.teardown_mysql() for t in all_tablets],
                   raise_on_error=False)
  environment.topo_server().teardown()
  utils.kill_sub_processes()
  utils.remove_tmp_files()
  for t in all_tablets:
    t.remove_tree()
# InsertThread will insert a value into the timestamps table, and then
# every 1/5s will update its value with the current timestamp
class InsertThread(threading.Thread):
  """Inserts one row into the timestamps table, then updates its
  time_milli column with the current wall clock every 0.2s until
  self.done is set.  Used to generate write traffic during migration."""

  def __init__(self, tablet_obj, thread_name, thread_id, user_id,
               keyspace_id):
    threading.Thread.__init__(self)
    self.tablet = tablet_obj
    self.thread_name = thread_name
    self.thread_id = thread_id
    self.user_id = user_id
    self.keyspace_id = keyspace_id
    self.str_keyspace_id = utils.uint64_to_hex(keyspace_id)
    self.done = False
    # int() replaces the Python-2-only long(); Python 2 ints auto-promote,
    # so the inserted millisecond value is unchanged there.
    self.tablet.mquery(
        'vt_test_keyspace',
        ['begin',
         'insert into timestamps(id, time_milli, custom_ksid_col) '
         'values(%d, %d, 0x%x) '
         '/* vtgate:: keyspace_id:%s */ /* user_id:%d */' %
         (self.thread_id, int(time.time() * 1000), self.keyspace_id,
          self.str_keyspace_id, self.user_id),
         'commit'],
        write=True, user='vt_app')
    self.start()

  def run(self):
    """Keep touching the row until done; any failure is logged rather than
    raised so the test's main thread is never killed by this helper."""
    try:
      while not self.done:
        self.tablet.mquery(
            'vt_test_keyspace',
            ['begin',
             'update timestamps set time_milli=%d '
             'where id=%d /* vtgate:: keyspace_id:%s */ /* user_id:%d */' %
             (int(time.time() * 1000), self.thread_id,
              self.str_keyspace_id, self.user_id),
             'commit'],
            write=True, user='vt_app')
        time.sleep(0.2)
    except Exception:  # pylint: disable=broad-except
      logging.exception('InsertThread got exception.')
# MonitorLagThread will get values from a database, and compare the timestamp
# to evaluate lag. Since the qps is really low, and we send binlogs as chunks,
# the latency is pretty high (a few seconds).
class MonitorLagThread(threading.Thread):
  """Polls the timestamps row written by an InsertThread and tracks the
  replication lag in milliseconds (max, sum, and sample count).  Lag is
  expected to be high (a few seconds) since qps is low and binlogs are
  sent as chunks."""

  def __init__(self, tablet_obj, thread_name, thread_id):
    threading.Thread.__init__(self)
    self.tablet = tablet_obj
    self.thread_name = thread_name
    self.thread_id = thread_id
    self.done = False
    self.max_lag_ms = 0
    self.lag_sum_ms = 0
    self.sample_count = 0
    self.start()

  def run(self):
    """Sample the row once per second until done; failures are logged,
    not raised, so the test's main thread is unaffected."""
    try:
      while not self.done:
        result = self.tablet.mquery(
            'vt_test_keyspace',
            'select time_milli from timestamps where id=%d' %
            self.thread_id)
        if result:
          # int() replaces the Python-2-only long(); Python 2 ints
          # auto-promote, so the arithmetic is unchanged there.
          lag_ms = int(time.time() * 1000) - int(result[0][0])
          logging.debug('MonitorLagThread(%s) got %d ms',
                        self.thread_name, lag_ms)
          self.sample_count += 1
          self.lag_sum_ms += lag_ms
          if lag_ms > self.max_lag_ms:
            self.max_lag_ms = lag_ms
        time.sleep(1.0)
    except Exception:  # pylint: disable=broad-except
      logging.exception('MonitorLagThread got exception.')
class TestResharding(unittest.TestCase, base_sharding.BaseShardingTest):
# create_schema will create the same schema on the keyspace
# then insert some values
  def _create_schema(self):
    """Apply the test schema to every shard of test_keyspace: two data
    tables, a view, a timestamps table, an unrelated table, and (under
    RBR only) a table with no primary key."""
    # The sharding column type depends on how keyspace ids are encoded.
    if base_sharding.keyspace_id_type == keyrange_constants.KIT_BYTES:
      t = 'varbinary(64)'
    else:
      t = 'bigint(20) unsigned'
    # Note that the primary key columns are not defined first on purpose to test
    # that a reordered column list is correctly used everywhere in vtworker.
    create_table_template = '''create table %s(
custom_ksid_col ''' + t + ''' not null,
msg varchar(64),
id bigint not null,
parent_id bigint not null,
primary key (parent_id, id),
index by_msg (msg)
) Engine=InnoDB'''
    create_view_template = (
        'create view %s'
        '(parent_id, id, msg, custom_ksid_col)'
        'as select parent_id, id, msg, custom_ksid_col '
        'from %s')
    create_timestamp_table = '''create table timestamps(
id int not null,
time_milli bigint(20) unsigned not null,
custom_ksid_col ''' + t + ''' not null,
primary key (id)
) Engine=InnoDB'''
    # Make sure that clone and diff work with tables which have no primary key.
    # RBR only because Vitess requires the primary key for query rewrites if
    # it is running with statement based replication.
    create_no_pk_table = '''create table no_pk(
custom_ksid_col ''' + t + ''' not null,
msg varchar(64),
id bigint not null,
parent_id bigint not null
) Engine=InnoDB'''
    create_unrelated_table = '''create table unrelated(
name varchar(64),
primary key (name)
) Engine=InnoDB'''
    utils.run_vtctl(['ApplySchema',
                     '-sql=' + create_table_template % ('resharding1'),
                     'test_keyspace'],
                    auto_log=True)
    utils.run_vtctl(['ApplySchema',
                     '-sql=' + create_table_template % ('resharding2'),
                     'test_keyspace'],
                    auto_log=True)
    utils.run_vtctl(['ApplySchema',
                     '-sql=' + create_view_template % ('view1', 'resharding1'),
                     'test_keyspace'],
                    auto_log=True)
    utils.run_vtctl(['ApplySchema',
                     '-sql=' + create_timestamp_table,
                     'test_keyspace'],
                    auto_log=True)
    utils.run_vtctl(['ApplySchema',
                     '-sql=' + create_unrelated_table,
                     'test_keyspace'],
                    auto_log=True)
    if base_sharding.use_rbr:
      utils.run_vtctl(['ApplySchema', '-sql=' + create_no_pk_table,
                       'test_keyspace'], auto_log=True)
  def _insert_startup_values(self):
    """Seed the source shards with one known row per destination range:
    id 1 (ksid 0x10..) stays on shard -80, id 2 (0x90..) will land on
    80-c0, id 3 (0xD0..) on c0-; plus one no_pk row under RBR."""
    self._insert_value(shard_0_master, 'resharding1', 1, 'msg1',
                       0x1000000000000000)
    self._insert_value(shard_1_master, 'resharding1', 2, 'msg2',
                       0x9000000000000000)
    self._insert_value(shard_1_master, 'resharding1', 3, 'msg3',
                       0xD000000000000000)
    if base_sharding.use_rbr:
      self._insert_value(shard_1_master, 'no_pk', 1, 'msg1',
                         0xA000000000000000)
      # TODO(github.com/vitessio/vitess/issues/2880): Add more rows here such
      # clone and diff would break when the insertion order on source and
      # dest shards is different.
def _check_startup_values(self):
# check first value is in the right shard
for t in shard_2_tablets:
self._check_value(t, 'resharding1', 2, 'msg2', 0x9000000000000000)
for t in shard_3_tablets:
self._check_value(t, 'resharding1', 2, 'msg2', 0x9000000000000000,
should_be_here=False)
# check second value is in the right shard too
for t in shard_2_tablets:
self._check_value(t, 'resharding1', 3, 'msg3', 0xD000000000000000,
should_be_here=False)
for t in shard_3_tablets:
self._check_value(t, 'resharding1', 3, 'msg3', 0xD000000000000000)
if base_sharding.use_rbr:
for t in shard_2_tablets:
self._check_value(t, 'no_pk', 1, 'msg1', 0xA000000000000000)
for t in shard_3_tablets:
self._check_value(t, 'no_pk', 1, 'msg1', 0xA000000000000000,
should_be_here=False)
def _insert_lots(self, count, base=0):
for i in xrange(count):
self._insert_value(shard_1_master, 'resharding1', 10000 + base + i,
'msg-range1-%d' % i, 0xA000000000000000 + base + i)
self._insert_value(shard_1_master, 'resharding1', 20000 + base + i,
'msg-range2-%d' % i, 0xE000000000000000 + base + i)
  def _exec_multi_shard_dmls(self):
    """Run inserts, updates and deletes that each span one or both
    destination shards, so statement routing through filtered replication
    can be verified afterwards by _check_multi_shard_values."""
    # insert spanning both destination shards (0x90.. -> 80-c0,
    # 0xD0../0xE0.. -> c0-)
    mids = [10000001, 10000002, 10000003]
    msg_ids = ['msg-id10000001', 'msg-id10000002', 'msg-id10000003']
    keyspace_ids = [0x9000000000000000, 0xD000000000000000,
                    0xE000000000000000]
    self._insert_multi_value(shard_1_master, 'resharding1', mids,
                             msg_ids, keyspace_ids)
    # insert targeting only shard c0-
    mids = [10000004, 10000005]
    msg_ids = ['msg-id10000004', 'msg-id10000005']
    keyspace_ids = [0xD000000000000000, 0xE000000000000000]
    self._insert_multi_value(shard_1_master, 'resharding1', mids,
                             msg_ids, keyspace_ids)
    # rows for the non-annotated update statements below
    mids = [10000011, 10000012, 10000013]
    msg_ids = ['msg-id10000011', 'msg-id10000012', 'msg-id10000013']
    keyspace_ids = [0x9000000000000000, 0xD000000000000000, 0xE000000000000000]
    self._insert_multi_value(shard_1_master, 'resharding1', mids,
                             msg_ids, keyspace_ids)
    # This update targets two shards.
    self._exec_non_annotated_update(shard_1_master, 'resharding1',
                                    [10000011, 10000012], 'update1')
    # This update targets one shard.
    self._exec_non_annotated_update(shard_1_master, 'resharding1',
                                    [10000013], 'update2')
    # rows for the non-annotated delete statements below
    mids = [10000014, 10000015, 10000016]
    msg_ids = ['msg-id10000014', 'msg-id10000015', 'msg-id10000016']
    keyspace_ids = [0x9000000000000000, 0xD000000000000000, 0xE000000000000000]
    self._insert_multi_value(shard_1_master, 'resharding1', mids,
                             msg_ids, keyspace_ids)
    # This delete targets two shards.
    self._exec_non_annotated_delete(shard_1_master, 'resharding1',
                                    [10000014, 10000015])
    # This delete targets one shard.
    self._exec_non_annotated_delete(shard_1_master, 'resharding1', [10000016])
def _check_multi_shard_values(self):
self._check_multi_dbs(
[shard_2_master, shard_2_replica1, shard_2_replica2],
'resharding1', 10000001, 'msg-id10000001', 0x9000000000000000)
self._check_multi_dbs(
[shard_2_master, shard_2_replica1, shard_2_replica2],
'resharding1', 10000002, 'msg-id10000002', 0xD000000000000000,
should_be_here=False)
self._check_multi_dbs(
[shard_2_master, shard_2_replica1, shard_2_replica2],
'resharding1', 10000003, 'msg-id10000003', 0xE000000000000000,
should_be_here=False)
self._check_multi_dbs(
[shard_3_master, shard_3_replica],
'resharding1', 10000001, 'msg-id10000001', 0x9000000000000000,
should_be_here=False)
self._check_multi_dbs(
[shard_3_master, shard_3_replica],
'resharding1', 10000002, 'msg-id10000002', 0xD000000000000000)
self._check_multi_dbs(
[shard_3_master, shard_3_replica],
'resharding1', 10000003, 'msg-id10000003', 0xE000000000000000)
self._check_multi_dbs(
[shard_2_master, shard_2_replica1, shard_2_replica2],
'resharding1', 10000004, 'msg-id10000004', 0xD000000000000000,
should_be_here=False)
self._check_multi_dbs(
[shard_2_master, shard_2_replica1, shard_2_replica2],
'resharding1', 10000005, 'msg-id10000005', 0xE000000000000000,
should_be_here=False)
self._check_multi_dbs(
[shard_3_master, shard_3_replica],
'resharding1', 10000004, 'msg-id10000004', 0xD000000000000000)
self._check_multi_dbs(
[shard_3_master, shard_3_replica],
'resharding1', 10000005, 'msg-id10000005', 0xE000000000000000)
self._check_multi_dbs(
[shard_2_master, shard_2_replica1, shard_2_replica2],
'resharding1', 10000011, 'update1', 0x9000000000000000)
self._check_multi_dbs(
[shard_3_master, shard_3_replica],
'resharding1', 10000012, 'update1', 0xD000000000000000)
self._check_multi_dbs(
[shard_3_master, shard_3_replica],
'resharding1', 10000013, 'update2', 0xE000000000000000)
self._check_multi_dbs(
[shard_2_master, shard_2_replica1, shard_2_replica2,
shard_3_master, shard_3_replica],
'resharding1', 10000014, 'msg-id10000014', 0x9000000000000000,
should_be_here=False)
self._check_multi_dbs(
[shard_2_master, shard_2_replica1, shard_2_replica2,
shard_3_master, shard_3_replica],
'resharding1', 10000015, 'msg-id10000015', 0xD000000000000000,
should_be_here=False)
self._check_multi_dbs(
[shard_2_master, shard_2_replica1, shard_2_replica2,
shard_3_master, shard_3_replica],
'resharding1', 10000016, 'msg-id10000016', 0xF000000000000000,
should_be_here=False)
# _check_multi_dbs checks the row in multiple dbs.
def _check_multi_dbs(self, dblist, table, mid, msg, keyspace_id,
should_be_here=True):
for db in dblist:
self._check_value(db, table, mid, msg, keyspace_id, should_be_here)
# _check_lots returns how many of the values we have, in percents.
def _check_lots(self, count, base=0):
found = 0
for i in xrange(count):
if self._is_value_present_and_correct(shard_2_replica2, 'resharding1',
10000 + base + i, 'msg-range1-%d' %
i, 0xA000000000000000 + base + i):
found += 1
if self._is_value_present_and_correct(shard_3_replica, 'resharding1',
20000 + base + i, 'msg-range2-%d' %
i, 0xE000000000000000 + base + i):
found += 1
percent = found * 100 / count / 2
logging.debug('I have %d%% of the data', percent)
return percent
def _check_lots_timeout(self, count, threshold, timeout, base=0):
while True:
value = self._check_lots(count, base=base)
if value >= threshold:
return value
timeout = utils.wait_step('waiting for %d%% of the data' % threshold,
timeout, sleep_time=1)
# _check_lots_not_present makes sure no data is in the wrong shard
def _check_lots_not_present(self, count, base=0):
for i in xrange(count):
self._check_value(shard_3_replica, 'resharding1', 10000 + base + i,
'msg-range1-%d' % i, 0xA000000000000000 + base + i,
should_be_here=False)
self._check_value(shard_2_replica2, 'resharding1', 20000 + base + i,
'msg-range2-%d' % i, 0xE000000000000000 + base + i,
should_be_here=False)
  def test_resharding(self):
    """End-to-end horizontal resharding: split shard 80- into 80-c0 and c0-.

    Exercises, in order: keyspace/tablet setup, worker-based data copy
    (online and offline SplitClone, including keyspace_id changes between
    clones), filtered replication, SplitDiff verification, serving-type
    migrations (rdonly, replica, master, with a reverse step), a
    destination-shard reparent mid-flight, and source-shard cleanup.
    """
    # we're going to reparent and swap these two
    global shard_2_master, shard_2_replica1
    utils.run_vtctl(['CreateKeyspace',
                     '--sharding_column_name', 'bad_column',
                     '--sharding_column_type', 'bytes',
                     'test_keyspace'])
    utils.run_vtctl(['SetKeyspaceShardingInfo', 'test_keyspace',
                     'custom_ksid_col', 'uint64'], expect_fail=True)
    utils.run_vtctl(['SetKeyspaceShardingInfo', '-force',
                     'test_keyspace',
                     'custom_ksid_col', base_sharding.keyspace_id_type])
    shard_0_master.init_tablet('replica', 'test_keyspace', '-80')
    shard_0_replica.init_tablet('replica', 'test_keyspace', '-80')
    shard_0_ny_rdonly.init_tablet('rdonly', 'test_keyspace', '-80')
    shard_1_master.init_tablet('replica', 'test_keyspace', '80-')
    shard_1_slave1.init_tablet('replica', 'test_keyspace', '80-')
    shard_1_slave2.init_tablet('replica', 'test_keyspace', '80-')
    shard_1_ny_rdonly.init_tablet('rdonly', 'test_keyspace', '80-')
    shard_1_rdonly1.init_tablet('rdonly', 'test_keyspace', '80-')
    utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
    ks = utils.run_vtctl_json(['GetSrvKeyspace', 'test_nj', 'test_keyspace'])
    self.assertEqual(ks['sharding_column_name'], 'custom_ksid_col')
    # we set full_mycnf_args to True as a test in the KIT_BYTES case
    full_mycnf_args = (base_sharding.keyspace_id_type ==
                       keyrange_constants.KIT_BYTES)
    # create databases so vttablet can start behaving somewhat normally
    for t in [shard_0_master, shard_0_replica, shard_0_ny_rdonly,
              shard_1_master, shard_1_slave1, shard_1_slave2, shard_1_ny_rdonly,
              shard_1_rdonly1]:
      t.create_db('vt_test_keyspace')
      t.start_vttablet(wait_for_state=None, full_mycnf_args=full_mycnf_args,
                       binlog_use_v3_resharding_mode=False)
    # wait for the tablets (replication is not setup, they won't be healthy)
    for t in [shard_0_master, shard_0_replica, shard_0_ny_rdonly,
              shard_1_master, shard_1_slave1, shard_1_slave2, shard_1_ny_rdonly,
              shard_1_rdonly1]:
      t.wait_for_vttablet_state('NOT_SERVING')
    # reparent to make the tablets work
    utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/-80',
                     shard_0_master.tablet_alias], auto_log=True)
    utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/80-',
                     shard_1_master.tablet_alias], auto_log=True)
    # check the shards
    shards = utils.run_vtctl_json(['FindAllShardsInKeyspace', 'test_keyspace'])
    self.assertIn('-80', shards, 'unexpected shards: %s' % str(shards))
    self.assertIn('80-', shards, 'unexpected shards: %s' % str(shards))
    self.assertEqual(len(shards), 2, 'unexpected shards: %s' % str(shards))
    # create the tables
    self._create_schema()
    self._insert_startup_values()
    # run a health check on source replicas so they respond to discovery
    # (for binlog players) and on the source rdonlys (for workers)
    for t in [shard_0_replica, shard_1_slave1]:
      utils.run_vtctl(['RunHealthCheck', t.tablet_alias])
    for t in [shard_0_ny_rdonly, shard_1_ny_rdonly, shard_1_rdonly1]:
      utils.run_vtctl(['RunHealthCheck', t.tablet_alias])
    # create the split shards
    shard_2_master.init_tablet('replica', 'test_keyspace', '80-c0')
    shard_2_replica1.init_tablet('replica', 'test_keyspace', '80-c0')
    shard_2_replica2.init_tablet('replica', 'test_keyspace', '80-c0')
    shard_2_rdonly1.init_tablet('rdonly', 'test_keyspace', '80-c0')
    shard_3_master.init_tablet('replica', 'test_keyspace', 'c0-')
    shard_3_replica.init_tablet('replica', 'test_keyspace', 'c0-')
    shard_3_rdonly1.init_tablet('rdonly', 'test_keyspace', 'c0-')
    # start vttablet on the split shards (no db created,
    # so they're all not serving)
    shard_2_master.start_vttablet(wait_for_state=None,
                                  binlog_use_v3_resharding_mode=False)
    shard_3_master.start_vttablet(wait_for_state=None,
                                  binlog_use_v3_resharding_mode=False)
    for t in [shard_2_replica1, shard_2_replica2, shard_2_rdonly1,
              shard_3_replica, shard_3_rdonly1]:
      t.start_vttablet(wait_for_state=None,
                       binlog_use_v3_resharding_mode=False)
    for t in [shard_2_master, shard_2_replica1, shard_2_replica2,
              shard_2_rdonly1,
              shard_3_master, shard_3_replica, shard_3_rdonly1]:
      t.wait_for_vttablet_state('NOT_SERVING')
    utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/80-c0',
                     shard_2_master.tablet_alias], auto_log=True)
    utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/c0-',
                     shard_3_master.tablet_alias], auto_log=True)
    # check the shards
    shards = utils.run_vtctl_json(['FindAllShardsInKeyspace', 'test_keyspace'])
    for s in ['-80', '80-', '80-c0', 'c0-']:
      self.assertIn(s, shards, 'unexpected shards: %s' % str(shards))
    self.assertEqual(len(shards), 4, 'unexpected shards: %s' % str(shards))
    utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'],
                    auto_log=True)
    utils.check_srv_keyspace(
        'test_nj', 'test_keyspace',
        'Partitions(master): -80 80-\n'
        'Partitions(rdonly): -80 80-\n'
        'Partitions(replica): -80 80-\n',
        keyspace_id_type=base_sharding.keyspace_id_type,
        sharding_column_name='custom_ksid_col')
    # disable shard_1_slave2, so we're sure filtered replication will go
    # from shard_1_slave1
    utils.run_vtctl(['ChangeSlaveType', shard_1_slave2.tablet_alias, 'spare'])
    shard_1_slave2.wait_for_vttablet_state('NOT_SERVING')
    # we need to create the schema, and the worker will do data copying
    for keyspace_shard in ('test_keyspace/80-c0', 'test_keyspace/c0-'):
      utils.run_vtctl(['CopySchemaShard', '--exclude_tables', 'unrelated',
                       shard_1_rdonly1.tablet_alias, keyspace_shard],
                      auto_log=True)
    # Run vtworker as daemon for the following SplitClone commands.
    worker_proc, worker_port, worker_rpc_port = utils.run_vtworker_bg(
        ['--cell', 'test_nj', '--command_display_interval', '10ms',
         '--use_v3_resharding_mode=false'],
        auto_log=True)
    # Copy the data from the source to the destination shards.
    # --max_tps is only specified to enable the throttler and ensure that the
    # code is executed. But the intent here is not to throttle the test, hence
    # the rate limit is set very high.
    #
    # Initial clone (online).
    workerclient_proc = utils.run_vtworker_client_bg(
        ['SplitClone',
         '--offline=false',
         '--exclude_tables', 'unrelated',
         '--chunk_count', '10',
         '--min_rows_per_chunk', '1',
         '--min_healthy_rdonly_tablets', '1',
         '--max_tps', '9999',
         'test_keyspace/80-'],
        worker_rpc_port)
    utils.wait_procs([workerclient_proc])
    self.verify_reconciliation_counters(worker_port, 'Online', 'resharding1',
                                        2, 0, 0, 0)
    # Reset vtworker such that we can run the next command.
    workerclient_proc = utils.run_vtworker_client_bg(['Reset'], worker_rpc_port)
    utils.wait_procs([workerclient_proc])
    # Test the correct handling of keyspace_id changes which happen after
    # the first clone.
    # Let row 2 go to shard 3 instead of shard 2.
    shard_1_master.mquery('vt_test_keyspace',
                          'update resharding1 set'
                          ' custom_ksid_col=0xD000000000000000 WHERE id=2',
                          write=True)
    workerclient_proc = utils.run_vtworker_client_bg(
        ['SplitClone',
         '--offline=false',
         '--exclude_tables', 'unrelated',
         '--chunk_count', '10',
         '--min_rows_per_chunk', '1',
         '--min_healthy_rdonly_tablets', '1',
         '--max_tps', '9999',
         'test_keyspace/80-'],
        worker_rpc_port)
    utils.wait_procs([workerclient_proc])
    # Row 2 will be deleted from shard 2 and inserted to shard 3.
    self.verify_reconciliation_counters(worker_port, 'Online', 'resharding1',
                                        1, 0, 1, 1)
    self._check_value(shard_2_master, 'resharding1', 2, 'msg2',
                      0xD000000000000000, should_be_here=False)
    self._check_value(shard_3_master, 'resharding1', 2, 'msg2',
                      0xD000000000000000)
    # Reset vtworker such that we can run the next command.
    workerclient_proc = utils.run_vtworker_client_bg(['Reset'], worker_rpc_port)
    utils.wait_procs([workerclient_proc])
    # Move row 2 back to shard 2 from shard 3 by changing the keyspace_id again.
    shard_1_master.mquery('vt_test_keyspace',
                          'update resharding1 set'
                          ' custom_ksid_col=0x9000000000000000 WHERE id=2',
                          write=True)
    workerclient_proc = utils.run_vtworker_client_bg(
        ['SplitClone',
         '--offline=false',
         '--exclude_tables', 'unrelated',
         '--chunk_count', '10',
         '--min_rows_per_chunk', '1',
         '--min_healthy_rdonly_tablets', '1',
         '--max_tps', '9999',
         'test_keyspace/80-'],
        worker_rpc_port)
    utils.wait_procs([workerclient_proc])
    # Row 2 will be deleted from shard 3 and inserted to shard 2.
    self.verify_reconciliation_counters(worker_port, 'Online', 'resharding1',
                                        1, 0, 1, 1)
    self._check_value(shard_2_master, 'resharding1', 2, 'msg2',
                      0x9000000000000000)
    self._check_value(shard_3_master, 'resharding1', 2, 'msg2',
                      0x9000000000000000, should_be_here=False)
    # Reset vtworker such that we can run the next command.
    workerclient_proc = utils.run_vtworker_client_bg(['Reset'], worker_rpc_port)
    utils.wait_procs([workerclient_proc])
    # Modify the destination shard. SplitClone will revert the changes.
    # Delete row 2 (provokes an insert).
    shard_2_master.mquery('vt_test_keyspace',
                          'delete from resharding1 where id=2', write=True)
    # Update row 3 (provokes an update).
    shard_3_master.mquery('vt_test_keyspace',
                          "update resharding1 set msg='msg-not-3' where id=3",
                          write=True)
    # Insert row 4 and 5 (provokes a delete).
    self._insert_value(shard_3_master, 'resharding1', 4, 'msg4',
                       0xD000000000000000)
    self._insert_value(shard_3_master, 'resharding1', 5, 'msg5',
                       0xD000000000000000)
    workerclient_proc = utils.run_vtworker_client_bg(
        ['SplitClone',
         '--exclude_tables', 'unrelated',
         '--chunk_count', '10',
         '--min_rows_per_chunk', '1',
         '--min_healthy_rdonly_tablets', '1',
         '--max_tps', '9999',
         'test_keyspace/80-'],
        worker_rpc_port)
    utils.wait_procs([workerclient_proc])
    # Change tablet, which was taken offline, back to rdonly.
    utils.run_vtctl(['ChangeSlaveType', shard_1_rdonly1.tablet_alias,
                     'rdonly'], auto_log=True)
    self.verify_reconciliation_counters(worker_port, 'Online', 'resharding1',
                                        1, 1, 2, 0)
    self.verify_reconciliation_counters(worker_port, 'Offline', 'resharding1',
                                        0, 0, 0, 2)
    # Terminate worker daemon because it is no longer needed.
    utils.kill_sub_process(worker_proc, soft=True)
    # TODO(alainjobart): experiment with the dontStartBinlogPlayer option
    # check the startup values are in the right place
    self._check_startup_values()
    # check the schema too
    utils.run_vtctl(['ValidateSchemaKeyspace', '--exclude_tables=unrelated',
                     'test_keyspace'], auto_log=True)
    # check the binlog players are running and exporting vars
    self.check_destination_master(shard_2_master, ['test_keyspace/80-'])
    self.check_destination_master(shard_3_master, ['test_keyspace/80-'])
    # When the binlog players/filtered replication is turned on, the query
    # service must be turned off on the destination masters.
    # The tested behavior is a safeguard to prevent that somebody can
    # accidentally modify data on the destination masters while they are not
    # migrated yet and the source shards are still the source of truth.
    shard_2_master.wait_for_vttablet_state('NOT_SERVING')
    shard_3_master.wait_for_vttablet_state('NOT_SERVING')
    # check that binlog server exported the stats vars
    self.check_binlog_server_vars(shard_1_slave1, horizontal=True)
    # Check that the throttler was enabled.
    self.check_throttler_service(shard_2_master.rpc_endpoint(),
                                 ['BinlogPlayer/0'], 9999)
    self.check_throttler_service(shard_3_master.rpc_endpoint(),
                                 ['BinlogPlayer/0'], 9999)
    # testing filtered replication: insert a bunch of data on shard 1,
    # check we get most of it after a few seconds, wait for binlog server
    # timeout, check we get all of it.
    logging.debug('Inserting lots of data on source shard')
    self._insert_lots(1000)
    logging.debug('Executing MultiValue Insert Queries')
    self._exec_multi_shard_dmls()
    logging.debug('Checking 80 percent of data is sent quickly')
    v = self._check_lots_timeout(1000, 80, 5)
    if v != 100:
      # small optimization: only do this check if we don't have all the data
      # already anyway.
      logging.debug('Checking all data goes through eventually')
      self._check_lots_timeout(1000, 100, 20)
    logging.debug('Checking no data was sent the wrong way')
    self._check_lots_not_present(1000)
    logging.debug('Checking MultiValue Insert Queries')
    self._check_multi_shard_values()
    self.check_binlog_player_vars(shard_2_master, ['test_keyspace/80-'],
                                  seconds_behind_master_max=30)
    self.check_binlog_player_vars(shard_3_master, ['test_keyspace/80-'],
                                  seconds_behind_master_max=30)
    self.check_binlog_server_vars(shard_1_slave1, horizontal=True,
                                  min_statements=1000, min_transactions=1000)
    # use vtworker to compare the data (after health-checking the destination
    # rdonly tablets so discovery works)
    utils.run_vtctl(['RunHealthCheck', shard_3_rdonly1.tablet_alias])
    logging.debug('Running vtworker SplitDiff')
    utils.run_vtworker(['-cell', 'test_nj',
                        '--use_v3_resharding_mode=false',
                        'SplitDiff',
                        '--exclude_tables', 'unrelated',
                        '--min_healthy_rdonly_tablets', '1',
                        'test_keyspace/c0-'],
                       auto_log=True)
    utils.run_vtctl(['ChangeSlaveType', shard_1_rdonly1.tablet_alias, 'rdonly'],
                    auto_log=True)
    utils.run_vtctl(['ChangeSlaveType', shard_3_rdonly1.tablet_alias, 'rdonly'],
                    auto_log=True)
    utils.pause('Good time to test vtworker for diffs')
    # get status for destination master tablets, make sure we have it all
    if base_sharding.use_rbr:
      # We submitted non-annotated DMLs, that are properly routed
      # with RBR, but not with SBR. So the first shard counts
      # are smaller. In the second shard, we submitted statements
      # that affect more than one keyspace id. These will result
      # in two queries with RBR. So the count there is higher.
      self.check_running_binlog_player(shard_2_master, 4018, 2008)
      self.check_running_binlog_player(shard_3_master, 4028, 2008)
    else:
      self.check_running_binlog_player(shard_2_master, 4022, 2008)
      self.check_running_binlog_player(shard_3_master, 4024, 2008)
    # start a thread to insert data into shard_1 in the background
    # with current time, and monitor the delay
    insert_thread_1 = InsertThread(shard_1_master, 'insert_low', 1, 10000,
                                   0x9000000000000000)
    insert_thread_2 = InsertThread(shard_1_master, 'insert_high', 2, 10001,
                                   0xD000000000000000)
    monitor_thread_1 = MonitorLagThread(shard_2_replica2, 'insert_low', 1)
    monitor_thread_2 = MonitorLagThread(shard_3_replica, 'insert_high', 2)
    # tests a failover switching serving to a different replica
    utils.run_vtctl(['ChangeSlaveType', shard_1_slave2.tablet_alias, 'replica'])
    utils.run_vtctl(['ChangeSlaveType', shard_1_slave1.tablet_alias, 'spare'])
    shard_1_slave2.wait_for_vttablet_state('SERVING')
    shard_1_slave1.wait_for_vttablet_state('NOT_SERVING')
    utils.run_vtctl(['RunHealthCheck', shard_1_slave2.tablet_alias])
    # test data goes through again
    logging.debug('Inserting lots of data on source shard')
    self._insert_lots(1000, base=1000)
    logging.debug('Checking 80 percent of data was sent quickly')
    self._check_lots_timeout(1000, 80, 5, base=1000)
    self.check_binlog_server_vars(shard_1_slave2, horizontal=True,
                                  min_statements=800, min_transactions=800)
    # check we can't migrate the master just yet
    utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'master'],
                    expect_fail=True)
    # check query service is off on master 2 and master 3, as filtered
    # replication is enabled. Even health check that is enabled on
    # master 3 should not interfere (we run it to be sure).
    utils.run_vtctl(['RunHealthCheck', shard_3_master.tablet_alias],
                    auto_log=True)
    for master in [shard_2_master, shard_3_master]:
      utils.check_tablet_query_service(self, master, False, False)
      stream_health = utils.run_vtctl_json(['VtTabletStreamHealth',
                                            '-count', '1',
                                            master.tablet_alias])
      logging.debug('Got health: %s', str(stream_health))
      self.assertIn('realtime_stats', stream_health)
      self.assertNotIn('serving', stream_health)
    # check the destination master 3 is healthy, even though its query
    # service is not running (if not healthy this would exception out)
    shard_3_master.get_healthz()
    # now serve rdonly from the split shards, in test_nj only
    utils.run_vtctl(['MigrateServedTypes', '--cells=test_nj',
                     'test_keyspace/80-', 'rdonly'], auto_log=True)
    utils.check_srv_keyspace('test_nj', 'test_keyspace',
                             'Partitions(master): -80 80-\n'
                             'Partitions(rdonly): -80 80-c0 c0-\n'
                             'Partitions(replica): -80 80-\n',
                             keyspace_id_type=base_sharding.keyspace_id_type,
                             sharding_column_name='custom_ksid_col')
    utils.check_srv_keyspace('test_ny', 'test_keyspace',
                             'Partitions(master): -80 80-\n'
                             'Partitions(rdonly): -80 80-\n'
                             'Partitions(replica): -80 80-\n',
                             keyspace_id_type=base_sharding.keyspace_id_type,
                             sharding_column_name='custom_ksid_col')
    utils.check_tablet_query_service(self, shard_0_ny_rdonly, True, False)
    utils.check_tablet_query_service(self, shard_1_ny_rdonly, True, False)
    utils.check_tablet_query_service(self, shard_1_rdonly1, False, True)
    # now serve rdonly from the split shards, everywhere
    utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'rdonly'],
                    auto_log=True)
    utils.check_srv_keyspace('test_nj', 'test_keyspace',
                             'Partitions(master): -80 80-\n'
                             'Partitions(rdonly): -80 80-c0 c0-\n'
                             'Partitions(replica): -80 80-\n',
                             keyspace_id_type=base_sharding.keyspace_id_type,
                             sharding_column_name='custom_ksid_col')
    utils.check_srv_keyspace('test_ny', 'test_keyspace',
                             'Partitions(master): -80 80-\n'
                             'Partitions(rdonly): -80 80-c0 c0-\n'
                             'Partitions(replica): -80 80-\n',
                             keyspace_id_type=base_sharding.keyspace_id_type,
                             sharding_column_name='custom_ksid_col')
    utils.check_tablet_query_service(self, shard_0_ny_rdonly, True, False)
    utils.check_tablet_query_service(self, shard_1_ny_rdonly, False, True)
    utils.check_tablet_query_service(self, shard_1_rdonly1, False, True)
    # then serve replica from the split shards
    destination_shards = ['test_keyspace/80-c0', 'test_keyspace/c0-']
    utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'replica'],
                    auto_log=True)
    utils.check_srv_keyspace('test_nj', 'test_keyspace',
                             'Partitions(master): -80 80-\n'
                             'Partitions(rdonly): -80 80-c0 c0-\n'
                             'Partitions(replica): -80 80-c0 c0-\n',
                             keyspace_id_type=base_sharding.keyspace_id_type,
                             sharding_column_name='custom_ksid_col')
    utils.check_tablet_query_service(self, shard_1_slave2, False, True)
    # move replica back and forth
    utils.run_vtctl(
        ['MigrateServedTypes', '-reverse', 'test_keyspace/80-', 'replica'],
        auto_log=True)
    # After a backwards migration, queryservice should be enabled on
    # source and disabled on destinations
    utils.check_tablet_query_service(self, shard_1_slave2, True, False)
    # Destination tablets would have query service disabled for other
    # reasons than the migration, so check the shard record instead of
    # the tablets directly.
    utils.check_shard_query_services(self, destination_shards,
                                     topodata_pb2.REPLICA, False)
    utils.check_srv_keyspace('test_nj', 'test_keyspace',
                             'Partitions(master): -80 80-\n'
                             'Partitions(rdonly): -80 80-c0 c0-\n'
                             'Partitions(replica): -80 80-\n',
                             keyspace_id_type=base_sharding.keyspace_id_type,
                             sharding_column_name='custom_ksid_col')
    utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'replica'],
                    auto_log=True)
    # After a forwards migration, queryservice should be disabled on
    # source and enabled on destinations
    utils.check_tablet_query_service(self, shard_1_slave2, False, True)
    # Destination tablets would have query service disabled for other
    # reasons than the migration, so check the shard record instead of
    # the tablets directly
    utils.check_shard_query_services(self, destination_shards,
                                     topodata_pb2.REPLICA, True)
    utils.check_srv_keyspace('test_nj', 'test_keyspace',
                             'Partitions(master): -80 80-\n'
                             'Partitions(rdonly): -80 80-c0 c0-\n'
                             'Partitions(replica): -80 80-c0 c0-\n',
                             keyspace_id_type=base_sharding.keyspace_id_type,
                             sharding_column_name='custom_ksid_col')
    # reparent shard_2 to shard_2_replica1, then insert more data and
    # see it flow through still
    utils.run_vtctl(['PlannedReparentShard',
                     '-keyspace_shard', 'test_keyspace/80-c0',
                     '-new_master', shard_2_replica1.tablet_alias])
    # update our test variables to point at the new master
    shard_2_master, shard_2_replica1 = shard_2_replica1, shard_2_master
    logging.debug('Inserting lots of data on source shard after reparenting')
    self._insert_lots(3000, base=2000)
    logging.debug('Checking 80 percent of data was sent fairly quickly')
    self._check_lots_timeout(3000, 80, 10, base=2000)
    # use vtworker to compare the data again
    logging.debug('Running vtworker SplitDiff')
    utils.run_vtworker(['-cell', 'test_nj',
                        '--use_v3_resharding_mode=false',
                        'SplitDiff',
                        '--exclude_tables', 'unrelated',
                        '--min_healthy_rdonly_tablets', '1',
                        'test_keyspace/c0-'],
                       auto_log=True)
    utils.run_vtctl(['ChangeSlaveType', shard_1_rdonly1.tablet_alias, 'rdonly'],
                    auto_log=True)
    utils.run_vtctl(['ChangeSlaveType', shard_3_rdonly1.tablet_alias, 'rdonly'],
                    auto_log=True)
    # going to migrate the master now, check the delays
    monitor_thread_1.done = True
    monitor_thread_2.done = True
    insert_thread_1.done = True
    insert_thread_2.done = True
    logging.debug('DELAY 1: %s max_lag=%d ms avg_lag=%d ms',
                  monitor_thread_1.thread_name,
                  monitor_thread_1.max_lag_ms,
                  monitor_thread_1.lag_sum_ms / monitor_thread_1.sample_count)
    logging.debug('DELAY 2: %s max_lag=%d ms avg_lag=%d ms',
                  monitor_thread_2.thread_name,
                  monitor_thread_2.max_lag_ms,
                  monitor_thread_2.lag_sum_ms / monitor_thread_2.sample_count)
    # mock with the SourceShard records to test 'vtctl SourceShardDelete'
    # and 'vtctl SourceShardAdd'
    utils.run_vtctl(['SourceShardDelete', 'test_keyspace/c0-', '0'],
                    auto_log=True)
    utils.run_vtctl(['SourceShardAdd', '--key_range=80-',
                     'test_keyspace/c0-', '0', 'test_keyspace/80-'],
                    auto_log=True)
    # then serve master from the split shards, make sure the source master's
    # query service is now turned off
    utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'master'],
                    auto_log=True)
    utils.check_srv_keyspace('test_nj', 'test_keyspace',
                             'Partitions(master): -80 80-c0 c0-\n'
                             'Partitions(rdonly): -80 80-c0 c0-\n'
                             'Partitions(replica): -80 80-c0 c0-\n',
                             keyspace_id_type=base_sharding.keyspace_id_type,
                             sharding_column_name='custom_ksid_col')
    utils.check_tablet_query_service(self, shard_1_master, False, True)
    # check the binlog players are gone now
    self.check_no_binlog_player(shard_2_master)
    self.check_no_binlog_player(shard_3_master)
    # delete the original tablets in the original shard
    tablet.kill_tablets([shard_1_master, shard_1_slave1, shard_1_slave2,
                         shard_1_ny_rdonly, shard_1_rdonly1])
    for t in [shard_1_slave1, shard_1_slave2, shard_1_ny_rdonly,
              shard_1_rdonly1]:
      utils.run_vtctl(['DeleteTablet', t.tablet_alias], auto_log=True)
    utils.run_vtctl(['DeleteTablet', '-allow_master',
                     shard_1_master.tablet_alias], auto_log=True)
    # rebuild the serving graph, all mentions of the old shards shoud be gone
    utils.run_vtctl(
        ['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
    # test RemoveShardCell
    utils.run_vtctl(
        ['RemoveShardCell', 'test_keyspace/-80', 'test_nj'], auto_log=True,
        expect_fail=True)
    utils.run_vtctl(
        ['RemoveShardCell', 'test_keyspace/80-', 'test_nj'], auto_log=True)
    utils.run_vtctl(
        ['RemoveShardCell', 'test_keyspace/80-', 'test_ny'], auto_log=True)
    shard = utils.run_vtctl_json(['GetShard', 'test_keyspace/80-'])
    self.assertTrue('cells' not in shard or not shard['cells'])
    # delete the original shard
    utils.run_vtctl(['DeleteShard', 'test_keyspace/80-'], auto_log=True)
    # make sure we can't delete the destination shard now that it's serving
    _, stderr = utils.run_vtctl(['DeleteShard', 'test_keyspace/80-c0'],
                                expect_fail=True)
    self.assertIn('is still serving, cannot delete it', stderr)
    # kill everything
    tablet.kill_tablets([shard_0_master, shard_0_replica, shard_0_ny_rdonly,
                         shard_2_master, shard_2_replica1, shard_2_replica2,
                         shard_2_rdonly1,
                         shard_3_master, shard_3_replica, shard_3_rdonly1])
# Delegate flag parsing and test execution to the shared test utils runner.
if __name__ == '__main__':
  utils.main()
# ---------------------------------------------------------------------------
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
import os
import re
import shutil
import time
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
import six
from neutron.agent.common import utils as common_utils
from neutron.agent.linux import external_process
from neutron.agent.linux import ip_lib
from neutron.agent.linux import iptables_manager
from neutron.agent.linux import utils
from neutron.common import constants
from neutron.common import exceptions
from neutron.common import ipv6_utils
from neutron.common import utils as commonutils
from neutron.extensions import extra_dhcp_opt as edo_ext
from neutron.i18n import _LI, _LW, _LE
LOG = logging.getLogger(__name__)
# Transport protocol names used when building dnsmasq/firewall arguments.
UDP = 'udp'
TCP = 'tcp'
# Well-known ports served from inside the dhcp namespace.
DNS_PORT = 53
DHCPV4_PORT = 67
DHCPV6_PORT = 547
# Link-local metadata service address (169.254.169.254/16) and port.
METADATA_DEFAULT_PREFIX = 16
METADATA_DEFAULT_IP = '169.254.169.254'
METADATA_DEFAULT_CIDR = '%s/%d' % (METADATA_DEFAULT_IP,
                                   METADATA_DEFAULT_PREFIX)
METADATA_PORT = 80
# DHCP option code 249 -- presumably the Windows 2003 static-route/DNS
# option (name suggests so); confirm against the driver that emits it.
WIN2k3_STATIC_DNS = 249
# Prefix for per-network dhcp namespaces; service name registered with
# the external process monitor.
NS_PREFIX = 'qdhcp-'
DNSMASQ_SERVICE_NAME = 'dnsmasq'
class DictModel(dict):
    """Convert dict into an object that provides attribute access to values.

    Nested plain dicts are recursively promoted to DictModel, including
    dicts held inside list/tuple values (the container type is preserved).
    Attribute reads/writes/deletes map to item access; a missing key
    raises AttributeError instead of KeyError.
    """
    def __init__(self, *args, **kwargs):
        """Convert dict values to DictModel values."""
        super(DictModel, self).__init__(*args, **kwargs)
        def needs_upgrade(item):
            """Check if `item` is a dict and needs to be changed to DictModel.
            """
            return isinstance(item, dict) and not isinstance(item, DictModel)
        def upgrade(item):
            """Upgrade item if it needs to be upgraded."""
            if needs_upgrade(item):
                return DictModel(item)
            else:
                return item
        # Iterating the dict directly (instead of six.iteritems) works the
        # same on Python 2 and 3; only values are replaced, never keys, so
        # mutation during iteration is safe.
        for key in self:
            value = self[key]
            if isinstance(value, (list, tuple)):
                # Keep the same container type but convert dicts to DictModels
                self[key] = type(value)(
                    (upgrade(item) for item in value)
                )
            elif needs_upgrade(value):
                # Change dict instance values to DictModel instance values
                self[key] = DictModel(value)
    def __getattr__(self, name):
        # Only called when normal attribute lookup fails; translate the
        # KeyError into the AttributeError callers of getattr() expect.
        try:
            return self[name]
        except KeyError as e:
            raise AttributeError(e)
    def __setattr__(self, name, value):
        self[name] = value
    def __delattr__(self, name):
        del self[name]
class NetModel(DictModel):
    """DictModel for a network dict, optionally bound to a qdhcp namespace."""
    def __init__(self, use_namespaces, d):
        """Wrap network dict `d`; derive the namespace name when enabled.

        :param use_namespaces: truthy to compute '<NS_PREFIX><network id>',
            falsy to leave the namespace as None.
        :param d: network attributes dict (must contain 'id').
        """
        super(NetModel, self).__init__(d)
        # Conditional expression instead of the fragile `X and Y or Z`
        # idiom (which silently breaks if Y is ever falsy).
        self._ns_name = ("%s%s" % (NS_PREFIX, self.id)
                         if use_namespaces else None)
    @property
    def namespace(self):
        return self._ns_name
@six.add_metaclass(abc.ABCMeta)
class DhcpBase(object):
    """Abstract interface for a per-network DHCP service driver.

    Concrete drivers implement enable/disable/reload_allocations and the
    `active` property; the classmethods below are hooks a driver may
    override (they raise NotImplementedError here).
    """
    def __init__(self, conf, network, process_monitor,
                 version=None, plugin=None):
        # conf: agent configuration object.
        # network: network model the driver serves DHCP for.
        # process_monitor: monitor used to watch the spawned process.
        # version: DHCP server version -- semantics are driver-specific.
        # plugin: passed through to DeviceManager for port operations.
        self.conf = conf
        self.network = network
        self.process_monitor = process_monitor
        self.device_manager = DeviceManager(self.conf, plugin)
        self.version = version
    @abc.abstractmethod
    def enable(self):
        """Enables DHCP for this network."""
    @abc.abstractmethod
    def disable(self, retain_port=False):
        """Disable dhcp for this network."""
    def restart(self):
        """Restart the dhcp service for the network."""
        # retain_port=True keeps the DHCP port so enable() can reuse it.
        self.disable(retain_port=True)
        self.enable()
    @abc.abstractproperty
    def active(self):
        """Boolean representing the running state of the DHCP server."""
    @abc.abstractmethod
    def reload_allocations(self):
        """Force the DHCP server to reload the assignment database."""
    @classmethod
    def existing_dhcp_networks(cls, conf):
        """Return a list of existing networks ids that we have configs for."""
        raise NotImplementedError()
    @classmethod
    def check_version(cls):
        """Execute version checks on DHCP server."""
        raise NotImplementedError()
    @classmethod
    def get_isolated_subnets(cls, network):
        """Returns a dict indicating whether or not a subnet is isolated"""
        raise NotImplementedError()
    @classmethod
    def should_enable_metadata(cls, conf, network):
        """True if the metadata-proxy should be enabled for the network."""
        raise NotImplementedError()
class DhcpLocalProcess(DhcpBase):
PORTS = []
    def __init__(self, conf, network, process_monitor, version=None,
                 plugin=None):
        """Initialize the driver and ensure the per-network config dir exists.

        The directory <dhcp_confs>/<network id> holds this network's
        state files (e.g. 'pid', 'interface').
        """
        super(DhcpLocalProcess, self).__init__(conf, network, process_monitor,
                                               version, plugin)
        self.confs_dir = self.get_confs_dir(conf)
        self.network_conf_dir = os.path.join(self.confs_dir, network.id)
        commonutils.ensure_dir(self.network_conf_dir)
@staticmethod
def get_confs_dir(conf):
return os.path.abspath(os.path.normpath(conf.dhcp_confs))
def get_conf_file_name(self, kind):
"""Returns the file name for a given kind of config file."""
return os.path.join(self.network_conf_dir, kind)
    def _remove_config_files(self):
        """Delete this network's entire config directory, ignoring errors."""
        shutil.rmtree(self.network_conf_dir, ignore_errors=True)
def _enable_dhcp(self):
"""check if there is a subnet within the network with dhcp enabled."""
for subnet in self.network.subnets:
if subnet.enable_dhcp:
return True
return False
def enable(self):
"""Enables DHCP for this network by spawning a local process."""
if self.active:
self.restart()
elif self._enable_dhcp():
commonutils.ensure_dir(self.network_conf_dir)
interface_name = self.device_manager.setup(self.network)
self.interface_name = interface_name
self.spawn_process()
def _get_process_manager(self, cmd_callback=None):
return external_process.ProcessManager(
conf=self.conf,
uuid=self.network.id,
namespace=self.network.namespace,
default_cmd_callback=cmd_callback,
pid_file=self.get_conf_file_name('pid'),
run_as_root=True)
def disable(self, retain_port=False):
"""Disable DHCP for this network by killing the local process."""
self.process_monitor.unregister(self.network.id, DNSMASQ_SERVICE_NAME)
self._get_process_manager().disable()
if not retain_port:
self._destroy_namespace_and_port()
self._remove_config_files()
def _destroy_namespace_and_port(self):
try:
self.device_manager.destroy(self.network, self.interface_name)
except RuntimeError:
LOG.warning(_LW('Failed trying to delete interface: %s'),
self.interface_name)
if self.network.namespace:
ns_ip = ip_lib.IPWrapper(namespace=self.network.namespace)
try:
ns_ip.netns.delete(self.network.namespace)
except RuntimeError:
LOG.warning(_LW('Failed trying to delete namespace: %s'),
self.network.namespace)
def _get_value_from_conf_file(self, kind, converter=None):
"""A helper function to read a value from one of the state files."""
file_name = self.get_conf_file_name(kind)
msg = _('Error while reading %s')
try:
with open(file_name, 'r') as f:
try:
return converter(f.read()) if converter else f.read()
except ValueError:
msg = _('Unable to convert value in %s')
except IOError:
msg = _('Unable to access %s')
LOG.debug(msg, file_name)
return None
@property
def interface_name(self):
return self._get_value_from_conf_file('interface')
@interface_name.setter
def interface_name(self, value):
interface_file_path = self.get_conf_file_name('interface')
utils.replace_file(interface_file_path, value)
@property
def active(self):
return self._get_process_manager().active
@abc.abstractmethod
def spawn_process(self):
pass
class Dnsmasq(DhcpLocalProcess):
    """DHCP driver that manages one dnsmasq process per network."""
    # The ports that need to be opened when security policies are active
    # on the Neutron port used for DHCP. These are provided as a convenience
    # for users of this class.
    PORTS = {constants.IP_VERSION_4:
             [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV4_PORT)],
             constants.IP_VERSION_6:
             [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV6_PORT)],
             }
    # Per-subnet dnsmasq tag, formatted with the subnet's index.
    _TAG_PREFIX = 'tag%d'
    # Prefix marking a dnsmasq client-id field in the hosts file.
    _ID = 'id:'
    @classmethod
    def check_version(cls):
        # No version constraints are enforced for dnsmasq here.
        pass
    @classmethod
    def existing_dhcp_networks(cls, conf):
        """Return a list of existing networks ids that we have configs for."""
        confs_dir = cls.get_confs_dir(conf)
        try:
            # Each per-network conf directory is named after the network
            # UUID; anything else in the directory is ignored.
            return [
                c for c in os.listdir(confs_dir)
                if uuidutils.is_uuid_like(c)
            ]
        except OSError:
            return []
    def _build_cmdline_callback(self, pid_file):
        """Build the dnsmasq command line for this network.

        :param pid_file: path where dnsmasq writes its pid.
        :returns: list of command-line arguments.
        """
        cmd = [
            'dnsmasq',
            '--no-hosts',
            '--no-resolv',
            '--strict-order',
            '--except-interface=lo',
            '--pid-file=%s' % pid_file,
            '--dhcp-hostsfile=%s' % self.get_conf_file_name('host'),
            '--addn-hosts=%s' % self.get_conf_file_name('addn_hosts'),
            '--dhcp-optsfile=%s' % self.get_conf_file_name('opts'),
            '--dhcp-leasefile=%s' % self.get_conf_file_name('leases'),
            # Tag iPXE clients (DHCP option 175) so options can target them.
            '--dhcp-match=set:ipxe,175',
        ]
        if self.device_manager.driver.bridged:
            cmd += [
                '--bind-interfaces',
                '--interface=%s' % self.interface_name,
            ]
        else:
            # Non-bridged drivers also listen on tap devices and bridge
            # them to the DHCP interface.
            cmd += [
                '--bind-dynamic',
                '--interface=%s' % self.interface_name,
                '--interface=tap*',
                '--bridge-interface=%s,tap*' % self.interface_name,
            ]
        possible_leases = 0
        for i, subnet in enumerate(self.network.subnets):
            mode = None
            # if a subnet is specified to have dhcp disabled
            if not subnet.enable_dhcp:
                continue
            if subnet.ip_version == 4:
                mode = 'static'
            else:
                # Note(scollins) If the IPv6 attributes are not set, set it as
                # static to preserve previous behavior
                addr_mode = getattr(subnet, 'ipv6_address_mode', None)
                ra_mode = getattr(subnet, 'ipv6_ra_mode', None)
                if (addr_mode in [constants.DHCPV6_STATEFUL,
                                  constants.DHCPV6_STATELESS] or
                        not addr_mode and not ra_mode):
                    mode = 'static'
            cidr = netaddr.IPNetwork(subnet.cidr)
            if self.conf.dhcp_lease_duration == -1:
                lease = 'infinite'
            else:
                lease = '%ss' % self.conf.dhcp_lease_duration
            # mode is optional and is not set - skip it
            if mode:
                if subnet.ip_version == 4:
                    cmd.append('--dhcp-range=%s%s,%s,%s,%s' %
                               ('set:', self._TAG_PREFIX % i,
                                cidr.network, mode, lease))
                else:
                    # IPv6 ranges additionally carry the prefix length.
                    cmd.append('--dhcp-range=%s%s,%s,%s,%d,%s' %
                               ('set:', self._TAG_PREFIX % i,
                                cidr.network, mode,
                                cidr.prefixlen, lease))
                possible_leases += cidr.size
        if cfg.CONF.advertise_mtu:
            # NOTE(review): assumes the network object always carries an
            # 'mtu' attribute when advertise_mtu is on -- confirm; other
            # attributes in this module are read via getattr with defaults.
            mtu = self.network.mtu
            # Do not advertise unknown mtu
            if mtu > 0:
                cmd.append('--dhcp-option-force=option:mtu,%d' % mtu)
        # Cap the limit because creating lots of subnets can inflate
        # this possible lease cap.
        cmd.append('--dhcp-lease-max=%d' %
                   min(possible_leases, self.conf.dnsmasq_lease_max))
        cmd.append('--conf-file=%s' % self.conf.dnsmasq_config_file)
        if self.conf.dnsmasq_dns_servers:
            cmd.extend(
                '--server=%s' % server
                for server in self.conf.dnsmasq_dns_servers)
        if self.conf.dhcp_domain:
            cmd.append('--domain=%s' % self.conf.dhcp_domain)
        if self.conf.dhcp_broadcast_reply:
            cmd.append('--dhcp-broadcast')
        if self.conf.dnsmasq_base_log_dir:
            try:
                if not os.path.exists(self.conf.dnsmasq_base_log_dir):
                    os.makedirs(self.conf.dnsmasq_base_log_dir)
                log_filename = os.path.join(
                    self.conf.dnsmasq_base_log_dir,
                    self.network.id, 'dhcp_dns_log')
                cmd.append('--log-queries')
                cmd.append('--log-dhcp')
                cmd.append('--log-facility=%s' % log_filename)
            except OSError:
                # Logging setup is best-effort; dnsmasq still starts.
                LOG.error(_LE('Error while create dnsmasq base log dir: %s'),
                          self.conf.dnsmasq_base_log_dir)
        return cmd
    def spawn_process(self):
        """Spawn the process, if it's not spawned already."""
        # we only need to generate the lease file the first time dnsmasq starts
        # rather than on every reload since dnsmasq will keep the file current
        self._output_init_lease_file()
        self._spawn_or_reload_process(reload_with_HUP=False)
    def _spawn_or_reload_process(self, reload_with_HUP):
        """Spawns or reloads a Dnsmasq process for the network.

        When reload_with_HUP is True, dnsmasq receives a HUP signal,
        or it's reloaded if the process is not running.
        """
        self._output_config_files()
        pm = self._get_process_manager(
            cmd_callback=self._build_cmdline_callback)
        pm.enable(reload_cfg=reload_with_HUP)
        # Register with the monitor so a dead dnsmasq gets respawned.
        self.process_monitor.register(uuid=self.network.id,
                                      service_name=DNSMASQ_SERVICE_NAME,
                                      monitored_process=pm)
    def _release_lease(self, mac_address, ip, client_id):
        """Release a DHCP lease."""
        if netaddr.IPAddress(ip).version == constants.IP_VERSION_6:
            # Note(SridharG) dhcp_release is only supported for IPv4
            # addresses. For more details, please refer to man page.
            return
        cmd = ['dhcp_release', self.interface_name, ip, mac_address]
        if client_id:
            cmd.append(client_id)
        # dhcp_release must run inside the network's namespace.
        ip_wrapper = ip_lib.IPWrapper(namespace=self.network.namespace)
        ip_wrapper.netns.execute(cmd, run_as_root=True)
    def _output_config_files(self):
        # Regenerate all three dnsmasq input files.
        self._output_hosts_file()
        self._output_addn_hosts_file()
        self._output_opts_file()
    def reload_allocations(self):
        """Rebuild the dnsmasq config and signal the dnsmasq to reload."""
        # If all subnets turn off dhcp, kill the process.
        if not self._enable_dhcp():
            self.disable()
            LOG.debug('Killing dnsmasq for network since all subnets have '
                      'turned off DHCP: %s', self.network.id)
            return
        # Release leases for ports that no longer exist before reloading.
        self._release_unused_leases()
        self._spawn_or_reload_process(reload_with_HUP=True)
        LOG.debug('Reloading allocations for network: %s', self.network.id)
        self.device_manager.update(self.network, self.interface_name)
    def _sort_fixed_ips_for_dnsmasq(self, fixed_ips, v6_nets):
        """Sort fixed_ips so that stateless IPv6 subnets appear first.

        For example, If a port with v6 extra_dhcp_opts is on a network with
        IPv4 and IPv6 stateless subnets. Then dhcp host file will have
        below 2 entries for same MAC,
        fa:16:3e:8f:9d:65,30.0.0.5,set:aabc7d33-4874-429e-9637-436e4232d2cd
        (entry for IPv4 dhcp)
        fa:16:3e:8f:9d:65,set:aabc7d33-4874-429e-9637-436e4232d2cd
        (entry for stateless IPv6 for v6 options)
        dnsmasq internal details for processing host file entries
        1) dnsmasq reads the host file from EOF.
        2) So it first picks up stateless IPv6 entry,
           fa:16:3e:8f:9d:65,set:aabc7d33-4874-429e-9637-436e4232d2cd
        3) But dnsmasq doesn't have sufficient checks to skip this entry and
           pick next entry, to process dhcp IPv4 request.
        4) So dnsmasq uses this entry to process dhcp IPv4 request.
        5) As there is no ip in this entry, dnsmasq logs "no address available"
           and fails to send DHCPOFFER message.
        As we rely on internal details of dnsmasq to understand and fix the
        issue, Ihar sent a mail to dnsmasq-discuss mailing list
        http://lists.thekelleys.org.uk/pipermail/dnsmasq-discuss/2015q2/
        009650.html
        So If we reverse the order of writing entries in host file,
        so that entry for stateless IPv6 comes first,
        then dnsmasq can correctly fetch the IPv4 address.
        """
        return sorted(
            fixed_ips,
            key=lambda fip: ((fip.subnet_id in v6_nets) and (
                v6_nets[fip.subnet_id].ipv6_address_mode == (
                    constants.DHCPV6_STATELESS))),
            reverse=True)
    def _iter_hosts(self):
        """Iterate over hosts.

        For each host on the network we yield a tuple containing:
        (
            port,  # a DictModel instance representing the port.
            alloc,  # a DictModel instance of the allocated ip and subnet.
                    # if alloc is None, it means there is no need to allocate
                    # an IPv6 address because of stateless DHCPv6 network.
            host_name,  # Host name.
            name,  # Canonical hostname in the format 'hostname[.domain]'.
            no_dhcp,  # A flag indicating that the address doesn't need a DHCP
                      # IP address.
            no_opts,  # A flag indicating that options shouldn't be written
        )
        """
        v6_nets = dict((subnet.id, subnet) for subnet in
                       self.network.subnets if subnet.ip_version == 6)
        for port in self.network.ports:
            fixed_ips = self._sort_fixed_ips_for_dnsmasq(port.fixed_ips,
                                                         v6_nets)
            # Confirm whether Neutron server supports dns_name attribute in the
            # ports API
            dns_assignment = getattr(port, 'dns_assignment', None)
            if dns_assignment:
                dns_ip_map = {d.ip_address: d for d in dns_assignment}
            for alloc in fixed_ips:
                no_dhcp = False
                no_opts = False
                if alloc.subnet_id in v6_nets:
                    addr_mode = v6_nets[alloc.subnet_id].ipv6_address_mode
                    no_dhcp = addr_mode in (constants.IPV6_SLAAC,
                                            constants.DHCPV6_STATELESS)
                    # we don't setup anything for SLAAC. It doesn't make sense
                    # to provide options for a client that won't use DHCP
                    no_opts = addr_mode == constants.IPV6_SLAAC
                # If dns_name attribute is supported by ports API, return the
                # dns_assignment generated by the Neutron server. Otherwise,
                # generate hostname and fqdn locally (previous behaviour)
                if dns_assignment:
                    hostname = dns_ip_map[alloc.ip_address].hostname
                    fqdn = dns_ip_map[alloc.ip_address].fqdn
                else:
                    hostname = 'host-%s' % alloc.ip_address.replace(
                        '.', '-').replace(':', '-')
                    fqdn = hostname
                    if self.conf.dhcp_domain:
                        fqdn = '%s.%s' % (fqdn, self.conf.dhcp_domain)
                yield (port, alloc, hostname, fqdn, no_dhcp, no_opts)
    def _get_port_extra_dhcp_opts(self, port):
        # Returns the port's extra DHCP options, or False when absent.
        return getattr(port, edo_ext.EXTRADHCPOPTS, False)
    def _output_init_lease_file(self):
        """Write a fake lease file to bootstrap dnsmasq.

        The generated file is passed to the --dhcp-leasefile option of dnsmasq.
        This is used as a bootstrapping mechanism to avoid NAKing active leases
        when a dhcp server is scheduled to another agent. Using a leasefile
        will also prevent dnsmasq from NAKing or ignoring renewals after a
        restart.

        Format is as follows:
        epoch-timestamp mac_addr ip_addr hostname client-ID
        """
        filename = self.get_conf_file_name('leases')
        buf = six.StringIO()
        LOG.debug('Building initial lease file: %s', filename)
        # we make up a lease time for the database entry
        if self.conf.dhcp_lease_duration == -1:
            # Even with an infinite lease, a client may choose to renew a
            # previous lease on reboot or interface bounce so we should have
            # an entry for it.
            # Dnsmasq timestamp format for an infinite lease is 0.
            timestamp = 0
        else:
            timestamp = int(time.time()) + self.conf.dhcp_lease_duration
        dhcp_enabled_subnet_ids = [s.id for s in self.network.subnets
                                   if s.enable_dhcp]
        for host_tuple in self._iter_hosts():
            port, alloc, hostname, name, no_dhcp, no_opts = host_tuple
            # don't write ip address which belongs to a dhcp disabled subnet
            # or an IPv6 SLAAC/stateless subnet
            if no_dhcp or alloc.subnet_id not in dhcp_enabled_subnet_ids:
                continue
            ip_address = self._format_address_for_dnsmasq(alloc.ip_address)
            # all that matters is the mac address and IP. the hostname and
            # client ID will be overwritten on the next renewal.
            buf.write('%s %s %s * *\n' %
                      (timestamp, port.mac_address, ip_address))
        contents = buf.getvalue()
        utils.replace_file(filename, contents)
        LOG.debug('Done building initial lease file %s with contents:\n%s',
                  filename, contents)
        return filename
    @staticmethod
    def _format_address_for_dnsmasq(address):
        # (dzyu) Check if it is legal ipv6 address, if so, need wrap
        # it with '[]' to let dnsmasq to distinguish MAC address from
        # IPv6 address.
        if netaddr.valid_ipv6(address):
            return '[%s]' % address
        return address
    def _output_hosts_file(self):
        """Writes a dnsmasq compatible dhcp hosts file.

        The generated file is sent to the --dhcp-hostsfile option of dnsmasq,
        and lists the hosts on the network which should receive a dhcp lease.
        Each line in this file is in the form::

            'mac_address,FQDN,ip_address'

        IMPORTANT NOTE: a dnsmasq instance does not resolve hosts defined in
        this file if it did not give a lease to a host listed in it (e.g.:
        multiple dnsmasq instances on the same network if this network is on
        multiple network nodes). This file is only defining hosts which
        should receive a dhcp lease, the hosts resolution in itself is
        defined by the `_output_addn_hosts_file` method.
        """
        buf = six.StringIO()
        filename = self.get_conf_file_name('host')
        LOG.debug('Building host file: %s', filename)
        dhcp_enabled_subnet_ids = [s.id for s in self.network.subnets
                                   if s.enable_dhcp]
        # NOTE(ihrachyshka): the loop should not log anything inside it, to
        # avoid potential performance drop when lots of hosts are dumped
        for host_tuple in self._iter_hosts():
            port, alloc, hostname, name, no_dhcp, no_opts = host_tuple
            if no_dhcp:
                # No address entry, but still tag the MAC so per-port
                # extra options can be applied.
                if not no_opts and self._get_port_extra_dhcp_opts(port):
                    buf.write('%s,%s%s\n' %
                              (port.mac_address, 'set:', port.id))
                continue
            # don't write ip address which belongs to a dhcp disabled subnet.
            if alloc.subnet_id not in dhcp_enabled_subnet_ids:
                continue
            ip_address = self._format_address_for_dnsmasq(alloc.ip_address)
            if self._get_port_extra_dhcp_opts(port):
                client_id = self._get_client_id(port)
                if client_id and len(port.extra_dhcp_opts) > 1:
                    buf.write('%s,%s%s,%s,%s,%s%s\n' %
                              (port.mac_address, self._ID, client_id, name,
                               ip_address, 'set:', port.id))
                elif client_id and len(port.extra_dhcp_opts) == 1:
                    # client-id is the only extra option: no tag needed.
                    buf.write('%s,%s%s,%s,%s\n' %
                              (port.mac_address, self._ID, client_id, name,
                               ip_address))
                else:
                    buf.write('%s,%s,%s,%s%s\n' %
                              (port.mac_address, name, ip_address,
                               'set:', port.id))
            else:
                buf.write('%s,%s,%s\n' %
                          (port.mac_address, name, ip_address))
        utils.replace_file(filename, buf.getvalue())
        LOG.debug('Done building host file %s with contents:\n%s', filename,
                  buf.getvalue())
        return filename
    def _get_client_id(self, port):
        # Returns the DHCP client-id from the port's extra options, if any;
        # implicitly returns None otherwise.
        if self._get_port_extra_dhcp_opts(port):
            for opt in port.extra_dhcp_opts:
                if opt.opt_name == edo_ext.CLIENT_ID:
                    return opt.opt_value
    def _read_hosts_file_leases(self, filename):
        """Parse the hosts file into a set of (ip, mac, client_id) tuples."""
        leases = set()
        try:
            with open(filename) as f:
                for l in f.readlines():
                    host = l.strip().split(',')
                    mac = host[0]
                    client_id = None
                    if host[1].startswith('set:'):
                        # Tag-only line (no address) written for no_dhcp
                        # ports; it carries no lease.
                        continue
                    if host[1].startswith(self._ID):
                        ip = host[3].strip('[]')
                        client_id = host[1][len(self._ID):]
                    else:
                        ip = host[2].strip('[]')
                    leases.add((ip, mac, client_id))
        except (OSError, IOError):
            LOG.debug('Error while reading hosts file %s', filename)
        return leases
    def _release_unused_leases(self):
        """Release leases whose (ip, mac, client_id) no longer exist."""
        filename = self.get_conf_file_name('host')
        old_leases = self._read_hosts_file_leases(filename)
        new_leases = set()
        dhcp_port_exists = False
        dhcp_port_on_this_host = self.device_manager.get_device_id(
            self.network)
        for port in self.network.ports:
            client_id = self._get_client_id(port)
            for alloc in port.fixed_ips:
                new_leases.add((alloc.ip_address, port.mac_address, client_id))
            if port.device_id == dhcp_port_on_this_host:
                dhcp_port_exists = True
        for ip, mac, client_id in old_leases - new_leases:
            self._release_lease(mac, ip, client_id)
        if not dhcp_port_exists:
            # Our own DHCP port is gone; unplug the interface.
            self.device_manager.driver.unplug(
                self.interface_name, namespace=self.network.namespace)
    def _output_addn_hosts_file(self):
        """Writes a dnsmasq compatible additional hosts file.

        The generated file is sent to the --addn-hosts option of dnsmasq,
        and lists the hosts on the network which should be resolved even if
        the dnsmasq instance did not give a lease to the host (see the
        `_output_hosts_file` method).
        Each line in this file is in the same form as a standard /etc/hosts
        file.
        """
        buf = six.StringIO()
        for host_tuple in self._iter_hosts():
            port, alloc, hostname, fqdn, no_dhcp, no_opts = host_tuple
            # It is compulsory to write the `fqdn` before the `hostname` in
            # order to obtain it in PTR responses.
            if alloc:
                buf.write('%s\t%s %s\n' % (alloc.ip_address, fqdn, hostname))
        addn_hosts = self.get_conf_file_name('addn_hosts')
        utils.replace_file(addn_hosts, buf.getvalue())
        return addn_hosts
    def _output_opts_file(self):
        """Write a dnsmasq compatible options file."""
        options, subnet_index_map = self._generate_opts_per_subnet()
        options += self._generate_opts_per_port(subnet_index_map)
        name = self.get_conf_file_name('opts')
        utils.replace_file(name, '\n'.join(options))
        return name
    def _generate_opts_per_subnet(self):
        """Build per-subnet dnsmasq options.

        :returns: (options, subnet_index_map) where subnet_index_map maps
            subnet ids without explicit dns-servers to their tag index.
        """
        options = []
        subnet_index_map = {}
        if self.conf.enable_isolated_metadata or self.conf.force_metadata:
            subnet_to_interface_ip = self._make_subnet_interface_ip_map()
        isolated_subnets = self.get_isolated_subnets(self.network)
        for i, subnet in enumerate(self.network.subnets):
            addr_mode = getattr(subnet, 'ipv6_address_mode', None)
            if (not subnet.enable_dhcp or
                (subnet.ip_version == 6 and
                 addr_mode == constants.IPV6_SLAAC)):
                continue
            if subnet.dns_nameservers:
                options.append(
                    self._format_option(
                        subnet.ip_version, i, 'dns-server',
                        ','.join(
                            Dnsmasq._convert_to_literal_addrs(
                                subnet.ip_version, subnet.dns_nameservers))))
            else:
                # use the dnsmasq ip as nameservers only if there is no
                # dns-server submitted by the server
                subnet_index_map[subnet.id] = i
            if self.conf.dhcp_domain and subnet.ip_version == 6:
                # NOTE(review): ''.join() on a plain string is a no-op --
                # presumably kept in case dhcp_domain is list-typed; confirm.
                options.append('tag:tag%s,option6:domain-search,%s' %
                               (i, ''.join(self.conf.dhcp_domain)))
            gateway = subnet.gateway_ip
            host_routes = []
            for hr in subnet.host_routes:
                if hr.destination == constants.IPv4_ANY:
                    # A default-route host route stands in for a missing
                    # gateway rather than being emitted as a static route.
                    if not gateway:
                        gateway = hr.nexthop
                else:
                    host_routes.append("%s,%s" % (hr.destination, hr.nexthop))
            # Add host routes for isolated network segments
            if (self.conf.force_metadata or
                (isolated_subnets[subnet.id] and
                    self.conf.enable_isolated_metadata and
                    subnet.ip_version == 4)):
                subnet_dhcp_ip = subnet_to_interface_ip[subnet.id]
                host_routes.append(
                    '%s/32,%s' % (METADATA_DEFAULT_IP, subnet_dhcp_ip)
                )
            elif not isolated_subnets[subnet.id] and gateway:
                host_routes.append(
                    '%s/32,%s' % (METADATA_DEFAULT_IP, gateway)
                )
            if subnet.ip_version == 4:
                # Route sibling IPv4 subnets on-link (0.0.0.0 next-hop).
                host_routes.extend(["%s,0.0.0.0" % (s.cidr) for s in
                                    self.network.subnets
                                    if (s.ip_version == 4 and
                                        s.cidr != subnet.cidr)])
                if host_routes:
                    if gateway:
                        host_routes.append("%s,%s" % (constants.IPv4_ANY,
                                                      gateway))
                    options.append(
                        self._format_option(subnet.ip_version, i,
                                            'classless-static-route',
                                            ','.join(host_routes)))
                    # Also emit the Microsoft variant of the option.
                    options.append(
                        self._format_option(subnet.ip_version, i,
                                            WIN2k3_STATIC_DNS,
                                            ','.join(host_routes)))
                if gateway:
                    options.append(self._format_option(subnet.ip_version,
                                                       i, 'router',
                                                       gateway))
                else:
                    # Explicitly suppress the router option.
                    options.append(self._format_option(subnet.ip_version,
                                                       i, 'router'))
        return options, subnet_index_map
    def _generate_opts_per_port(self, subnet_index_map):
        """Build per-port dnsmasq options from extra_dhcp_opts."""
        options = []
        dhcp_ips = collections.defaultdict(list)
        for port in self.network.ports:
            if self._get_port_extra_dhcp_opts(port):
                port_ip_versions = set(
                    [netaddr.IPAddress(ip.ip_address).version
                     for ip in port.fixed_ips])
                for opt in port.extra_dhcp_opts:
                    if opt.opt_name == edo_ext.CLIENT_ID:
                        # client-id is written to the hosts file instead.
                        continue
                    opt_ip_version = opt.ip_version
                    if opt_ip_version in port_ip_versions:
                        options.append(
                            self._format_option(opt_ip_version, port.id,
                                                opt.opt_name, opt.opt_value))
                    else:
                        LOG.info(_LI("Cannot apply dhcp option %(opt)s "
                                     "because it's ip_version %(version)d "
                                     "is not in port's address IP versions"),
                                 {'opt': opt.opt_name,
                                  'version': opt_ip_version})
            # provides all dnsmasq ip as dns-server if there is more than
            # one dnsmasq for a subnet and there is no dns-server submitted
            # by the server
            if port.device_owner == constants.DEVICE_OWNER_DHCP:
                for ip in port.fixed_ips:
                    i = subnet_index_map.get(ip.subnet_id)
                    if i is None:
                        continue
                    dhcp_ips[i].append(ip.ip_address)
        for i, ips in dhcp_ips.items():
            for ip_version in (4, 6):
                vx_ips = [ip for ip in ips
                          if netaddr.IPAddress(ip).version == ip_version]
                if vx_ips:
                    options.append(
                        self._format_option(
                            ip_version, i, 'dns-server',
                            ','.join(
                                Dnsmasq._convert_to_literal_addrs(ip_version,
                                                                  vx_ips))))
        return options
    def _make_subnet_interface_ip_map(self):
        """Map subnet ids to the DHCP interface's IP on that subnet."""
        ip_dev = ip_lib.IPDevice(self.interface_name,
                                 namespace=self.network.namespace)
        subnet_lookup = dict(
            (netaddr.IPNetwork(subnet.cidr), subnet.id)
            for subnet in self.network.subnets
        )
        retval = {}
        for addr in ip_dev.addr.list():
            ip_net = netaddr.IPNetwork(addr['cidr'])
            if ip_net in subnet_lookup:
                # Strip the prefix length to keep just the address.
                retval[subnet_lookup[ip_net]] = addr['cidr'].split('/')[0]
        return retval
    def _format_option(self, ip_version, tag, option, *args):
        """Format DHCP option by option name or code."""
        option = str(option)
        # An option may be prefixed with its own 'tag:<x>,' qualifier;
        # split it off so it can be re-emitted after our subnet/port tag.
        pattern = "(tag:(.*),)?(.*)$"
        matches = re.match(pattern, option)
        extra_tag = matches.groups()[0]
        option = matches.groups()[2]
        if isinstance(tag, int):
            # Integer tags are per-subnet indexes; port ids pass through.
            tag = self._TAG_PREFIX % tag
        if not option.isdigit():
            # Named options need the dnsmasq 'option:'/'option6:' prefix;
            # numeric option codes are used as-is.
            if ip_version == 4:
                option = 'option:%s' % option
            else:
                option = 'option6:%s' % option
        if extra_tag:
            tags = ('tag:' + tag, extra_tag[:-1], '%s' % option)
        else:
            tags = ('tag:' + tag, '%s' % option)
        return ','.join(tags + args)
    @staticmethod
    def _convert_to_literal_addrs(ip_version, ips):
        # dnsmasq requires IPv6 literals to be bracketed.
        if ip_version == 4:
            return ips
        return ['[' + ip + ']' for ip in ips]
    @classmethod
    def get_isolated_subnets(cls, network):
        """Returns a dict indicating whether or not a subnet is isolated

        A subnet is considered non-isolated if there is a port connected to
        the subnet, and the port's ip address matches that of the subnet's
        gateway. The port must be owned by a neutron router.
        """
        # Default True: subnets with no matching router port are isolated.
        isolated_subnets = collections.defaultdict(lambda: True)
        subnets = dict((subnet.id, subnet) for subnet in network.subnets)
        for port in network.ports:
            if port.device_owner not in constants.ROUTER_INTERFACE_OWNERS:
                continue
            for alloc in port.fixed_ips:
                if subnets[alloc.subnet_id].gateway_ip == alloc.ip_address:
                    isolated_subnets[alloc.subnet_id] = False
        return isolated_subnets
    @classmethod
    def should_enable_metadata(cls, conf, network):
        """Determine whether the metadata proxy is needed for a network

        This method returns True for truly isolated networks (ie: not attached
        to a router) when enable_isolated_metadata is True, or for all the
        networks when the force_metadata flags is True.

        This method also returns True when enable_metadata_network is True,
        and the network passed as a parameter has a subnet in the link-local
        CIDR, thus characterizing it as a "metadata" network. The metadata
        network is used by solutions which do not leverage the l3 agent for
        providing access to the metadata service via logical routers built
        with 3rd party backends.
        """
        if conf.force_metadata:
            return True
        if conf.enable_metadata_network and conf.enable_isolated_metadata:
            # check if the network has a metadata subnet
            meta_cidr = netaddr.IPNetwork(METADATA_DEFAULT_CIDR)
            if any(netaddr.IPNetwork(s.cidr) in meta_cidr
                   for s in network.subnets):
                return True
        if not conf.use_namespaces or not conf.enable_isolated_metadata:
            return False
        isolated_subnets = cls.get_isolated_subnets(network)
        return any(isolated_subnets[subnet.id] for subnet in network.subnets)
class DeviceManager(object):
def __init__(self, conf, plugin):
self.conf = conf
self.plugin = plugin
self.driver = common_utils.load_interface_driver(conf)
def get_interface_name(self, network, port):
"""Return interface(device) name for use by the DHCP process."""
return self.driver.get_device_name(port)
def get_device_id(self, network):
"""Return a unique DHCP device ID for this host on the network."""
# There could be more than one dhcp server per network, so create
# a device id that combines host and network ids
return commonutils.get_dhcp_agent_device_id(network.id, self.conf.host)
def _set_default_route(self, network, device_name):
"""Sets the default gateway for this dhcp namespace.
This method is idempotent and will only adjust the route if adjusting
it would change it from what it already is. This makes it safe to call
and avoids unnecessary perturbation of the system.
"""
device = ip_lib.IPDevice(device_name, namespace=network.namespace)
gateway = device.route.get_gateway()
if gateway:
gateway = gateway.get('gateway')
for subnet in network.subnets:
skip_subnet = (
subnet.ip_version != 4
or not subnet.enable_dhcp
or subnet.gateway_ip is None)
if skip_subnet:
continue
if gateway != subnet.gateway_ip:
LOG.debug('Setting gateway for dhcp netns on net %(n)s to '
'%(ip)s',
{'n': network.id, 'ip': subnet.gateway_ip})
device.route.add_gateway(subnet.gateway_ip)
return
# No subnets on the network have a valid gateway. Clean it up to avoid
# confusion from seeing an invalid gateway here.
if gateway is not None:
LOG.debug('Removing gateway for dhcp netns on net %s', network.id)
device.route.delete_gateway(gateway)
def _setup_existing_dhcp_port(self, network, device_id, dhcp_subnets):
"""Set up the existing DHCP port, if there is one."""
# To avoid pylint thinking that port might be undefined after
# the following loop...
port = None
# Look for an existing DHCP port for this network.
for port in network.ports:
port_device_id = getattr(port, 'device_id', None)
if port_device_id == device_id:
# If using gateway IPs on this port, we can skip the
# following code, whose purpose is just to review and
# update the Neutron-allocated IP addresses for the
# port.
if self.driver.use_gateway_ips:
return port
# Otherwise break out, as we now have the DHCP port
# whose subnets and addresses we need to review.
break
else:
return None
# Compare what the subnets should be against what is already
# on the port.
dhcp_enabled_subnet_ids = set(dhcp_subnets)
port_subnet_ids = set(ip.subnet_id for ip in port.fixed_ips)
# If those differ, we need to call update.
if dhcp_enabled_subnet_ids != port_subnet_ids:
# Collect the subnets and fixed IPs that the port already
# has, for subnets that are still in the DHCP-enabled set.
wanted_fixed_ips = []
for fixed_ip in port.fixed_ips:
if fixed_ip.subnet_id in dhcp_enabled_subnet_ids:
wanted_fixed_ips.append(
{'subnet_id': fixed_ip.subnet_id,
'ip_address': fixed_ip.ip_address})
# Add subnet IDs for new DHCP-enabled subnets.
wanted_fixed_ips.extend(
dict(subnet_id=s)
for s in dhcp_enabled_subnet_ids - port_subnet_ids)
# Update the port to have the calculated subnets and fixed
# IPs. The Neutron server will allocate a fresh IP for
# each subnet that doesn't already have one.
port = self.plugin.update_dhcp_port(
port.id,
{'port': {'network_id': network.id,
'fixed_ips': wanted_fixed_ips}})
if not port:
raise exceptions.Conflict()
return port
def _setup_reserved_dhcp_port(self, network, device_id, dhcp_subnets):
"""Setup the reserved DHCP port, if there is one."""
LOG.debug('DHCP port %(device_id)s on network %(network_id)s'
' does not yet exist. Checking for a reserved port.',
{'device_id': device_id, 'network_id': network.id})
for port in network.ports:
port_device_id = getattr(port, 'device_id', None)
if port_device_id == constants.DEVICE_ID_RESERVED_DHCP_PORT:
port = self.plugin.update_dhcp_port(
port.id, {'port': {'network_id': network.id,
'device_id': device_id}})
if port:
return port
def _setup_new_dhcp_port(self, network, device_id, dhcp_subnets):
"""Create and set up new DHCP port for the specified network."""
LOG.debug('DHCP port %(device_id)s on network %(network_id)s'
' does not yet exist. Creating new one.',
{'device_id': device_id, 'network_id': network.id})
# Make a list of the subnets that need a unique IP address for
# this DHCP port.
if self.driver.use_gateway_ips:
unique_ip_subnets = []
else:
unique_ip_subnets = [dict(subnet_id=s) for s in dhcp_subnets]
port_dict = dict(
name='',
admin_state_up=True,
device_id=device_id,
network_id=network.id,
tenant_id=network.tenant_id,
fixed_ips=unique_ip_subnets)
return self.plugin.create_dhcp_port({'port': port_dict})
def setup_dhcp_port(self, network):
"""Create/update DHCP port for the host if needed and return port."""
# The ID that the DHCP port will have (or already has).
device_id = self.get_device_id(network)
# Get the set of DHCP-enabled subnets on this network.
dhcp_subnets = {subnet.id: subnet for subnet in network.subnets
if subnet.enable_dhcp}
# There are 3 cases: either the DHCP port already exists (but
# might need to be updated for a changed set of subnets); or
# some other code has already prepared a 'reserved' DHCP port,
# and we just need to adopt that; or we need to create a new
# DHCP port. Try each of those in turn until we have a DHCP
# port.
for setup_method in (self._setup_existing_dhcp_port,
self._setup_reserved_dhcp_port,
self._setup_new_dhcp_port):
dhcp_port = setup_method(network, device_id, dhcp_subnets)
if dhcp_port:
break
else:
raise exceptions.Conflict()
# Convert subnet_id to subnet dict
fixed_ips = [dict(subnet_id=fixed_ip.subnet_id,
ip_address=fixed_ip.ip_address,
subnet=dhcp_subnets[fixed_ip.subnet_id])
for fixed_ip in dhcp_port.fixed_ips]
ips = [DictModel(item) if isinstance(item, dict) else item
for item in fixed_ips]
dhcp_port.fixed_ips = ips
return dhcp_port
def _update_dhcp_port(self, network, port):
for index in range(len(network.ports)):
if network.ports[index].id == port.id:
network.ports[index] = port
break
else:
network.ports.append(port)
def setup(self, network):
"""Create and initialize a device for network's DHCP on this host."""
port = self.setup_dhcp_port(network)
self._update_dhcp_port(network, port)
interface_name = self.get_interface_name(network, port)
if ip_lib.ensure_device_is_ready(interface_name,
namespace=network.namespace):
LOG.debug('Reusing existing device: %s.', interface_name)
else:
self.driver.plug(network.id,
port.id,
interface_name,
port.mac_address,
namespace=network.namespace)
self.fill_dhcp_udp_checksums(namespace=network.namespace)
ip_cidrs = []
for fixed_ip in port.fixed_ips:
subnet = fixed_ip.subnet
if not ipv6_utils.is_auto_address_subnet(subnet):
net = netaddr.IPNetwork(subnet.cidr)
ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen)
ip_cidrs.append(ip_cidr)
if self.driver.use_gateway_ips:
# For each DHCP-enabled subnet, add that subnet's gateway
# IP address to the Linux device for the DHCP port.
for subnet in network.subnets:
if not subnet.enable_dhcp:
continue
gateway = subnet.gateway_ip
if gateway:
net = netaddr.IPNetwork(subnet.cidr)
ip_cidrs.append('%s/%s' % (gateway, net.prefixlen))
if (self.conf.enable_isolated_metadata and
self.conf.use_namespaces):
ip_cidrs.append(METADATA_DEFAULT_CIDR)
self.driver.init_l3(interface_name, ip_cidrs,
namespace=network.namespace)
# ensure that the dhcp interface is first in the list
if network.namespace is None:
device = ip_lib.IPDevice(interface_name)
device.route.pullup_route(interface_name,
ip_version=constants.IP_VERSION_4)
if self.conf.use_namespaces:
self._set_default_route(network, interface_name)
return interface_name
def update(self, network, device_name):
"""Update device settings for the network's DHCP on this host."""
if self.conf.use_namespaces:
self._set_default_route(network, device_name)
def destroy(self, network, device_name):
    """Destroy the device used for the network's DHCP on this host."""
    if not device_name:
        LOG.debug('No interface exists for network %s', network.id)
    else:
        self.driver.unplug(device_name, namespace=network.namespace)
    # Release the DHCP port regardless of whether a device existed.
    self.plugin.release_dhcp_port(network.id,
                                  self.get_device_id(network))
def fill_dhcp_udp_checksums(self, namespace):
    """Ensure DHCP reply packets always have correct UDP checksums."""
    rule = ('-p udp --dport %d -j CHECKSUM --checksum-fill'
            % constants.DHCP_RESPONSE_PORT)
    manager = iptables_manager.IptablesManager(use_ipv6=False,
                                               namespace=namespace)
    manager.ipv4['mangle'].add_rule('POSTROUTING', rule)
    manager.apply()
|
|
# Copyright 2011 Grid Dynamics
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import math
import uuid
import netaddr
from oslo.config import cfg
import webob
from nova.api.openstack.compute.contrib import networks_associate
from nova.api.openstack.compute.contrib import os_networks as networks
import nova.context
from nova import exception
from nova import test
from nova.tests.api.openstack import fakes
import nova.utils
# Global oslo.config accessor; FakeNetworkAPI.create reads the
# network_size / num_networks defaults from it.
CONF = cfg.CONF

# Two fully populated network records as an admin would see them.  The
# first belongs to project '1234'; the second has no project or host.
FAKE_NETWORKS = [
    {
        'bridge': 'br100', 'vpn_public_port': 1000,
        'dhcp_start': '10.0.0.3', 'bridge_interface': 'eth0',
        'updated_at': '2011-08-16 09:26:13.048257',
        'id': 1, 'uuid': '20c8acc0-f747-4d71-a389-46d078ebf047',
        'cidr_v6': None, 'deleted_at': None,
        'gateway': '10.0.0.1', 'label': 'mynet_0',
        'project_id': '1234', 'rxtx_base': None,
        'vpn_private_address': '10.0.0.2', 'deleted': False,
        'vlan': 100, 'broadcast': '10.0.0.7',
        'netmask': '255.255.255.248', 'injected': False,
        'cidr': '10.0.0.0/29',
        'vpn_public_address': '127.0.0.1', 'multi_host': False,
        'dns1': None, 'dns2': None, 'host': 'nsokolov-desktop',
        'gateway_v6': None, 'netmask_v6': None, 'priority': None,
        'created_at': '2011-08-15 06:19:19.387525',
    },
    {
        'bridge': 'br101', 'vpn_public_port': 1001,
        'dhcp_start': '10.0.0.11', 'bridge_interface': 'eth0',
        'updated_at': None, 'id': 2, 'cidr_v6': None,
        'uuid': '20c8acc0-f747-4d71-a389-46d078ebf000',
        'deleted_at': None, 'gateway': '10.0.0.9',
        'label': 'mynet_1', 'project_id': None,
        'vpn_private_address': '10.0.0.10', 'deleted': False,
        'vlan': 101, 'broadcast': '10.0.0.15', 'rxtx_base': None,
        'netmask': '255.255.255.248', 'injected': False,
        'cidr': '10.0.0.10/29', 'vpn_public_address': None,
        'multi_host': False, 'dns1': None, 'dns2': None, 'host': None,
        'gateway_v6': None, 'netmask_v6': None, 'priority': None,
        'created_at': '2011-08-15 06:19:19.885495',
    },
]

# The same two networks restricted to the subset of fields returned to
# a non-admin user.
FAKE_USER_NETWORKS = [
    {
        'id': 1, 'cidr': '10.0.0.0/29', 'netmask': '255.255.255.248',
        'gateway': '10.0.0.1', 'broadcast': '10.0.0.7', 'dns1': None,
        'dns2': None, 'cidr_v6': None, 'gateway_v6': None, 'label': 'mynet_0',
        'netmask_v6': None, 'uuid': '20c8acc0-f747-4d71-a389-46d078ebf047',
    },
    {
        'id': 2, 'cidr': '10.0.0.10/29', 'netmask': '255.255.255.248',
        'gateway': '10.0.0.9', 'broadcast': '10.0.0.15', 'dns1': None,
        'dns2': None, 'cidr_v6': None, 'gateway_v6': None, 'label': 'mynet_1',
        'netmask_v6': None, 'uuid': '20c8acc0-f747-4d71-a389-46d078ebf000',
    },
]

# Request body used by the network-create tests.
NEW_NETWORK = {
    "network": {
        "bridge_interface": "eth0",
        "cidr": "10.20.105.0/24",
        "label": "new net 111",
        "vlan_start": 111,
    }
}
class FakeNetworkAPI(object):
    """In-memory stand-in for the nova network API used by these tests.

    Operates on a deep copy of FAKE_NETWORKS so every instance starts
    from a pristine fixture set.
    """

    # Sentinel distinguishing "argument omitted" from an explicit None.
    _sentinel = object()
    # Set by disable_vlan(); makes add_network_to_project() behave like
    # a backend without VLAN support.
    _vlan_is_disabled = False

    def __init__(self):
        self.networks = copy.deepcopy(FAKE_NETWORKS)

    def disable_vlan(self):
        self._vlan_is_disabled = True

    def delete(self, context, network_id):
        """Delete the network with the given id; returns True on success."""
        for i, network in enumerate(self.networks):
            if network['id'] == network_id:
                # BUG FIX: delete the matched entry (index i), not
                # whichever network happens to be first in the list.
                del self.networks[i]
                return True
        raise exception.NetworkNotFoundForUUID(uuid=network_id)

    def disassociate(self, context, network_uuid):
        """Clear the project association of the given network."""
        for network in self.networks:
            if network.get('uuid') == network_uuid:
                network['project_id'] = None
                return True
        raise exception.NetworkNotFound(network_id=network_uuid)

    def associate(self, context, network_uuid, host=_sentinel,
                  project=_sentinel):
        """Set the host and/or project of the given network.

        Only keyword arguments that were explicitly supplied are
        applied; the sentinel marks omitted ones.
        """
        for network in self.networks:
            if network.get('uuid') == network_uuid:
                if host is not FakeNetworkAPI._sentinel:
                    network['host'] = host
                if project is not FakeNetworkAPI._sentinel:
                    network['project_id'] = project
                return True
        raise exception.NetworkNotFound(network_id=network_uuid)

    def add_network_to_project(self, context,
                               project_id, network_uuid=None):
        """Associate a network with project_id.

        When network_uuid is given, that specific network is targeted;
        otherwise the first network without a project is used.
        """
        if self._vlan_is_disabled:
            raise NotImplementedError()
        # BUG FIX: these two branches were swapped -- a caller that
        # supplied network_uuid got an arbitrary unassigned network, and
        # a caller that omitted it compared uuids against None and so
        # never matched anything.
        if network_uuid:
            for network in self.networks:
                if network.get('uuid') == network_uuid:
                    network['project_id'] = project_id
                    return
            return
        for network in self.networks:
            if network.get('project_id', None) is None:
                network['project_id'] = project_id
                return

    def get_all(self, context):
        return self._fake_db_network_get_all(context, project_only=True)

    def _fake_db_network_get_all(self, context, project_only="allow_none"):
        """Mimic db.network_get_all's project-based filtering."""
        project_id = context.project_id
        nets = self.networks
        if nova.context.is_user_context(context) and project_only:
            if project_only == 'allow_none':
                # Users see their own networks plus unassigned ones.
                nets = [n for n in self.networks
                        if (n['project_id'] == project_id or
                            n['project_id'] is None)]
            else:
                nets = [n for n in self.networks
                        if n['project_id'] == project_id]
        return nets

    def get(self, context, network_id):
        """Look a network up by uuid; raises NetworkNotFound otherwise."""
        for network in self.networks:
            if network.get('uuid') == network_id:
                return network
        raise exception.NetworkNotFound(network_id=network_id)

    def create(self, context, **kwargs):
        """Carve kwargs['cidr'] into subnets and register each as a new
        network; returns the list of created network dicts."""
        subnet_bits = int(math.ceil(math.log(kwargs.get(
            'network_size', CONF.network_size), 2)))
        fixed_net_v4 = netaddr.IPNetwork(kwargs['cidr'])
        prefixlen_v4 = 32 - subnet_bits
        subnets_v4 = list(fixed_net_v4.subnet(
            prefixlen_v4,
            count=kwargs.get('num_networks', CONF.num_networks)))
        new_networks = []
        new_id = max((net['id'] for net in self.networks))
        for index, subnet_v4 in enumerate(subnets_v4):
            new_id += 1
            net = {'id': new_id, 'uuid': str(uuid.uuid4())}
            net['cidr'] = str(subnet_v4)
            net['netmask'] = str(subnet_v4.netmask)
            net['gateway'] = kwargs.get('gateway') or str(subnet_v4[1])
            net['broadcast'] = str(subnet_v4.broadcast)
            net['dhcp_start'] = str(subnet_v4[2])
            # Fill the remaining fields from kwargs (or None).  Iterating
            # the dict directly works on Python 2 and 3 (was .iterkeys(),
            # which is Python 2 only).
            for key in FAKE_NETWORKS[0]:
                net.setdefault(key, kwargs.get(key))
            new_networks.append(net)
        self.networks += new_networks
        return new_networks
class NetworksTest(test.NoDBTestCase):
    """Tests for the os-networks extension controller and the network
    associate/disassociate action controller, backed by FakeNetworkAPI."""

    def setUp(self):
        super(NetworksTest, self).setUp()
        self.fake_network_api = FakeNetworkAPI()
        self.controller = networks.NetworkController(
            self.fake_network_api)
        self.associate_controller = networks_associate\
            .NetworkAssociateActionController(self.fake_network_api)
        fakes.stub_out_networking(self.stubs)
        fakes.stub_out_rate_limiting(self.stubs)

    @staticmethod
    def network_uuid_to_id(network):
        # The REST API exposes the network's uuid under the 'id' key;
        # rewrite the fixture dict to match.
        network['id'] = network['uuid']
        del network['uuid']

    def test_network_list_all_as_user(self):
        self.maxDiff = None
        req = fakes.HTTPRequest.blank('/v2/1234/os-networks')
        res_dict = self.controller.index(req)
        # Nothing is associated with the request's project yet.
        self.assertEqual(res_dict, {'networks': []})
        project_id = req.environ["nova.context"].project_id
        cxt = req.environ["nova.context"]
        uuid = FAKE_NETWORKS[0]['uuid']
        self.fake_network_api.associate(context=cxt,
                                        network_uuid=uuid,
                                        project=project_id)
        res_dict = self.controller.index(req)
        # NOTE(review): no deepcopy here, so network_uuid_to_id mutates
        # the FAKE_USER_NETWORKS module fixture in place -- confirm no
        # later test depends on its 'uuid' key.
        expected = [FAKE_USER_NETWORKS[0]]
        for network in expected:
            self.network_uuid_to_id(network)
        self.assertEqual(res_dict, {'networks': expected})

    def test_network_list_all_as_admin(self):
        req = fakes.HTTPRequest.blank('/v2/1234/os-networks')
        req.environ["nova.context"].is_admin = True
        res_dict = self.controller.index(req)
        expected = copy.deepcopy(FAKE_NETWORKS)
        for network in expected:
            self.network_uuid_to_id(network)
        self.assertEqual(res_dict, {'networks': expected})

    def test_network_disassociate(self):
        uuid = FAKE_NETWORKS[0]['uuid']
        req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
        res = self.controller._disassociate_host_and_project(
            req, uuid, {'disassociate': None})
        self.assertEqual(res.status_int, 202)
        # Both the project and the host binding are cleared.
        self.assertIsNone(self.fake_network_api.networks[0]['project_id'])
        self.assertIsNone(self.fake_network_api.networks[0]['host'])

    def test_network_disassociate_host_only(self):
        uuid = FAKE_NETWORKS[0]['uuid']
        req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
        res = self.associate_controller._disassociate_host_only(
            req, uuid, {'disassociate_host': None})
        self.assertEqual(res.status_int, 202)
        # Only the host binding is cleared; the project remains.
        self.assertIsNotNone(self.fake_network_api.networks[0]['project_id'])
        self.assertIsNone(self.fake_network_api.networks[0]['host'])

    def test_network_disassociate_project_only(self):
        uuid = FAKE_NETWORKS[0]['uuid']
        req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
        res = self.associate_controller._disassociate_project_only(
            req, uuid, {'disassociate_project': None})
        self.assertEqual(res.status_int, 202)
        # Only the project is cleared; the host binding remains.
        self.assertIsNone(self.fake_network_api.networks[0]['project_id'])
        self.assertIsNotNone(self.fake_network_api.networks[0]['host'])

    def test_network_disassociate_not_found(self):
        req = fakes.HTTPRequest.blank('/v2/1234/os-networks/100/action')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller._disassociate_host_and_project,
                          req, 100, {'disassociate': None})

    def test_network_get_as_user(self):
        uuid = FAKE_USER_NETWORKS[0]['uuid']
        req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s' % uuid)
        res_dict = self.controller.show(req, uuid)
        expected = {'network': copy.deepcopy(FAKE_USER_NETWORKS[0])}
        self.network_uuid_to_id(expected['network'])
        self.assertEqual(res_dict, expected)

    def test_network_get_as_admin(self):
        uuid = FAKE_NETWORKS[0]['uuid']
        req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s' % uuid)
        req.environ["nova.context"].is_admin = True
        res_dict = self.controller.show(req, uuid)
        expected = {'network': copy.deepcopy(FAKE_NETWORKS[0])}
        self.network_uuid_to_id(expected['network'])
        self.assertEqual(res_dict, expected)

    def test_network_get_not_found(self):
        req = fakes.HTTPRequest.blank('/v2/1234/os-networks/100')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.show, req, 100)

    def test_network_delete(self):
        uuid = FAKE_NETWORKS[0]['uuid']
        req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s' % uuid)
        res = self.controller.delete(req, 1)
        self.assertEqual(res.status_int, 202)

    def test_network_delete_not_found(self):
        req = fakes.HTTPRequest.blank('/v2/1234/os-networks/100')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.delete, req, 100)

    def test_network_add_vlan_disabled(self):
        # A backend without VLAN support raises NotImplementedError,
        # which the controller maps to HTTP 501.
        self.fake_network_api.disable_vlan()
        uuid = FAKE_NETWORKS[1]['uuid']
        req = fakes.HTTPRequest.blank('/v2/1234/os-networks/add')
        self.assertRaises(webob.exc.HTTPNotImplemented,
                          self.controller.add, req, {'id': uuid})

    def test_network_add(self):
        uuid = FAKE_NETWORKS[1]['uuid']
        req = fakes.HTTPRequest.blank('/v2/1234/os-networks/add')
        res = self.controller.add(req, {'id': uuid})
        self.assertEqual(res.status_int, 202)
        req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s' % uuid)
        req.environ["nova.context"].is_admin = True
        res_dict = self.controller.show(req, uuid)
        self.assertEqual(res_dict['network']['project_id'], 'fake')

    def test_network_associate_with_host(self):
        uuid = FAKE_NETWORKS[1]['uuid']
        req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
        res = self.associate_controller._associate_host(
            req, uuid, {'associate_host': "TestHost"})
        self.assertEqual(res.status_int, 202)
        req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s' % uuid)
        req.environ["nova.context"].is_admin = True
        res_dict = self.controller.show(req, uuid)
        self.assertEqual(res_dict['network']['host'], 'TestHost')

    def test_network_create(self):
        req = fakes.HTTPRequest.blank('/v2/1234/os-networks')
        res_dict = self.controller.create(req, NEW_NETWORK)
        self.assertIn('network', res_dict)
        uuid = res_dict['network']['id']
        req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s' % uuid)
        res_dict = self.controller.show(req, uuid)
        # Only the label prefix is asserted here.
        self.assertTrue(res_dict['network']['label'].
                        startswith(NEW_NETWORK['network']['label']))

    def test_network_create_large(self):
        req = fakes.HTTPRequest.blank('/v2/1234/os-networks')
        large_network = copy.deepcopy(NEW_NETWORK)
        large_network['network']['cidr'] = '128.0.0.0/4'
        res_dict = self.controller.create(req, large_network)
        self.assertEqual(res_dict['network']['cidr'],
                         large_network['network']['cidr'])

    # The remaining tests flip the deployment to the neutron backend,
    # where the associate/disassociate actions respond HTTP 501.
    def test_network_neutron_associate_not_implemented(self):
        uuid = FAKE_NETWORKS[1]['uuid']
        self.flags(network_api_class='nova.network.neutronv2.api.API')
        assoc_ctrl = networks_associate.NetworkAssociateActionController()
        req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
        self.assertRaises(webob.exc.HTTPNotImplemented,
                          assoc_ctrl._associate_host,
                          req, uuid, {'associate_host': "TestHost"})

    def test_network_neutron_disassociate_project_not_implemented(self):
        uuid = FAKE_NETWORKS[1]['uuid']
        self.flags(network_api_class='nova.network.neutronv2.api.API')
        assoc_ctrl = networks_associate.NetworkAssociateActionController()
        req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
        self.assertRaises(webob.exc.HTTPNotImplemented,
                          assoc_ctrl._disassociate_project_only,
                          req, uuid, {'disassociate_project': None})

    def test_network_neutron_disassociate_host_not_implemented(self):
        uuid = FAKE_NETWORKS[1]['uuid']
        self.flags(network_api_class='nova.network.neutronv2.api.API')
        assoc_ctrl = networks_associate.NetworkAssociateActionController()
        req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
        self.assertRaises(webob.exc.HTTPNotImplemented,
                          assoc_ctrl._disassociate_host_only,
                          req, uuid, {'disassociate_host': None})

    def test_network_neutron_disassociate_not_implemented(self):
        uuid = FAKE_NETWORKS[1]['uuid']
        self.flags(network_api_class='nova.network.neutronv2.api.API')
        controller = networks.NetworkController()
        req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
        self.assertRaises(webob.exc.HTTPNotImplemented,
                          controller._disassociate_host_and_project,
                          req, uuid, {'disassociate': None})
|
|
# Natural Language Toolkit: Decision Tree Classifiers
#
# Copyright (C) 2001-2011 NLTK Project
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
#
# $Id: naivebayes.py 2063 2004-07-17 21:02:24Z edloper $
"""
A classifier model that decides which label to assign to a token on
the basis of a tree structure, where branches correspond to conditions
on feature values, and leaves correspond to label assignments.
"""
from nltk.probability import *
from nltk.compat import defaultdict
from api import *
class DecisionTreeClassifier(ClassifierI):
    """A classifier that assigns a label to a token by walking a tree:
    internal nodes test the value of a single feature and leaves carry
    label assignments."""

    def __init__(self, label, feature_name=None, decisions=None, default=None):
        """
        @param label: The most likely label for tokens that reach
            this node in the decision tree.  If this decision tree
            has no children, then this label will be assigned to
            any token that reaches this decision tree.
        @param feature_name: The name of the feature that this
            decision tree selects for.
        @param decisions: A dictionary mapping from feature values
            for the feature identified by C{feature_name} to
            child decision trees.
        @param default: The child that will be used if the value of
            feature C{feature_name} does not match any of the keys in
            C{decisions}.  This is used when constructing binary
            decision trees.
        """
        self._label = label
        self._fname = feature_name
        self._decisions = decisions
        self._default = default

    def labels(self):
        """Return every label reachable from this node (no duplicates)."""
        labels = [self._label]
        if self._decisions is not None:
            for dt in self._decisions.values():
                labels.extend(dt.labels())
        if self._default is not None:
            labels.extend(self._default.labels())
        return list(set(labels))

    def classify(self, featureset):
        """Walk the tree and return the label assigned to C{featureset}."""
        # Decision leaf:
        if self._fname is None:
            return self._label
        # Decision tree:
        fval = featureset.get(self._fname)
        if fval in self._decisions:
            return self._decisions[fval].classify(featureset)
        elif self._default is not None:
            return self._default.classify(featureset)
        else:
            # No matching branch and no default: fall back to this
            # node's own label.
            return self._label

    def error(self, labeled_featuresets):
        """Return the fraction of C{labeled_featuresets} that this tree
        misclassifies."""
        errors = 0
        for featureset, label in labeled_featuresets:
            if self.classify(featureset) != label:
                errors += 1
        return float(errors)/len(labeled_featuresets)

    def pp(self, width=70, prefix='', depth=4):
        """
        Return a string containing a pretty-printed version of this
        decision tree.  Each line in this string corresponds to a
        single decision tree node or leaf, and indentation is used to
        display the structure of the decision tree.
        """
        # [xx] display default!!
        if self._fname is None:
            n = width-len(prefix)-15
            return '%s%s %s\n' % (prefix, '.'*n, self._label)
        s = ''
        for i, (fval, result) in enumerate(sorted(self._decisions.items())):
            hdr = '%s%s=%s? ' % (prefix, self._fname, fval)
            n = width-15-len(hdr)
            s += '%s%s %s\n' % (hdr, '.'*(n), result._label)
            if result._fname is not None and depth>1:
                s += result.pp(width, prefix+' ', depth-1)
        if self._default is not None:
            n = width-len(prefix)-21
            s += '%selse: %s %s\n' % (prefix, '.'*n, self._default._label)
            if self._default._fname is not None and depth>1:
                s += self._default.pp(width, prefix+' ', depth-1)
        return s

    def pseudocode(self, prefix='', depth=4):
        """
        Return a string representation of this decision tree that
        expresses the decisions it makes as a nested set of pseudocode
        if statements.
        """
        if self._fname is None:
            return "%sreturn %r\n" % (prefix, self._label)
        s = ''
        for (fval, result) in sorted(self._decisions.items()):
            s += '%sif %s == %r: ' % (prefix, self._fname, fval)
            if result._fname is not None and depth>1:
                s += '\n'+result.pseudocode(prefix+' ', depth-1)
            else:
                s += 'return %r\n' % result._label
        if self._default is not None:
            if len(self._decisions) == 1:
                # list(...) keeps the single-key lookup working on both
                # Python 2 and 3 (dict.keys() is a view on Python 3).
                s += '%sif %s != %r: '% (prefix, self._fname,
                                         list(self._decisions.keys())[0])
            else:
                s += '%selse: ' % (prefix,)
            if self._default._fname is not None and depth>1:
                s += '\n'+self._default.pseudocode(prefix+' ', depth-1)
            else:
                s += 'return %r\n' % self._default._label
        return s

    def __str__(self):
        return self.pp()

    @staticmethod
    def train(labeled_featuresets, entropy_cutoff=0.05, depth_cutoff=100,
              support_cutoff=10, binary=False, feature_values=None,
              verbose=False):
        """
        Build a decision tree by choosing the best (binary) stump and
        recursively refining it.

        @param binary: If true, then treat all feature/value pairs a
            individual binary features, rather than using a single n-way
            branch for each feature.
        """
        # Collect a list of all feature names.
        feature_names = set()
        for featureset, label in labeled_featuresets:
            for fname in featureset:
                feature_names.add(fname)

        # Collect a list of the values each feature can take.
        if feature_values is None and binary:
            feature_values = defaultdict(set)
            for featureset, label in labeled_featuresets:
                for fname, fval in featureset.items():
                    feature_values[fname].add(fval)

        # Start with a stump.
        if not binary:
            tree = DecisionTreeClassifier.best_stump(
                feature_names, labeled_featuresets, verbose)
        else:
            tree = DecisionTreeClassifier.best_binary_stump(
                feature_names, labeled_featuresets, feature_values, verbose)

        # Refine the stump.
        tree.refine(labeled_featuresets, entropy_cutoff, depth_cutoff-1,
                    support_cutoff, binary, feature_values, verbose)

        # Return it
        return tree

    @staticmethod
    def leaf(labeled_featuresets):
        """Return a leaf labelled with the most frequent label."""
        label = FreqDist([label for (featureset,label)
                          in labeled_featuresets]).max()
        return DecisionTreeClassifier(label)

    @staticmethod
    def stump(feature_name, labeled_featuresets):
        """Return a one-level tree that branches on C{feature_name},
        assigning each value its most frequent label."""
        label = FreqDist([label for (featureset,label)
                          in labeled_featuresets]).max()

        # Find the best label for each value.
        freqs = defaultdict(FreqDist) # freq(label|value)
        for featureset, label in labeled_featuresets:
            # BUG FIX: was featureset.get(featurename) -- an undefined
            # name (NameError); the parameter is feature_name.
            feature_value = featureset.get(feature_name)
            freqs[feature_value].inc(label)

        decisions = dict([(val, DecisionTreeClassifier(freqs[val].max()))
                          for val in freqs])
        return DecisionTreeClassifier(label, feature_name, decisions)

    def refine(self, labeled_featuresets, entropy_cutoff, depth_cutoff,
               support_cutoff, binary=False, feature_values=None,
               verbose=False):
        """Recursively grow child subtrees for branches whose label
        distribution is still too impure (entropy above the cutoff)."""
        if len(labeled_featuresets) <= support_cutoff: return
        if self._fname is None: return
        if depth_cutoff <= 0: return
        for fval in self._decisions:
            fval_featuresets = [(featureset,label) for (featureset,label)
                                in labeled_featuresets
                                if featureset.get(self._fname) == fval]

            label_freqs = FreqDist([label for (featureset,label)
                                    in fval_featuresets])
            if entropy(MLEProbDist(label_freqs)) > entropy_cutoff:
                self._decisions[fval] = DecisionTreeClassifier.train(
                    fval_featuresets, entropy_cutoff, depth_cutoff,
                    support_cutoff, binary, feature_values, verbose)
        if self._default is not None:
            default_featuresets = [(featureset, label) for (featureset, label)
                                   in labeled_featuresets
                                   if featureset.get(self._fname) not in
                                   self._decisions.keys()]
            label_freqs = FreqDist([label for (featureset,label)
                                    in default_featuresets])
            if entropy(MLEProbDist(label_freqs)) > entropy_cutoff:
                self._default = DecisionTreeClassifier.train(
                    default_featuresets, entropy_cutoff, depth_cutoff,
                    support_cutoff, binary, feature_values, verbose)

    @staticmethod
    def best_stump(feature_names, labeled_featuresets, verbose=False):
        """Return the stump (over all feature names) with the lowest
        training error; falls back to a plain leaf."""
        best_stump = DecisionTreeClassifier.leaf(labeled_featuresets)
        best_error = best_stump.error(labeled_featuresets)
        for fname in feature_names:
            stump = DecisionTreeClassifier.stump(fname, labeled_featuresets)
            stump_error = stump.error(labeled_featuresets)
            if stump_error < best_error:
                best_error = stump_error
                best_stump = stump
        if verbose:
            print ('best stump for %6d toks uses %-20s err=%6.4f' %
                   (len(labeled_featuresets), best_stump._fname, best_error))
        return best_stump

    @staticmethod
    def binary_stump(feature_name, feature_value, labeled_featuresets):
        """Return a binary stump testing feature_name == feature_value,
        with a default branch for all other values."""
        label = FreqDist([label for (featureset,label)
                          in labeled_featuresets]).max()

        # Find the best label for each value.
        pos_fdist = FreqDist()
        neg_fdist = FreqDist()
        for featureset, label in labeled_featuresets:
            if featureset.get(feature_name) == feature_value:
                pos_fdist.inc(label)
            else:
                neg_fdist.inc(label)

        decisions = {feature_value: DecisionTreeClassifier(pos_fdist.max())}
        default = DecisionTreeClassifier(neg_fdist.max())
        return DecisionTreeClassifier(label, feature_name, decisions, default)

    @staticmethod
    def best_binary_stump(feature_names, labeled_featuresets, feature_values,
                          verbose=False):
        """Return the binary stump (over all feature/value pairs) with
        the lowest training error; falls back to a plain leaf."""
        best_stump = DecisionTreeClassifier.leaf(labeled_featuresets)
        best_error = best_stump.error(labeled_featuresets)
        for fname in feature_names:
            for fval in feature_values[fname]:
                stump = DecisionTreeClassifier.binary_stump(
                    fname, fval, labeled_featuresets)
                stump_error = stump.error(labeled_featuresets)
                if stump_error < best_error:
                    best_error = stump_error
                    best_stump = stump
        if best_stump._decisions:
            # list(...) keeps the single-key lookup working on both
            # Python 2 and 3.
            descr = '%s=%s' % (best_stump._fname,
                               list(best_stump._decisions.keys())[0])
        else:
            descr = '(default)'
        if verbose:
            print ('best stump for %6d toks uses %-20s err=%6.4f' %
                   (len(labeled_featuresets), descr, best_error))
        return best_stump
##//////////////////////////////////////////////////////
## Demo
##//////////////////////////////////////////////////////
def f(x):
    # Thin wrapper so names_demo (below) trains a *binary* decision tree
    # with verbose progress output.
    return DecisionTreeClassifier.train(x, binary=True, verbose=True)
def demo():
    """Train a decision tree on the NLTK names demo corpus and print the
    resulting tree plus its pseudocode rendering (Python 2 syntax)."""
    from nltk.classify.util import names_demo, binary_names_demo_features
    classifier = names_demo(f, #DecisionTreeClassifier.train,
                            binary_names_demo_features)
    print classifier.pp(depth=7)
    print classifier.pseudocode(depth=7)
# Run the demo when this module is executed as a script.
if __name__ == '__main__':
    demo()
|
|
import os
from peewee import *
from peewee import sqlite3
from playhouse.sqlite_ext import CYTHON_SQLITE_EXTENSIONS
from playhouse.sqlite_ext import *
from playhouse._sqlite_ext import BloomFilter
from .base import BaseTestCase
from .base import DatabaseTestCase
from .base import db_loader
from .base import skip_unless
# Shared on-disk database used by most test cases below.
# hash_functions=1 registers the SQL hash helpers exercised in
# TestHashFunctions.
database = CSqliteExtDatabase('peewee_test.db', timeout=100,
                              hash_functions=1)
class CyDatabaseTestCase(DatabaseTestCase):
    """Base test case bound to the shared C-extension database.

    The on-disk database file is removed after every test so that the
    cases stay independent of each other.
    """

    database = database

    def tearDown(self):
        super(CyDatabaseTestCase, self).tearDown()
        db_filename = self.database.database
        if os.path.exists(db_filename):
            os.unlink(db_filename)

    def execute(self, sql, *params):
        # Convenience wrapper: run raw SQL without an implicit commit.
        return self.database.execute_sql(sql, params, commit=False)
class TestCySqliteHelpers(CyDatabaseTestCase):
    """Tests for autocommit reporting and the commit/rollback/update
    hooks exposed by the C extension."""

    def test_autocommit(self):
        # autocommit reads True outside an explicit transaction and
        # False once one has been opened with begin().
        self.assertTrue(self.database.autocommit)
        self.database.begin()
        self.assertFalse(self.database.autocommit)
        self.database.rollback()
        self.assertTrue(self.database.autocommit)

    def test_commit_hook(self):
        # Count commits observed through the on_commit hook.
        state = {}

        @self.database.on_commit
        def on_commit():
            state.setdefault('commits', 0)
            state['commits'] += 1

        self.execute('create table register (value text)')
        self.assertEqual(state['commits'], 1)

        # Check hook is preserved.
        self.database.close()
        self.database.connect()

        self.execute('insert into register (value) values (?), (?)',
                     'foo', 'bar')
        self.assertEqual(state['commits'], 2)

        curs = self.execute('select * from register order by value;')
        results = curs.fetchall()
        self.assertEqual([tuple(r) for r in results], [('bar',), ('foo',)])
        # The read-only query above must not fire the commit hook.
        self.assertEqual(state['commits'], 2)

    def test_rollback_hook(self):
        state = {}

        @self.database.on_rollback
        def on_rollback():
            state.setdefault('rollbacks', 0)
            state['rollbacks'] += 1

        # The CREATE TABLE commits, so no rollback is recorded.
        self.execute('create table register (value text);')
        self.assertEqual(state, {})

        # Check hook is preserved.
        self.database.close()
        self.database.connect()

        self.database.begin()
        self.execute('insert into register (value) values (?)', 'test')
        self.database.rollback()
        self.assertEqual(state, {'rollbacks': 1})

        # The rolled-back insert must not be visible.
        curs = self.execute('select * from register;')
        self.assertEqual(curs.fetchall(), [])

    def test_update_hook(self):
        # Each notification arrives as (query-type, db, table, rowid).
        state = []

        @self.database.on_update
        def on_update(query, db, table, rowid):
            state.append((query, db, table, rowid))

        self.execute('create table register (value text)')
        self.execute('insert into register (value) values (?), (?)',
                     'foo', 'bar')
        self.assertEqual(state, [
            ('INSERT', 'main', 'register', 1),
            ('INSERT', 'main', 'register', 2)])

        # Check hook is preserved.
        self.database.close()
        self.database.connect()

        self.execute('update register set value = ? where rowid = ?', 'baz', 1)
        self.assertEqual(state, [
            ('INSERT', 'main', 'register', 1),
            ('INSERT', 'main', 'register', 2),
            ('UPDATE', 'main', 'register', 1)])

        self.execute('delete from register where rowid=?;', 2)
        self.assertEqual(state, [
            ('INSERT', 'main', 'register', 1),
            ('INSERT', 'main', 'register', 2),
            ('UPDATE', 'main', 'register', 1),
            ('DELETE', 'main', 'register', 2)])

    def test_properties(self):
        # Smoke-test that cache statistics are exposed.
        self.assertTrue(self.database.cache_used is not None)
# Lightweight table reference used by the hash-function tests below.
HUser = Table('users', ('id', 'username'))
class TestHashFunctions(CyDatabaseTestCase):
    """Tests for the SQL hash functions registered by the shared
    database (created with hash_functions=1)."""

    database = database

    def setUp(self):
        super(TestHashFunctions, self).setUp()
        self.database.execute_sql(
            'create table users (id integer not null primary key, '
            'username text not null)')

    # NOTE(review): despite the name, this test exercises fn.SHA1 and
    # labels the column 'sha' -- confirm whether an MD5 variant was
    # intended or whether the test should simply be renamed.
    def test_md5(self):
        for username in ('charlie', 'huey', 'zaizee'):
            HUser.insert({HUser.username: username}).execute(self.database)

        # Compare only the first six hex digits of each digest.
        query = (HUser
                 .select(HUser.username,
                         fn.SUBSTR(fn.SHA1(HUser.username), 1, 6).alias('sha'))
                 .order_by(HUser.username)
                 .tuples()
                 .execute(self.database))

        self.assertEqual(query[:], [
            ('charlie', 'd8cd10'),
            ('huey', '89b31a'),
            ('zaizee', 'b4dcf9')])
class TestBackup(CyDatabaseTestCase):
    """Tests for the online-backup APIs: backup() and backup_to_file()."""

    # Files these tests may create; removed in tearDown.
    backup_filenames = set(('test_backup.db', 'test_backup1.db',
                            'test_backup2.db'))

    def tearDown(self):
        super(TestBackup, self).tearDown()
        for backup_filename in self.backup_filenames:
            if os.path.exists(backup_filename):
                os.unlink(backup_filename)

    def _populate_test_data(self, nrows=100, db=None):
        # Create a "register" table holding the integers 0..nrows-1.
        db = self.database if db is None else db
        db.execute_sql('CREATE TABLE register (id INTEGER NOT NULL PRIMARY KEY'
                       ', value INTEGER NOT NULL)')
        with db.atomic():
            for i in range(nrows):
                db.execute_sql('INSERT INTO register (value) VALUES (?)', (i,))

    def test_backup(self):
        self._populate_test_data()

        # Back-up to an in-memory database and verify contents.
        other_db = CSqliteExtDatabase(':memory:')
        self.database.backup(other_db)
        cursor = other_db.execute_sql('SELECT value FROM register ORDER BY '
                                      'value;')
        self.assertEqual([val for val, in cursor.fetchall()], list(range(100)))
        other_db.close()

    def test_backup_preserve_pagesize(self):
        # The page size configured before populating db1 must survive
        # the backup into db2.
        db1 = CSqliteExtDatabase('test_backup1.db')
        with db1.connection_context():
            db1.page_size = 8192
            self._populate_test_data(db=db1)

        db1.connect()
        self.assertEqual(db1.page_size, 8192)

        db2 = CSqliteExtDatabase('test_backup2.db')
        db1.backup(db2)
        self.assertEqual(db2.page_size, 8192)

        nrows, = db2.execute_sql('select count(*) from register;').fetchone()
        self.assertEqual(nrows, 100)

    def test_backup_to_file(self):
        self._populate_test_data()

        self.database.backup_to_file('test_backup.db')
        backup_db = CSqliteExtDatabase('test_backup.db')
        cursor = backup_db.execute_sql('SELECT value FROM register ORDER BY '
                                       'value;')
        self.assertEqual([val for val, in cursor.fetchall()], list(range(100)))
        backup_db.close()

    def test_backup_progress(self):
        self._populate_test_data()

        accum = []

        def progress(remaining, total, is_done):
            # Record every progress callback invocation.
            accum.append((remaining, total, is_done))

        other_db = CSqliteExtDatabase(':memory:')
        # pages=1 copies one page per step, so the callback must fire.
        self.database.backup(other_db, pages=1, progress=progress)
        self.assertTrue(len(accum) > 0)

        sql = 'select value from register order by value;'
        self.assertEqual([r for r, in other_db.execute_sql(sql)],
                         list(range(100)))
        other_db.close()

    def test_backup_progress_error(self):
        self._populate_test_data()

        def broken_progress(remaining, total, is_done):
            raise ValueError('broken')

        # An exception raised inside the progress callback propagates to
        # the caller of backup().
        other_db = CSqliteExtDatabase(':memory:')
        self.assertRaises(ValueError, self.database.backup, other_db,
                          progress=broken_progress)
        other_db.close()
class TestBlob(CyDatabaseTestCase):
def setUp(self):
super(TestBlob, self).setUp()
self.Register = Table('register', ('id', 'data'))
self.execute('CREATE TABLE register (id INTEGER NOT NULL PRIMARY KEY, '
'data BLOB NOT NULL)')
def create_blob_row(self, nbytes):
Register = self.Register.bind(self.database)
Register.insert({Register.data: ZeroBlob(nbytes)}).execute()
return self.database.last_insert_rowid
def test_blob(self):
rowid1024 = self.create_blob_row(1024)
rowid16 = self.create_blob_row(16)
blob = Blob(self.database, 'register', 'data', rowid1024)
self.assertEqual(len(blob), 1024)
blob.write(b'x' * 1022)
blob.write(b'zz')
blob.seek(1020)
self.assertEqual(blob.tell(), 1020)
data = blob.read(3)
self.assertEqual(data, b'xxz')
self.assertEqual(blob.read(), b'z')
self.assertEqual(blob.read(), b'')
blob.seek(-10, 2)
self.assertEqual(blob.tell(), 1014)
self.assertEqual(blob.read(), b'xxxxxxxxzz')
blob.reopen(rowid16)
self.assertEqual(blob.tell(), 0)
self.assertEqual(len(blob), 16)
blob.write(b'x' * 15)
self.assertEqual(blob.tell(), 15)
def test_blob_exceed_size(self):
rowid = self.create_blob_row(16)
blob = self.database.blob_open('register', 'data', rowid)
with self.assertRaisesCtx(ValueError):
blob.seek(17, 0)
with self.assertRaisesCtx(ValueError):
blob.write(b'x' * 17)
blob.write(b'x' * 16)
self.assertEqual(blob.tell(), 16)
blob.seek(0)
data = blob.read(17) # Attempting to read more data is OK.
self.assertEqual(data, b'x' * 16)
blob.close()
def test_blob_errors_opening(self):
rowid = self.create_blob_row(4)
with self.assertRaisesCtx(OperationalError):
blob = self.database.blob_open('register', 'data', rowid + 1)
with self.assertRaisesCtx(OperationalError):
blob = self.database.blob_open('register', 'missing', rowid)
with self.assertRaisesCtx(OperationalError):
blob = self.database.blob_open('missing', 'data', rowid)
def test_blob_operating_on_closed(self):
rowid = self.create_blob_row(4)
blob = self.database.blob_open('register', 'data', rowid)
self.assertEqual(len(blob), 4)
blob.close()
with self.assertRaisesCtx(InterfaceError):
len(blob)
self.assertRaises(InterfaceError, blob.read)
self.assertRaises(InterfaceError, blob.write, b'foo')
self.assertRaises(InterfaceError, blob.seek, 0, 0)
self.assertRaises(InterfaceError, blob.tell)
self.assertRaises(InterfaceError, blob.reopen, rowid)
    def test_blob_readonly(self):
        """A blob opened read-only rejects writes but still reads data."""
        rowid = self.create_blob_row(4)
        blob = self.database.blob_open('register', 'data', rowid)
        blob.write(b'huey')
        blob.seek(0)
        self.assertEqual(blob.read(), b'huey')
        blob.close()
        # Re-open the same row read-only (4th positional arg).
        blob = self.database.blob_open('register', 'data', rowid, True)
        self.assertEqual(blob.read(), b'huey')
        blob.seek(0)
        with self.assertRaisesCtx(OperationalError):
            blob.write(b'meow')
        # BLOB is read-only.
        self.assertEqual(blob.read(), b'huey')
class TestBloomFilterIntegration(CyDatabaseTestCase):
    """Tests for the bloomfilter() aggregate and bloomfilter_contains()
    scalar function registered on the extension database."""
    database = CSqliteExtDatabase(':memory:', bloomfilter=True)

    def setUp(self):
        super(TestBloomFilterIntegration, self).setUp()
        self.execute('create table register (data TEXT);')

    def populate(self):
        # Insert 'a'..'z', each repeated 1..9 times ('a', 'aa', ... 'aaaaaaaaa').
        accum = []
        with self.database.atomic():
            for i in 'abcdefghijklmnopqrstuvwxyz':
                keys = [i * j for j in range(1, 10)]
                accum.extend(keys)
                self.execute('insert into register (data) values %s' %
                             ', '.join(['(?)'] * len(keys)),
                             *keys)
        # Sanity-check the inserted data via an ordered slice.
        curs = self.execute('select * from register '
                            'order by data limit 5 offset 6')
        self.assertEqual([key for key, in curs.fetchall()],
                         ['aaaaaaa', 'aaaaaaaa', 'aaaaaaaaa', 'b', 'bb'])
        return accum

    def test_bloomfilter(self):
        all_keys = self.populate()
        # Build a 128KB bloom-filter buffer over every key in the table.
        curs = self.execute('select bloomfilter(data, ?) from register',
                            1024 * 128)
        buf, = curs.fetchone()
        self.assertEqual(len(buf), 1024 * 128)
        # Every inserted key must be reported present...
        for key in all_keys:
            curs = self.execute('select bloomfilter_contains(?, ?)',
                                key, buf)
            self.assertEqual(curs.fetchone()[0], 1)
        # ...and these never-inserted variants reported absent.
        for key in all_keys:
            key += '-test'
            curs = self.execute('select bloomfilter_contains(?, ?)',
                                key, buf)
            self.assertEqual(curs.fetchone()[0], 0)
class TestBloomFilter(BaseTestCase):
    """Behavioral tests for the standalone BloomFilter type."""

    n = 1024

    def setUp(self):
        super(TestBloomFilter, self).setUp()
        self.bf = BloomFilter(self.n)

    def test_bloomfilter(self):
        members = ('charlie', 'huey', 'mickey', 'zaizee', 'nuggie', 'foo',
                   'bar', 'baz')
        self.bf.add(*members)
        # Every added key is reported present.
        for member in members:
            self.assertTrue(member in self.bf)
        # Near-miss variants of each key are reported absent.
        for member in members:
            for suffix in ('-x', '-y', ' '):
                self.assertFalse(member + suffix in self.bf)

    def test_bloomfilter_buffer(self):
        self.assertEqual(len(self.bf), self.n)
        # An untouched filter serializes to an all-zero buffer.
        empty = self.bf.to_buffer()
        self.assertEqual(len(empty), self.n)
        self.assertEqual(empty, b'\x00' * self.n)
        members = ('alpha', 'beta', 'gamma', 'delta', 'epsilon', 'zeta')
        self.bf.add(*members)
        for member in members:
            self.assertTrue(member in self.bf)
            self.assertFalse(member + '-x' in self.bf)
        # Round-trip through a buffer into a second filter.
        serialized = self.bf.to_buffer()
        clone = BloomFilter.from_buffer(serialized)
        for member in members:
            self.assertTrue(member in clone)
            self.assertFalse(member + '-x' in clone)
        # The two underlying buffers must be identical.
        self.assertEqual(len(clone), self.n)
        self.assertEqual(serialized, clone.to_buffer())
class DataTypes(TableFunction):
    """Table function yielding one (key, value) row per sample Python value,
    covering each storage class SQLite can round-trip."""
    columns = ('key', 'value')
    params = ()
    name = 'data_types'

    def initialize(self):
        # One representative value per Python type of interest.
        self.values = (
            None,
            1,
            2.,
            u'unicode str',
            b'byte str',
            False,
            True)
        self.idx = 0
        self.n = len(self.values)

    def iterate(self, idx):
        if idx < self.n:
            return ('k%s' % idx, self.values[idx])
        # TableFunction protocol: exhaustion is signalled via StopIteration.
        raise StopIteration
@skip_unless(sqlite3.sqlite_version_info >= (3, 9), 'requires sqlite >= 3.9')
class TestDataTypesTableFunction(CyDatabaseTestCase):
    """Verify Python values round-trip through the DataTypes table function."""
    database = db_loader('sqlite')

    def test_data_types_table_function(self):
        self.database.register_table_function(DataTypes)
        cursor = self.database.execute_sql('SELECT key, value '
                                           'FROM data_types() ORDER BY key')
        # Booleans come back as SQLite integers (0/1); other types round-trip.
        self.assertEqual(cursor.fetchall(), [
            ('k0', None),
            ('k1', 1),
            ('k2', 2.),
            ('k3', u'unicode str'),
            ('k4', b'byte str'),
            ('k5', 0),
            ('k6', 1),
        ])
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from vocabulary.vocabulary import Vocabulary as vb
from vocabulary.responselib import Response as rp
import unittest
import sys
try:
import simplejson as json
except ImportError:
import json
try:
from unittest import mock
except Exception as e:
import mock
class TestModule(unittest.TestCase):
    """Checks for the sanity of all module methods.

    Each test patches ``requests.get`` inside the vocabulary module and feeds
    it a canned JSON payload.  The repeated Python-2/3 assertion branch and
    the 3-line mock-response setup are factored into private helpers.
    """

    def _assert_same_items(self, expected, actual):
        # Order-insensitive comparison; assertItemsEqual was renamed to
        # assertCountEqual in Python 3.
        if sys.version_info[:2] <= (2, 7):
            self.assertItemsEqual(expected, actual)
        else:
            self.assertCountEqual(expected, actual)

    def _mock_response(self, mock_api_call, status_code, res=None):
        # Configure the patched requests.get to return a canned response.
        mock_api_call.return_value = mock.Mock()
        mock_api_call.return_value.status_code = status_code
        if res is not None:
            mock_api_call.return_value.json.return_value = res

    @mock.patch('vocabulary.vocabulary.requests.get')
    def test_meaning_found(self, mock_api_call):
        res = {
            "tuc": [
                {
                    "meanings": [
                        {
                            "language": "en",
                            "text": "the act of singing with closed lips"
                        }
                    ]
                }
            ]
        }
        self._mock_response(mock_api_call, 200, res)
        expected_result = '[{"seq": 0, "text": "the act of singing with closed lips"}]'
        expected_result = json.dumps(json.loads(expected_result))
        result = vb.meaning("humming")
        self._assert_same_items(expected_result, result)

    @mock.patch('vocabulary.vocabulary.requests.get')
    def test_meaning_not_found(self, mock_api_call):
        self._mock_response(mock_api_call, 404)
        self.assertFalse(vb.meaning("humming"))

    @mock.patch('vocabulary.vocabulary.requests.get')
    def test_meaning_key_error(self, mock_api_call):
        # Payload lacks the "tuc" key -> meaning() must return a falsy value.
        res = {
            "result": "ok",
            "phrase": "humming"
        }
        self._mock_response(mock_api_call, 200, res)
        self.assertFalse(vb.meaning("humming"))

    @mock.patch('vocabulary.vocabulary.requests.get')
    def test_synonynm_found(self, mock_api_call):
        res = {
            "tuc": [
                {
                    "phrase": {
                        "text": "get angry",
                        "language": "en"
                    }
                },
                {
                    "phrase": {
                        "text": "mad",
                        "language": "en"
                    },
                }
            ]
        }
        self._mock_response(mock_api_call, 200, res)
        expected_result = '[{"text": "get angry", "seq": 0}, {"text": "mad", "seq": 1}]'
        expected_result = json.dumps(json.loads(expected_result))
        result = vb.synonym("angry")
        self._assert_same_items(expected_result, result)

    @mock.patch('vocabulary.vocabulary.requests.get')
    def test_synonynm_not_found(self, mock_api_call):
        self._mock_response(mock_api_call, 404)
        self.assertFalse(vb.synonym("angry"))

    @mock.patch('vocabulary.vocabulary.requests.get')
    def test_synonynm_tuc_key_error(self, mock_api_call):
        res = {
            "result": "ok",
            "phrase": "angry"
        }
        self._mock_response(mock_api_call, 200, res)
        self.assertFalse(vb.synonym("angry"))

    @mock.patch('vocabulary.vocabulary.requests.get')
    def test_synonynm_empty_list(self, mock_api_call):
        res = {
            "result": "ok",
            "tuc": [],
            "phrase": "angry"
        }
        self._mock_response(mock_api_call, 200, res)
        self.assertFalse(vb.synonym("angry"))

    @mock.patch('vocabulary.vocabulary.requests.get')
    def test_translate_found(self, mock_api_call):
        res = {
            "tuc": [
                {
                    "phrase": {
                        "text": "anglais",
                        "language": "fr"
                    }
                },
                {
                    "phrase": {
                        "text": "germanique",
                        "language": "fr"
                    },
                }
            ]
        }
        self._mock_response(mock_api_call, 200, res)
        expected_result = '[{"text": "anglais", "seq": 0}, {"text": "germanique", "seq": 1}]'
        expected_result = json.dumps(json.loads(expected_result))
        result = vb.translate("english", "en", "fr")
        self._assert_same_items(expected_result, result)

    @mock.patch('vocabulary.vocabulary.requests.get')
    def test_translate_not_found(self, mock_api_call):
        self._mock_response(mock_api_call, 404)
        self.assertFalse(vb.translate("english", "en", "fr"))

    @mock.patch('vocabulary.vocabulary.requests.get')
    def test_translate_tuc_key_error(self, mock_api_call):
        res = {
            "result": "ok",
            "phrase": "english"
        }
        self._mock_response(mock_api_call, 200, res)
        self.assertFalse(vb.translate("english", "en", "fr"))

    @mock.patch('vocabulary.vocabulary.requests.get')
    def test_translate_empty_list(self, mock_api_call):
        res = {
            "result": "ok",
            "tuc": [],
            "phrase": "english"
        }
        self._mock_response(mock_api_call, 200, res)
        self.assertFalse(vb.translate("english", "en", "fr"))

    @mock.patch('vocabulary.vocabulary.requests.get')
    def test_antonym_found(self, mock_api_call):
        res = {
            "noun": {
                "ant": ["hate", "dislike"]
            },
            "verb": {
                "ant": ["hate", "hater"]
            }
        }
        self._mock_response(mock_api_call, 200, res)
        expected_result = '[{"text": "hate", "seq": 0}, {"text": "dislike", "seq": 1}, {"text": "hater", "seq": 2}]'
        result = vb.antonym("love")
        self._assert_same_items(expected_result, result)

    @mock.patch('vocabulary.vocabulary.requests.get')
    def test_antonym_not_found(self, mock_api_call):
        self._mock_response(mock_api_call, 404)
        self.assertFalse(vb.antonym("love"))

    @mock.patch('vocabulary.vocabulary.requests.get')
    def test_antonym_ant_key_error(self, mock_api_call):
        # Both parts of speech lack the "ant" key.
        res = {
            "noun": {},
            "verb": {}
        }
        self._mock_response(mock_api_call, 200, res)
        self.assertFalse(vb.antonym("love"))

    @mock.patch('vocabulary.vocabulary.requests.get')
    def test_partOfSpeech_found(self, mock_api_call):
        res = [
            {
                "word": "hello",
                "partOfSpeech": "interjection",
                "text": "greeting"
            },
            {
                "word": "hello",
                "partOfSpeech": "verb-intransitive",
                "text": "To call."
            }
        ]
        self._mock_response(mock_api_call, 200, res)
        expected_result = '[{"text": "interjection", "example": "greeting", "seq": 0}, {"text": "verb-intransitive", "example": "To call.", "seq": 1}]'
        result = vb.part_of_speech("hello")
        self._assert_same_items(expected_result, result)

    @mock.patch('vocabulary.vocabulary.requests.get')
    def test_partOfSpeech_not_found(self, mock_api_call):
        self._mock_response(mock_api_call, 404)
        self.assertFalse(vb.part_of_speech("hello"))

    @mock.patch('vocabulary.vocabulary.requests.get')
    def test_usageExample_found(self, mock_api_call):
        res = {
            "list": [
                {
                    "definition": "a small mound or hill",
                    "thumbs_up": 18,
                    "word": "hillock",
                    "example": "I went to the to of the hillock to look around.",
                    "thumbs_down": 3
                }
            ]
        }
        self._mock_response(mock_api_call, 200, res)
        expected_result = '[{"seq": 0, "text": "I went to the to of the hillock to look around."}]'
        result = vb.usage_example("hillock")
        self._assert_same_items(expected_result, result)

    @mock.patch('vocabulary.vocabulary.requests.get')
    def test_usageExample_not_found(self, mock_api_call):
        self._mock_response(mock_api_call, 404)
        self.assertFalse(vb.usage_example("hillock"))

    @mock.patch('vocabulary.vocabulary.requests.get')
    def test_usageExample_empty_list(self, mock_api_call):
        # thumbs_up of 0: the example is filtered out, leaving no results.
        res = {
            "list": [
                {
                    "definition": "a small mound or hill",
                    "thumbs_up": 0,
                    "word": "hillock",
                    "example": "I went to the to of the hillock to look around.",
                    "thumbs_down": 3
                }
            ]
        }
        self._mock_response(mock_api_call, 200, res)
        self.assertFalse(vb.usage_example("hillock"))

    @mock.patch('vocabulary.vocabulary.requests.get')
    def test_pronunciation_found(self, mock_api_call):
        res = [
            {
                "rawType": "ahd-legacy",
                "seq": 0,
                "raw": "hip"
            },
            {
                "rawType": "arpabet",
                "seq": 0,
                "raw": "HH IH2 P AH0 P AA1 T AH0 M AH0 S"
            }
        ]
        self._mock_response(mock_api_call, 200, res)
        expected_result = '[{"rawType": "ahd-legacy", "raw": "hip", "seq": 0}, {"rawType": "arpabet", "raw": "HH IH2 P AH0 P AA1 T AH0 M AH0 S", "seq": 1}]'
        result = vb.pronunciation("hippopotamus")
        self._assert_same_items(expected_result, result)

    @mock.patch('vocabulary.vocabulary.requests.get')
    def test_pronunciation_not_found(self, mock_api_call):
        self._mock_response(mock_api_call, 404)
        self.assertFalse(vb.pronunciation("hippopotamus"))

    @mock.patch('vocabulary.vocabulary.requests.get')
    def test_hyphenation_found(self, mock_api_call):
        res = [
            {
                "seq": 0,
                "type": "secondary stress",
                "text": "hip"
            },
            {
                "seq": 1,
                "text": "po"
            }
        ]
        self._mock_response(mock_api_call, 200, res)
        expected_result = '[{"seq": 0, "text": "hip", "type": "secondary stress"}, {"seq": 1, "text": "po"}]'
        result = vb.hyphenation("hippopotamus")
        self._assert_same_items(expected_result, result)

    @mock.patch('vocabulary.vocabulary.requests.get')
    def test_hyphenation_not_found(self, mock_api_call):
        self._mock_response(mock_api_call, 404)
        self.assertFalse(vb.hyphenation("hippopotamus"))

    def test_respond_as_dict_1(self):
        data = json.loads('[{"text": "hummus", "seq": 0}]')
        expected_result = {0: {"text": "hummus"}}
        result = rp().respond(data, 'dict')
        self._assert_same_items(expected_result, result)

    def test_respond_as_dict_2(self):
        data = json.loads('[{"text": "hummus", "seq": 0},{"text": "hummusy", "seq": 1}]')
        expected_result = {0: {"text": "hummus"}, 1: {"text": "hummusy"}}
        result = rp().respond(data, 'dict')
        self._assert_same_items(expected_result, result)

    def test_respond_as_dict_3(self):
        data = json.loads('{"text": ["hummus"]}')
        expected_result = {"text": "hummus"}
        result = rp().respond(data, 'dict')
        self._assert_same_items(expected_result, result)

    def test_respond_as_list_1(self):
        data = json.loads('[{"text": "hummus", "seq": 0}]')
        expected_result = ["hummus"]
        result = rp().respond(data, 'list')
        self._assert_same_items(expected_result, result)

    def test_respond_as_list_2(self):
        data = json.loads('[{"text": "hummus", "seq": 0},{"text": "hummusy", "seq": 1}]')
        expected_result = ["hummus", "hummusy"]
        result = rp().respond(data, 'list')
        self._assert_same_items(expected_result, result)

    def test_respond_as_list_3(self):
        data = json.loads('{"text": ["hummus"]}')
        expected_result = ["hummus"]
        result = rp().respond(data, 'list')
        self._assert_same_items(expected_result, result)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
|
|
# -*- coding: utf-8 -*-
from kivy.lang import Builder
from kivy.uix.modalview import ModalView
from kivymd.label import MDLabel
from kivymd.theming import ThemableBehavior
from kivy.uix.floatlayout import FloatLayout
from kivymd.elevationbehavior import ElevationBehavior
import calendar
from datetime import date
import datetime
from kivy.properties import StringProperty, NumericProperty, ObjectProperty, \
BooleanProperty
from kivy.uix.anchorlayout import AnchorLayout
from kivy.uix.behaviors import ButtonBehavior
from kivymd.ripplebehavior import CircularRippleBehavior
from kivy.clock import Clock
from kivy.core.window import Window
Builder.load_string("""
#:import calendar calendar
<MDDatePicker>
cal_layout: cal_layout
size_hint: (None, None)
size: [dp(328), dp(484)] if self.theme_cls.device_orientation == 'portrait'\
else [dp(512), dp(304)]
pos_hint: {'center_x': .5, 'center_y': .5}
canvas:
Color:
rgb: app.theme_cls.primary_color
Rectangle:
size: [dp(328), dp(96)] if self.theme_cls.device_orientation == 'portrait'\
else [dp(168), dp(304)]
pos: [root.pos[0], root.pos[1] + root.height-dp(96)] if self.theme_cls.device_orientation == 'portrait'\
else [root.pos[0], root.pos[1] + root.height-dp(304)]
Color:
rgb: app.theme_cls.bg_normal
Rectangle:
size: [dp(328), dp(484)-dp(96)] if self.theme_cls.device_orientation == 'portrait'\
else [dp(344), dp(304)]
pos: [root.pos[0], root.pos[1] + root.height-dp(96)-(dp(484)-dp(96))]\
if self.theme_cls.device_orientation == 'portrait' else [root.pos[0]+dp(168), root.pos[1]] #+dp(334)
MDLabel:
id: label_full_date
font_style: 'Display1'
text_color: 1, 1, 1, 1
theme_text_color: 'Custom'
size_hint: (None, None)
size: [root.width, dp(30)] if root.theme_cls.device_orientation == 'portrait'\
else [dp(168), dp(30)]
pos: [root.pos[0]+dp(23), root.pos[1] + root.height - dp(74)] \
if root.theme_cls.device_orientation == 'portrait' \
else [root.pos[0]+dp(3), root.pos[1] + dp(214)]
line_height: 0.84
valign: 'middle'
text_size: [root.width, None] if root.theme_cls.device_orientation == 'portrait'\
else [dp(149), None]
bold: True
text: root.fmt_lbl_date(root.sel_year, root.sel_month, root.sel_day, root.theme_cls.device_orientation)
MDLabel:
id: label_year
font_style: 'Subhead'
text_color: 1, 1, 1, 1
theme_text_color: 'Custom'
size_hint: (None, None)
size: root.width, dp(30)
pos: (root.pos[0]+dp(23), root.pos[1]+root.height-dp(40)) if root.theme_cls.device_orientation == 'portrait'\
else (root.pos[0]+dp(16), root.pos[1]+root.height-dp(41))
valign: 'middle'
text: str(root.sel_year)
GridLayout:
id: cal_layout
cols: 7
size: (dp(44*7), dp(40*7)) if root.theme_cls.device_orientation == 'portrait'\
else (dp(46*7), dp(32*7))
col_default_width: dp(42) if root.theme_cls.device_orientation == 'portrait'\
else dp(39)
size_hint: (None, None)
padding: (dp(2), 0) if root.theme_cls.device_orientation == 'portrait'\
else (dp(7), 0)
spacing: (dp(2), 0) if root.theme_cls.device_orientation == 'portrait'\
else (dp(7), 0)
pos: (root.pos[0]+dp(10), root.pos[1]+dp(60)) if root.theme_cls.device_orientation == 'portrait'\
else (root.pos[0]+dp(168)+dp(8), root.pos[1]+dp(48))
MDLabel:
id: label_month_selector
font_style: 'Body2'
text: calendar.month_name[root.month].capitalize() + ' ' + str(root.year)
size_hint: (None, None)
size: root.width, dp(30)
pos: root.pos
theme_text_color: 'Primary'
pos_hint: {'center_x': 0.5, 'center_y': 0.75} if self.theme_cls.device_orientation == 'portrait'\
else {'center_x': 0.67, 'center_y': 0.915}
valign: "middle"
halign: "center"
MDIconButton:
icon: 'chevron-left'
theme_text_color: 'Secondary'
pos_hint: {'center_x': 0.09, 'center_y': 0.745} if root.theme_cls.device_orientation == 'portrait'\
else {'center_x': 0.39, 'center_y': 0.925}
on_release: root.change_month('prev')
MDIconButton:
icon: 'chevron-right'
theme_text_color: 'Secondary'
pos_hint: {'center_x': 0.92, 'center_y': 0.745} if root.theme_cls.device_orientation == 'portrait'\
else {'center_x': 0.94, 'center_y': 0.925}
on_release: root.change_month('next')
MDFlatButton:
pos: root.pos[0]+root.size[0]-dp(72)*2, root.pos[1] + dp(7)
text: "Cancel"
on_release: root.dismiss()
MDFlatButton:
pos: root.pos[0]+root.size[0]-dp(72), root.pos[1] + dp(7)
text: "OK"
on_release: root.ok_click()
<DayButton>
size_hint: None, None
size: (dp(40), dp(40)) if root.theme_cls.device_orientation == 'portrait'\
else (dp(32), dp(32))
MDLabel:
font_style: 'Caption'
theme_text_color: 'Custom' if root.is_today and not root.is_selected else 'Primary'
text_color: root.theme_cls.primary_color
opposite_colors: root.is_selected if root.owner.sel_month == root.owner.month \
and root.owner.sel_year == root.owner.year and str(self.text) == str(root.owner.sel_day) else False
size_hint_x: None
valign: 'middle'
halign: 'center'
text: root.text
<WeekdayLabel>
font_style: 'Caption'
theme_text_color: 'Secondary'
size: (dp(40), dp(40)) if root.theme_cls.device_orientation == 'portrait'\
else (dp(32), dp(32))
size_hint: None, None
text_size: self.size
valign: 'middle' if root.theme_cls.device_orientation == 'portrait' else 'bottom'
halign: 'center'
<DaySelector>
size: (dp(40), dp(40)) if root.theme_cls.device_orientation == 'portrait'\
else (dp(32), dp(32))
size_hint: (None, None)
canvas:
Color:
rgba: self.theme_cls.primary_color if self.shown else [0, 0, 0, 0]
Ellipse:
size: (dp(40), dp(40)) if root.theme_cls.device_orientation == 'portrait'\
else (dp(32), dp(32))
pos: self.pos if root.theme_cls.device_orientation == 'portrait'\
else [self.pos[0] + dp(3), self.pos[1]]
""")
class DaySelector(ThemableBehavior, AnchorLayout):
    """Circular highlight that tracks the currently selected day widget."""
    shown = BooleanProperty(False)  # Drawn only when selection's month shown.

    def __init__(self, parent):
        super(DaySelector, self).__init__()
        self.parent_class = parent
        # index=7 controls where the selector sits among the picker's
        # children — presumably behind the day buttons; TODO confirm z-order.
        self.parent_class.add_widget(self, index=7)
        self.selected_widget = None
        Window.bind(on_resize=self.move_resize)

    def update(self):
        """Show the selector only when the selected date's month is visible."""
        parent = self.parent_class
        if parent.sel_month == parent.month and parent.sel_year == parent.year:
            self.shown = True
        else:
            self.shown = False

    def set_widget(self, widget):
        """Attach the selector to `widget` and reposition over it."""
        self.selected_widget = widget
        self.pos = widget.pos
        self.move_resize(do_again=True)
        self.update()

    def move_resize(self, window=None, width=None, height=None, do_again=True):
        # Also bound to Window.on_resize, hence the unused window/size args.
        self.pos = self.selected_widget.pos
        # Reposition once more shortly after: widget positions may not be
        # final until the pending layout pass completes.
        if do_again:
            Clock.schedule_once(lambda x: self.move_resize(do_again=False), 0.01)
class DayButton(ThemableBehavior, CircularRippleBehavior, ButtonBehavior,
                AnchorLayout):
    """A single day cell in the calendar grid; selects itself on release."""
    text = StringProperty()   # Day number as text ('' for blank cells).
    owner = ObjectProperty()  # The MDDatePicker that owns this cell.
    is_today = BooleanProperty(False)
    is_selected = BooleanProperty(False)

    def on_release(self):
        self.owner.set_selected_widget(self)
class WeekdayLabel(MDLabel):
    """Label for the single-letter weekday headers (styled entirely in kv)."""
    pass
class MDDatePicker(FloatLayout, ThemableBehavior, ElevationBehavior,
                   ModalView):
    """Material-design date-picker dialog.

    ``callback`` is invoked with a ``datetime.date`` when OK is pressed.

    Fix: ``change_month`` compared ``operation is 'next'`` — identity
    comparison against a string literal, which only works via CPython string
    interning and emits a SyntaxWarning on 3.8+.  Now uses ``==``.
    """
    _sel_day_widget = ObjectProperty()  # DayButton currently highlighted.
    cal_list = None                     # Flat list of the 6*7 DayButtons.
    cal_layout = ObjectProperty()
    sel_year = NumericProperty()        # The selected date.
    sel_month = NumericProperty()
    sel_day = NumericProperty()
    day = NumericProperty()             # The displayed (visible) month/year.
    month = NumericProperty()
    year = NumericProperty()
    today = date.today()
    callback = ObjectProperty()

    class SetDateError(Exception):
        """Raised by set_date() for an out-of-range year/month/day."""
        pass

    def __init__(self, callback, year=None, month=None, day=None,
                 firstweekday=0,
                 **kwargs):
        self.callback = callback
        self.cal = calendar.Calendar(firstweekday)
        # Default any missing component of the selection to today's date.
        self.sel_year = year if year else self.today.year
        self.sel_month = month if month else self.today.month
        self.sel_day = day if day else self.today.day
        self.month = self.sel_month
        self.year = self.sel_year
        self.day = self.sel_day
        super(MDDatePicker, self).__init__(**kwargs)
        self.selector = DaySelector(parent=self)
        self.generate_cal_widgets()
        self.update_cal_matrix(self.sel_year, self.sel_month)
        self.set_month_day(self.sel_day)
        self.selector.update()

    def ok_click(self):
        # Deliver the selected date to the caller and close the dialog.
        self.callback(date(self.sel_year, self.sel_month, self.sel_day))
        self.dismiss()

    def fmt_lbl_date(self, year, month, day, orientation):
        """Format the header label, e.g. 'Tue, Feb 27' (components separated
        by a newline in landscape orientation)."""
        d = datetime.date(int(year), int(month), int(day))
        separator = '\n' if orientation == 'landscape' else ' '
        return d.strftime('%a,').capitalize() + separator + d.strftime(
            '%b').capitalize() + ' ' + str(day).lstrip('0')

    def set_date(self, year, month, day):
        """Programmatically select a date.

        Raises SetDateError when the components do not form a valid date;
        otherwise updates the selection and redraws the calendar.
        """
        try:
            date(year, month, day)
        except Exception as e:
            print(e)
            # Translate datetime's messages into SetDateError.
            if str(e) == "day is out of range for month":
                raise self.SetDateError(" Day %s day is out of range for month %s" % (day, month))
            elif str(e) == "month must be in 1..12":
                raise self.SetDateError("Month must be between 1 and 12, got %s" % month)
            elif str(e) == "year is out of range":
                raise self.SetDateError("Year must be between %s and %s, got %s" %
                                        (datetime.MINYEAR, datetime.MAXYEAR, year))
        else:
            self.sel_year = year
            self.sel_month = month
            self.sel_day = day
            self.month = self.sel_month
            self.year = self.sel_year
            self.day = self.sel_day
            self.update_cal_matrix(self.sel_year, self.sel_month)
            self.set_month_day(self.sel_day)
            self.selector.update()

    def set_selected_widget(self, widget):
        """Select the day represented by `widget` (called by DayButton)."""
        if self._sel_day_widget:
            self._sel_day_widget.is_selected = False
        widget.is_selected = True
        self.sel_month = int(self.month)
        self.sel_year = int(self.year)
        self.sel_day = int(widget.text)
        self._sel_day_widget = widget
        self.selector.set_widget(widget)

    def set_month_day(self, day):
        """Highlight the cell whose text matches `day` in the visible month."""
        for idx in range(len(self.cal_list)):
            if str(day) == str(self.cal_list[idx].text):
                self._sel_day_widget = self.cal_list[idx]
                self.sel_day = int(self.cal_list[idx].text)
                if self._sel_day_widget:
                    self._sel_day_widget.is_selected = False
                self._sel_day_widget = self.cal_list[idx]
                self.cal_list[idx].is_selected = True
                self.selector.set_widget(self.cal_list[idx])

    def update_cal_matrix(self, year, month):
        """Refill the 6x7 grid for (year, month); cells outside the month
        are blanked and disabled."""
        try:
            dates = [x for x in self.cal.itermonthdates(year, month)]
        except ValueError as e:
            # itermonthdates raises for years outside date's range; the
            # grid is simply left unchanged in that case.
            if str(e) == "year is out of range":
                pass
        else:
            self.year = year
            self.month = month
            for idx in range(len(self.cal_list)):
                if idx >= len(dates) or dates[idx].month != month:
                    self.cal_list[idx].disabled = True
                    self.cal_list[idx].text = ''
                else:
                    self.cal_list[idx].disabled = False
                    self.cal_list[idx].text = str(dates[idx].day)
                    self.cal_list[idx].is_today = dates[idx] == self.today
        self.selector.update()

    def generate_cal_widgets(self):
        """Build the weekday header row plus a 6x7 grid of day buttons."""
        cal_list = []
        for i in calendar.day_abbr:
            self.cal_layout.add_widget(WeekdayLabel(text=i[0].upper()))
        for i in range(6 * 7):  # 6 weeks, 7 days a week
            db = DayButton(owner=self)
            cal_list.append(db)
            self.cal_layout.add_widget(db)
        self.cal_list = cal_list

    def change_month(self, operation):
        """Step the visible month forward ('next') or back, wrapping the
        year at the boundaries."""
        # Value comparison, not `is`: identity on str literals is an
        # interning accident and warns on Python 3.8+.
        op = 1 if operation == 'next' else -1
        sl, sy = self.month, self.year
        m = 12 if sl + op == 0 else 1 if sl + op == 13 else sl + op
        y = sy - 1 if sl + op == 0 else sy + 1 if sl + op == 13 else sy
        self.update_cal_matrix(y, m)
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 27 12:40:48 2018
@author: BallBlueMeercat
"""
import matplotlib.pyplot as plt
import numpy as np
import os
import time
from results import load
def stat(hue, var, var_true, var_name, slnprob, zpicks,
         mag, sigma, nsteps, nwalkers, save_path, firstderivs_key):
    """Save three diagnostic plots for one sampled parameter.

    Writes (1) a marginalised-distribution histogram, (2) a scatter of
    parameter value vs ln-probability, and (3) the walker chains with the
    true value overplotted, each to a timestamped .png under `save_path`.
    NOTE(review): `mag` is accepted but never used in this function —
    confirm against callers.
    """
    name_l = var_name.lower()
    initial = name_l[:1]  # Single-letter tag used in the filenames.
    name_true = initial + '_true'
    hue_name = hue
    hue = 'xkcd:'+hue  # Matplotlib xkcd colour specifier.
    # Marginalised distribution histogram.
    plt.figure()
#    plt.xlabel(r'$\{}$'.format(name_l))
    plt.xlabel(name_l)
    plt.title('model: '+firstderivs_key+'\n Marginalised distribution of '
              +name_l+' \n nsteps: '+str(nsteps)+', noise: '+str(sigma)
              +', npoints: '+str(len(zpicks))+' '+firstderivs_key)
    plt.hist(var, 50, facecolor=hue)
    stamp = str(int(time.time()))  # Unix timestamp keeps filenames unique.
    filename = str(stamp)+'_'+initial+'_mhist__nsteps_'+str(nsteps) \
               +'_nwalkers_'+str(nwalkers)+'_noise_'+str(sigma) \
               +'_numpoints_'+str(len(zpicks))+'.png'
    filename = os.path.join(save_path, filename)
    plt.savefig(filename)
    # Walker steps.
    plt.figure()
    plt.xlabel(name_l)
    plt.title('model: '+firstderivs_key+'\n lnprobability of '+name_l
              +' \n nsteps: '+str(nsteps)+', noise: '+str(sigma)
              +', npoints: '+str(len(zpicks)))
    plt.plot(var, slnprob, '.', color=hue)
    stamp = str(int(time.time()))
    filename = str(stamp)+'_'+initial+'_steps__nsteps_'+str(nsteps) \
               +'_nwalkers_'+str(nwalkers)+'_noise_'+str(sigma) \
               +'_numpoints_'+str(len(zpicks))+'.png'
    filename = os.path.join(save_path, filename)
    plt.savefig(filename)
    # Chains.
    plt.figure()
    plt.xlabel('step number')
#    plt.ylabel(r'$\{}$'.format(name_l))
    plt.ylabel(name_l)
    plt.title('model: '+firstderivs_key+'\n flatchains, '+name_true+
              ' in '+hue_name+' \n nsteps: '+str(nsteps)+', noise: '
              +str(sigma)+', npoints: '+str(len(zpicks)))
    plt.plot(var.T, '-', color='k', alpha=0.3)
    plt.axhline(var_true, color=hue)  # Mark the true value on the chains.
    stamp = str(int(time.time()))
    filename = str(stamp)+'_'+initial+'_chain__nsteps_'+str(nsteps) \
               +'_nwalkers_'+str(nwalkers)+'_noise_'+str(sigma) \
               +'_numpoints_'+str(len(zpicks))+'.png'
    filename = os.path.join(save_path, filename)
    plt.savefig(filename)
    plt.show(block=False)
    return
def onepercent():
    """Aggregate saved MCMC runs and plot the noisiest runs in which each
    parameter was recovered within tolerance (m within 1%, gamma within 1.5%).

    Loads pickled results from every subdirectory of ./results/ and returns
    the aggregated arrays (m_vc, g_vc, sigma, npoints).

    Fixes: the duplicated-entry lookup previously used
    ``np.where(python_list == numpy_scalar)`` — that only works through
    numpy's reflected broadcasting and ``int(index[0])`` breaks when a size
    appears more than once; replaced with ``list.index()``.  The duplicated
    m/g filtering logic is factored into a local helper.
    """
    direclist = []
    for d in os.walk('./results/'):
        direclist.append(d[0])
    direclist.pop(0)  # Drop './results/' itself; keep only subdirectories.

    m_vc = []
    g_vc = []
    sigma = []
    npoints = []
    for directory in direclist:
        m_vc += load(directory, 'm_vc.p')
        g_vc += load(directory, 'g_vc.p')
        sigma += load(directory, 'sigma.p')
        npoints += load(directory, 'npoints.p')

    m_vc = np.asarray(m_vc)
    g_vc = np.asarray(g_vc)
    sigma = np.asarray(sigma)
    npoints = np.asarray(npoints)

    def _noisiest_per_size(vc, limit):
        # Rows where the parameter was recovered within `limit` percent.
        pi = np.where(vc < limit)
        pinpoints = npoints[pi]
        pisigma = sigma[pi]
        # Keep one entry per dataset size: the largest sigma seen for it.
        sinpoints = []
        sisigma = []
        for i in range(len(pinpoints)):
            if pinpoints[i] in sinpoints:
                index = sinpoints.index(pinpoints[i])
                if pisigma[i] > sisigma[index]:
                    sisigma[index] = pisigma[i]
            else:
                sinpoints.append(pinpoints[i])
                sisigma.append(pisigma[i])
        return sinpoints, sisigma

    m_sinpoints, m_sisigma = _noisiest_per_size(m_vc, 1)
    g_sinpoints, g_sisigma = _noisiest_per_size(g_vc, 1.5)

    plt.figure()
    plt.xlabel('dataset size')
    plt.ylabel('sigma of noise added to data')
    plt.title('noisiest runs where m was found within 1%')
    plt.scatter(m_sinpoints, m_sisigma, c='m', label='1% sd on m')
    plt.scatter(npoints, sigma, c='c', marker='x', label='all runs')
    plt.legend()

    plt.figure()
    plt.xlabel('dataset size')
    plt.ylabel('sigma of noise added to data')
    plt.title('noisiest runs where gamma was found within 1.5%')
    plt.scatter(g_sinpoints, g_sisigma, c='g', label='sd on gamma')
    plt.scatter(npoints, sigma, c='c', marker='x', label='all runs')
    plt.legend()
    plt.show()
    return m_vc, g_vc, sigma, npoints
#m_vc, g_vc, sigma, npoints = onepercent()
def ivcdmcheck(mag, zpicks, firstderivs_key, plot_var_dict):
    """Compare H(z) and density-parameter evolution for three models.

    Parameters:
        mag: unused here; kept so the signature matches the sibling plotters.
        zpicks: redshifts at which the quantities were evaluated.
        firstderivs_key: sequence of three model-name strings.
            NOTE(review): element [0] is used as model 3 and [1] as model 1
            below -- confirm this ordering is intentional in the callers.
        plot_var_dict: dict with keys 'plot_var_1'..'plot_var_3', each a
            9-tuple (t, dlpc, dl, a, ombar_m, gamma, ombar_de, ombar_m0,
            ombar_de0) as produced by the integrator.

    Shows one H(z) figure and two density-parameter figures (linear and
    semilog-x redshift axis); returns None.
    """
    firstderivs_key_3 = firstderivs_key[0]
    firstderivs_key_1 = firstderivs_key[1]
    firstderivs_key_2 = firstderivs_key[2]

    t_1, dlpc_1, dl_1, a_1, ombar_m_1, gamma_1, ombar_de_1, ombar_m0_1, ombar_de0_1 = plot_var_dict['plot_var_1']
    t_2, dlpc_2, dl_2, a_2, ombar_m_2, gamma_2, ombar_de_2, ombar_m0_2, ombar_de0_2 = plot_var_dict['plot_var_2']
    t_3, dlpc_3, dl_3, a_3, ombar_m_3, gamma_3, ombar_de_3, ombar_m0_3, ombar_de0_3 = plot_var_dict['plot_var_3']

    # Changing time into age.
    t_1, t_2, t_3 = -t_1, -t_2, -t_3

    # H vs redshift; in these units H = sqrt(ombar_m + ombar_de).
    Hz_1 = (ombar_m_1 + ombar_de_1)**(1/2)
    Hz_2 = (ombar_m_2 + ombar_de_2)**(1/2)
    Hz_3 = (ombar_m_3 + ombar_de_3)**(1/2)
    fig, ax = plt.subplots()
    plt.xlabel('redshift $z$')
    plt.ylabel('H')
    plt.grid(True)
    ax.plot(zpicks, Hz_1, 'r:', label=firstderivs_key_1)
    ax.plot(zpicks, Hz_2, 'g-.', label=firstderivs_key_2)
    ax.plot(zpicks, Hz_3, 'b-', label=firstderivs_key_3)
    # Raw string: '\g' is not a valid Python escape; mathtext needs the
    # literal backslash.
    plt.title(r'H evolution, $\gamma$ = %s' % (gamma_1))
    ax.legend()

    # ombar_m, ombar_de vs redshift.
    fig, ax = plt.subplots()
    plt.xlabel('redshift $z$')
    plt.ylabel(r'$\bar \Omega $')
    plt.grid(True)
    ax.plot(zpicks, ombar_m_1, 'r:', label='m %s' % firstderivs_key_1)
    ax.plot(zpicks, ombar_m_2, 'g-.', label='m %s' % firstderivs_key_2)
    ax.plot(zpicks, ombar_m_3, 'b-', label='m %s' % firstderivs_key_3)
    ax.plot(zpicks, ombar_de_1, 'm:', label='de %s' % firstderivs_key_1)
    ax.plot(zpicks, ombar_de_2, 'c-.', label='de %s' % firstderivs_key_2)
    ax.plot(zpicks, ombar_de_3, 'k-', label='de %s' % firstderivs_key_3)
    plt.title(r'$\bar \Omega_{m}$, $\bar \Omega_{DE}$ evolution')
    ax.legend()

    # Same densities with a logarithmic redshift axis.
    fig, ax = plt.subplots()
    plt.xlabel('redshift $z$')
    plt.ylabel(r'$\bar \Omega $')
    plt.grid(True)
    ax.semilogx(zpicks, ombar_m_1, 'r:', label='m %s' % firstderivs_key_1)
    ax.semilogx(zpicks, ombar_m_2, 'g-.', label='m %s' % firstderivs_key_2)
    ax.semilogx(zpicks, ombar_m_3, 'b-', label='m %s' % firstderivs_key_3)
    ax.semilogx(zpicks, ombar_de_1, 'm:', label='de %s' % firstderivs_key_1)
    ax.semilogx(zpicks, ombar_de_2, 'c-.', label='de %s' % firstderivs_key_2)
    ax.semilogx(zpicks, ombar_de_3, 'k-', label='de %s' % firstderivs_key_3)
    # BUG FIX: a duplicated re-plot of ombar_m_1 (same data, same label) was
    # removed here; it drew the curve twice and duplicated the legend entry.
    plt.title(r'$\bar \Omega_{m}$, $\bar \Omega_{DE}$ evolution')
    ax.legend()

    plt.show()
    return
def modelcheck(mag, zpicks, plot_var_1, firstderivs_key):
    """Diagnostic plots for a single cosmology model run.

    Parameters:
        mag: magnitudes at zpicks, scattered in the final figure.
        zpicks: redshifts the integrator was evaluated at.
        plot_var_1: 9-tuple (t, dlpc, dl, a, ombar_m, gamma, ombar_de,
            ombar_m0, ombar_de0) from the integrator.
        firstderivs_key: model name, used only in figure titles.

    Prints basic sanity info, then shows figures of a(z), H(z), the density
    parameters (linear and semilog-x), d_L(z), z(age), a(age) and mag(z).
    Returns None.
    """
    t, dlpc, dl, a, ombar_m, gamma, ombar_de, ombar_m0, ombar_de0 = plot_var_1
    # Flip integration time into (positive) age.
    t = -t
    # Negative densities are unphysical -- report them instead of the usual
    # start/end summary.
    if min(ombar_m) < 0:
        print('unphysical ombar_m', str(min(ombar_m)))
    elif min(ombar_de) < 0:
        print('unphysical ombar_de', str(min(ombar_de)))
    else:
        print()
        print('a:', a[-1], '---->', a[0])
        print('ombar_de:', ombar_de[-1], '---->', ombar_de[0])
        print('ombar_m:', ombar_m[-1], '---->', ombar_m[0])
        print('z:', zpicks[-1], '---->', zpicks[0])
    # Scale factor vs redshift.
    plt.figure()
    plt.xlabel('redshift $z$')
    plt.ylabel('a')
    plt.grid(True)
    plt.plot(zpicks, a, 'xkcd:crimson', lw=1)
    plt.title('Scale factor evolution, model = %s, $\gamma$ = %s'
              %(firstderivs_key, gamma))
    # H = sqrt(ombar_m + ombar_de) in these units; a NaN flags an
    # integration failure at that redshift.
    Hz = []
    for i in range(len(ombar_m)):
        H = (ombar_m[i] + ombar_de[i])**(1/2)
        if np.isnan(H):
            print('plots.modelcheck got NaN value for H')
        Hz.append(H)
    print('Hz:', Hz[-1], '---->', Hz[0])
    # H vs redshift.
    plt.figure()
    plt.xlabel('redshift $z$')
    plt.ylabel('H')
    plt.grid(True)
    plt.plot(zpicks, Hz, color='xkcd:blue', lw=1)
    plt.title(r'H evolution, model = %s, $\gamma$ = %s'
              %(firstderivs_key, gamma))
    # ombar_m, ombar_de vs redshift.
    plt.figure()
    plt.xlabel('redshift $z$')
    plt.ylabel(r'$\bar \Omega $')
    plt.grid(True)
    plt.plot(zpicks, ombar_m, label=r'$\bar \Omega_{m}$',
             color='xkcd:coral', linestyle=':')
    plt.plot(zpicks, ombar_de, label=r'$\bar \Omega_{DE}$',
             color='xkcd:aquamarine')
    plt.legend()
    plt.title(r'$\bar \Omega_{m}$, $\bar \Omega_{DE}$ evolution, model = %s, $\gamma$ = %s'
              %(firstderivs_key, gamma))
    # Same densities with a logarithmic redshift axis.
    plt.figure()
    plt.xlabel('redshift $z$')
    plt.ylabel(r'$\bar \Omega $')
    plt.grid(True)
    plt.semilogx(zpicks, ombar_m, label=r'$\bar \Omega_{m}$',
                 color='xkcd:coral', linestyle=':')
    plt.semilogx(zpicks, ombar_de, label=r'$\bar \Omega_{DE}$',
                 color='xkcd:aquamarine')
    plt.legend()
    plt.title(r'$\bar \Omega_{m}$, $\bar \Omega_{DE}$ evolution, model = %s, $\gamma$ = %s'
              %(firstderivs_key, gamma))
    # Luminosity distance vs redshift.
    plt.figure()
    plt.xlabel('redshift $z$')
    plt.ylabel('$d_L$*($H_0$/c)')
    plt.grid(True)
    plt.plot(zpicks, dl, 'xkcd:lightgreen', lw=1)
    plt.title('$d_L$ evolution, model = %s, $\gamma$ = %s'
              %(firstderivs_key, gamma))
    # Redshift vs age.
    plt.figure()
    plt.xlabel('age')
    plt.ylabel('redshift $z$')
    plt.grid(True)
    plt.plot(t, zpicks, 'm', lw=1)
    plt.title('Redshift evolution, model = %s, $\gamma$ = %s'
              %(firstderivs_key, gamma))
    # Scale factor vs age.
    plt.figure()
    plt.xlabel('age')
    plt.ylabel('a')
    plt.grid(True)
    plt.plot(t, a, color='xkcd:crimson', lw=1)
    plt.title('Scale factor evolution, model = %s, $\gamma$ = %s'
              %(firstderivs_key, gamma))
    # Magnitude vs redshift.
    plt.figure()
    plt.xlabel('redshift $z$')
    plt.ylabel('magnitude')
    plt.title('Magnitude evolution, model = %s, $\gamma$ = %s'
              %(firstderivs_key, gamma))
    # NOTE(review): lw='1' passes the linewidth as a string; matplotlib
    # tolerates it, but numeric 1 is the conventional form.
    plt.scatter(zpicks, mag, marker='.', lw='1', c='xkcd:tomato')
    plt.show()
    return
def paramcheck(mag, zpicks, firstderivs_key, plot_var_dict, param_name):
    """Compare three runs of one model that differ in a single parameter.

    Parameters:
        mag: unused here; the magnitudes come from plot_var_dict instead.
        zpicks: redshifts the quantities were evaluated at.
        firstderivs_key: model name; 'exotic' runs carry extra variables
            (radiation density and zeta).
        plot_var_dict: dict with keys 'plot_var_1'..'plot_var_3' (integrator
            output tuples) and 'mag_1'..'mag_3' (magnitude arrays).
        param_name: 'gamma' or 'zeta' -- the parameter varied between runs.

    Shows comparison figures of a, H, the density parameters, d_L, z(age),
    a(age) and mag(z); returns None.

    NOTE(review): any other param_name leaves param_1/2/3 unbound and the
    plotting below raises NameError; 'zeta' is only unpacked when
    firstderivs_key == 'exotic'. Confirm callers respect both constraints.
    """
    # 'exotic' output tuples are 12 elements long (extra radiation density
    # and zeta parameter); all other models produce 9-element tuples.
    if firstderivs_key == 'exotic':
        t_1, dlpc_1, dl_1, a_1, ombar_m_1, ombar_r_1, gamma_1, zeta_1, ombar_de_1, ombar_m0_1, ombar_r0_1, ombar_de0_1 = plot_var_dict['plot_var_1']
        t_2, dlpc_2, dl_2, a_2, ombar_m_2, ombar_r_2, gamma_2, zeta_2, ombar_de_2, ombar_m0_2, ombar_r0_2, ombar_de0_2 = plot_var_dict['plot_var_2']
        t_3, dlpc_3, dl_3, a_3, ombar_m_3, ombar_r_3, gamma_3, zeta_3, ombar_de_3, ombar_m0_3, ombar_r0_3, ombar_de0_3 = plot_var_dict['plot_var_3']
    else:
        t_1, dlpc_1, dl_1, a_1, ombar_m_1, gamma_1, ombar_de_1, ombar_m0_1, ombar_de0_1 = plot_var_dict['plot_var_1']
        t_2, dlpc_2, dl_2, a_2, ombar_m_2, gamma_2, ombar_de_2, ombar_m0_2, ombar_de0_2 = plot_var_dict['plot_var_2']
        t_3, dlpc_3, dl_3, a_3, ombar_m_3, gamma_3, ombar_de_3, ombar_m0_3, ombar_de0_3 = plot_var_dict['plot_var_3']
    # Select which parameter's three values label the curves.
    if param_name == 'gamma':
        param_1, param_2, param_3 = gamma_1, gamma_2, gamma_3
    elif param_name == 'zeta':
        param_1, param_2, param_3 = zeta_1, zeta_2, zeta_3
    # Changing time into age.
    t_1, t_2, t_3 = -t_1, -t_2, -t_3
    mag_1 = plot_var_dict['mag_1']
    mag_2 = plot_var_dict['mag_2']
    mag_3 = plot_var_dict['mag_3']
    # Scale factor vs redshift.
    fig, ax = plt.subplots()
    plt.xlabel('redshift $z$')
    plt.ylabel('a')
    plt.grid(True)
    ax.plot(zpicks, a_1, 'r:', label=param_1)
    ax.plot(zpicks, a_2, 'g-.', label=param_2)
    ax.plot(zpicks, a_3, 'b-', label=param_3)
    plt.title('Scale factor evolution, model = %s'%(firstderivs_key))
    ax.legend()
    # H vs redshift; H = sqrt(ombar_m + ombar_de) in these units.
    Hz_1 = (ombar_m_1 + ombar_de_1)**(1/2)
    Hz_2 = (ombar_m_2 + ombar_de_2)**(1/2)
    Hz_3 = (ombar_m_3 + ombar_de_3)**(1/2)
    fig, ax = plt.subplots()
    plt.xlabel('redshift $z$')
    plt.ylabel('H')
    plt.grid(True)
    ax.plot(zpicks, Hz_1, 'r:', label=param_1)
    ax.plot(zpicks, Hz_2, 'g-.', label=param_2)
    ax.plot(zpicks, Hz_3, 'b-', label=param_3)
    plt.title('H evolution, model = %s'%(firstderivs_key))
    ax.legend()
    # ombar_m, ombar_de vs redshift.
    fig, ax = plt.subplots()
    plt.xlabel('redshift $z$')
    plt.ylabel(r'$\bar \Omega $')
    plt.grid(True)
    ax.plot(zpicks, ombar_m_1, 'r:', label='m, param = %s'%(param_1))
    ax.plot(zpicks, ombar_m_2, 'g-.', label='m, param = %s'%(param_2))
    ax.plot(zpicks, ombar_m_3, 'b-', label='m, param = %s'%(param_3))
    ax.plot(zpicks, ombar_de_1, 'm:', label='de, param = %s'%(param_1))
    ax.plot(zpicks, ombar_de_2, 'k-.', label='de, param = %s'%(param_2))
    ax.plot(zpicks, ombar_de_3, 'c-', label='de, param = %s'%(param_3))
    plt.title(r'$\bar \Omega_{m}$, $\bar \Omega_{DE}$ evolution')
    ax.legend()
    # Same densities with a logarithmic redshift axis.
    fig, ax = plt.subplots()
    plt.xlabel('redshift $z$')
    plt.ylabel(r'$\bar \Omega $')
    plt.grid(True)
    ax.semilogx(zpicks, ombar_m_1, 'r:', label='m, param = %s'%(param_1))
    ax.semilogx(zpicks, ombar_m_2, 'g-.', label='m, param = %s'%(param_2))
    ax.semilogx(zpicks, ombar_m_3, 'b-', label='m, param = %s'%(param_3))
    ax.semilogx(zpicks, ombar_de_1, 'm:', label='de, param = %s'%(param_1))
    ax.semilogx(zpicks, ombar_de_2, 'k-.', label='de, param = %s'%(param_2))
    ax.semilogx(zpicks, ombar_de_3, 'c-', label='de, param = %s'%(param_3))
    plt.title(r'$\bar \Omega_{m}$, $\bar \Omega_{DE}$ evolution')
    ax.legend()
    # ombar_m vs redshift.
    fig, ax = plt.subplots()
    plt.xlabel('redshift $z$')
    plt.ylabel('$\Omega_{m}$')
    plt.grid(True)
    ax.plot(zpicks, ombar_m_1, 'r:', label=param_1)
    ax.plot(zpicks, ombar_m_2, 'g-.', label=param_2)
    ax.plot(zpicks, ombar_m_3, 'b-', label=param_3)
    plt.title('$\Omega_{m}$ evolution, model = %s'%(firstderivs_key))
    ax.legend()
    # ombar_de vs redshift.
    fig, ax = plt.subplots()
    plt.xlabel('redshift $z$')
    plt.ylabel('$\Omega_{DE}$')
    plt.grid(True)
    ax.plot(zpicks, ombar_de_1, 'r:', label=param_1)
    ax.plot(zpicks, ombar_de_2, 'g-.', label=param_2)
    ax.plot(zpicks, ombar_de_3, 'b-', label=param_3)
    plt.title('$\Omega_{DE}$ evolution, model = %s'%(firstderivs_key))
    ax.legend()
    # Luminosity distance vs redshift.
    fig, ax = plt.subplots()
    plt.xlabel('redshift $z$')
    plt.ylabel('$d_L$*($H_0$/c)')
    plt.grid(True)
    ax.plot(zpicks, dl_1, 'r:', label=param_1)
    ax.plot(zpicks, dl_2, 'g-.', label=param_2)
    ax.plot(zpicks, dl_3, 'b-', label=param_3)
    plt.title('$d_L$ evolution, model = %s'%(firstderivs_key))
    ax.legend()
    # Redshift vs age.
    fig, ax = plt.subplots()
    plt.xlabel('age')
    plt.ylabel('redshift $z$')
    plt.grid(True)
    ax.plot(t_1, zpicks, 'r:', label=param_1)
    ax.plot(t_2, zpicks, 'g-.', label=param_2)
    ax.plot(t_3, zpicks, 'b-', label=param_3)
    plt.title('Redshift evolution, model = %s'%(firstderivs_key))
    ax.legend()
    # Scale factor vs age.
    fig, ax = plt.subplots()
    plt.xlabel('age')
    plt.ylabel('a')
    plt.grid(True)
    ax.plot(t_1, a_1, 'r:', label=param_1)
    ax.plot(t_2, a_2, 'g-.', label=param_2)
    ax.plot(t_3, a_3, 'b-', label=param_3)
    plt.title('Scale factor evolution, model = %s'%(firstderivs_key))
    ax.legend()
    # Magnitude vs redshift.
    fig, ax = plt.subplots()
    plt.xlabel('redshift $z$')
    plt.ylabel('magnitude')
    plt.title('Magnitude evolution, model = %s'%(firstderivs_key))
    plt.scatter(zpicks, mag_1, c='r', marker=',', lw=0.1, label=param_1)
    plt.scatter(zpicks, mag_2, c='g', marker='x', lw=0.1, label=param_2)
    plt.scatter(zpicks, mag_3, c='b', marker='.', lw=0.2, label=param_3)
    ax.legend()
    plt.show()
    return
|
|
import distutils
import logging
import os
import time
from shlex import quote
from ray import services
from ray.tune.cluster_info import get_ssh_key, get_ssh_user
from ray.tune.sync_client import (CommandBasedClient, get_sync_client,
get_cloud_sync_client, NOOP)
logger = logging.getLogger(__name__)

# Minimum number of seconds between two automatic syncs of the same syncer.
SYNC_PERIOD = 300

# Set once the "cluster not set up with `ray up`" warning has been emitted,
# so the message is not repeated for every syncer.
_log_sync_warned = False
# Cache of Syncer instances keyed by (local_dir, remote_dir).
_syncers = {}
def wait_for_sync():
    """Block until every cached syncer has finished its in-flight sync."""
    for active_syncer in list(_syncers.values()):
        active_syncer.wait()
def log_sync_template(options=""):
    """Template enabling syncs between driver and worker when possible.

    Requires ray cluster to be started with the autoscaler. Also requires
    rsync to be installed.

    Args:
        options (str): Additional rsync options.

    Returns:
        Sync template with source and target parameters. None if rsync
        unavailable.
    """
    # distutils is deprecated (and removed in Python 3.12); shutil.which is
    # the supported stdlib way to locate an executable on PATH.
    import shutil
    if not shutil.which("rsync"):
        logger.error("Log sync requires rsync to be installed.")
        return None
    global _log_sync_warned
    ssh_key = get_ssh_key()
    if ssh_key is None:
        # Without the autoscaler-provisioned key we cannot reach workers.
        if not _log_sync_warned:
            logger.debug("Log sync requires cluster to be setup with "
                         "`ray up`.")
            _log_sync_warned = True
        return None
    rsh = "ssh -i {ssh_key} -o ConnectTimeout=120s -o StrictHostKeyChecking=no"
    rsh = rsh.format(ssh_key=quote(ssh_key))
    # {{source}}/{{target}} survive as literal placeholders for the caller.
    template = "rsync {options} -savz -e {rsh} {{source}} {{target}}"
    return template.format(options=options, rsh=quote(rsh))
class Syncer:
    def __init__(self, local_dir, remote_dir, sync_client=NOOP):
        """Syncs between two directories with the sync_client.

        Arguments:
            local_dir (str): Directory to sync. Uniquely identifies the
                syncer.
            remote_dir (str): Remote directory to sync with.
            sync_client (SyncClient): Client for syncing between local_dir
                and remote_dir. Defaults to a Noop.
        """
        # Normalize to a trailing-slash form (rsync directory semantics).
        if local_dir:
            local_dir = os.path.join(local_dir, "")
        self._local_dir = local_dir
        self._remote_dir = remote_dir
        self.last_sync_up_time = float("-inf")
        self.last_sync_down_time = float("-inf")
        self.sync_client = sync_client

    def sync_up_if_needed(self):
        """Sync up only if SYNC_PERIOD seconds passed since the last one."""
        elapsed = time.time() - self.last_sync_up_time
        if elapsed > SYNC_PERIOD:
            self.sync_up()

    def sync_down_if_needed(self):
        """Sync down only if SYNC_PERIOD seconds passed since the last one."""
        elapsed = time.time() - self.last_sync_down_time
        if elapsed > SYNC_PERIOD:
            self.sync_down()

    def sync_up(self):
        """Attempts to start the sync-up to the remote path.

        Returns:
            Whether the sync (if feasible) was successfully started.
        """
        started = False
        if self.validate_hosts(self._local_dir, self._remote_path):
            try:
                started = self.sync_client.sync_up(
                    self._local_dir, self._remote_path)
                self.last_sync_up_time = time.time()
            except Exception:
                logger.exception("Sync execution failed.")
        return started

    def sync_down(self):
        """Attempts to start the sync-down from the remote path.

        Returns:
            Whether the sync (if feasible) was successfully started.
        """
        started = False
        if self.validate_hosts(self._local_dir, self._remote_path):
            try:
                started = self.sync_client.sync_down(
                    self._remote_path, self._local_dir)
                self.last_sync_down_time = time.time()
            except Exception:
                logger.exception("Sync execution failed.")
        return started

    def validate_hosts(self, source, target):
        """Return True when both endpoints are non-empty, else log and
        return False."""
        if source and target:
            return True
        logger.debug("Source or target is empty, skipping log sync for "
                     "{}".format(self._local_dir))
        return False

    def wait(self):
        """Waits for the sync client to complete the current sync."""
        self.sync_client.wait()

    def reset(self):
        """Forget the sync timestamps and reset the underlying client."""
        self.last_sync_up_time = float("-inf")
        self.last_sync_down_time = float("-inf")
        self.sync_client.reset()

    @property
    def _remote_path(self):
        # Subclasses may compute this dynamically (see NodeSyncer).
        return self._remote_dir
class NodeSyncer(Syncer):
    """Syncer for syncing files to/from a remote dir to a local dir."""

    def __init__(self, local_dir, remote_dir, sync_client):
        self.local_ip = services.get_node_ip_address()
        self.worker_ip = None
        super().__init__(local_dir, remote_dir, sync_client)

    def set_worker_ip(self, worker_ip):
        """Sets the worker IP to sync logs from."""
        self.worker_ip = worker_ip

    def has_remote_target(self):
        """Returns whether the Syncer has a remote target."""
        if not self.worker_ip:
            logger.debug("Worker IP unknown, skipping sync for %s",
                         self._local_dir)
            return False
        if self.worker_ip == self.local_ip:
            logger.debug("Worker IP is local IP, skipping sync for %s",
                         self._local_dir)
            return False
        return True

    def sync_up_if_needed(self):
        # Nothing to push without a remote target; report success.
        if not self.has_remote_target():
            return True
        return super().sync_up_if_needed()

    def sync_down_if_needed(self):
        # Nothing to pull without a remote target; report success.
        if not self.has_remote_target():
            return True
        return super().sync_down_if_needed()

    def sync_up_to_new_location(self, worker_ip):
        """Retarget the syncer at a new worker and push logs there."""
        if worker_ip == self.worker_ip:
            logger.warning("Sync attempted to same IP %s.", worker_ip)
            return
        logger.debug("Setting new worker IP to %s", worker_ip)
        self.set_worker_ip(worker_ip)
        # Wipe sync timestamps so the new target gets a full sync at once.
        self.reset()
        if not self.sync_up():
            logger.warning(
                "Sync up to new location skipped. This should not occur.")

    def sync_up(self):
        if not self.has_remote_target():
            return True
        return super().sync_up()

    def sync_down(self):
        if not self.has_remote_target():
            return True
        logger.debug("Syncing from %s to %s", self._remote_path,
                     self._local_dir)
        return super().sync_down()

    @property
    def _remote_path(self):
        ssh_user = get_ssh_user()
        global _log_sync_warned
        if not self.has_remote_target():
            return None
        if ssh_user is None:
            # Without an ssh user we cannot build an rsync target.
            if not _log_sync_warned:
                logger.error("Syncer requires cluster to be setup with "
                             "`ray up`.")
                _log_sync_warned = True
            return None
        return "{}@{}:{}/".format(ssh_user, self.worker_ip, self._remote_dir)
def get_cloud_syncer(local_dir, remote_dir=None, sync_function=None):
    """Returns a Syncer that keeps ``local_dir`` synced with ``remote_dir``.

    Args:
        local_dir (str): Source directory for syncing.
        remote_dir (str): Target directory for syncing. If not provided, a
            no-op Syncer is returned.
        sync_function (func | str): Function for syncing the local_dir to
            remote_dir. If string, then it must be a string template for
            syncer to run. If not provided, it defaults to standard
            S3 or gsutil sync commands.

    Raises:
        ValueError if malformed remote_dir.
    """
    key = (local_dir, remote_dir)
    cached = _syncers.get(key)
    if cached is not None:
        return cached
    # Build the appropriate client, then cache the resulting syncer.
    if not remote_dir:
        client = NOOP
    else:
        client = get_sync_client(sync_function)
        if not client:
            # May raise ValueError for a malformed remote_dir.
            client = get_cloud_sync_client(remote_dir)
    syncer = Syncer(local_dir, remote_dir, client)
    _syncers[key] = syncer
    return syncer
def get_node_syncer(local_dir, remote_dir=None, sync_function=None):
    """Returns a NodeSyncer for ``local_dir`` (cached per directory pair).

    Args:
        local_dir (str): Source directory for syncing.
        remote_dir (str): Target directory for syncing. If not provided, a
            noop Syncer is returned.
        sync_function (func|str|bool): Function for syncing the local_dir to
            remote_dir. If string, then it must be a string template for
            syncer to run. If True or not provided, it defaults rsync. If
            False, a noop Syncer is returned.
    """
    key = (local_dir, remote_dir)
    if key not in _syncers:
        if not remote_dir or sync_function is False:
            client = NOOP
        elif sync_function and sync_function is not True:
            # Caller supplied an explicit function or command template.
            client = get_sync_client(sync_function)
        else:
            # Default: rsync over ssh, when available on this cluster.
            template = log_sync_template()
            if template:
                client = CommandBasedClient(template, template)
                client.set_logdir(local_dir)
            else:
                client = NOOP
        _syncers[key] = NodeSyncer(local_dir, remote_dir, client)
    return _syncers[key]
|
|
"""Support for AlarmDecoder devices."""
from datetime import timedelta
import logging
from adext import AdExt
from alarmdecoder.devices import SerialDevice, SocketDevice, USBDevice
from alarmdecoder.util import NoDeviceError
import voluptuous as vol
from homeassistant.components.binary_sensor import DEVICE_CLASSES_SCHEMA
from homeassistant.const import CONF_HOST, EVENT_HOMEASSISTANT_STOP
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import load_platform
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)

DOMAIN = "alarmdecoder"
# hass.data key under which the AdExt controller instance is stored.
DATA_AD = "alarmdecoder"

# Configuration keys for the device section.
CONF_DEVICE = "device"
CONF_DEVICE_BAUD = "baudrate"
CONF_DEVICE_PATH = "path"
CONF_DEVICE_PORT = "port"
CONF_DEVICE_TYPE = "type"

# Top-level integration options.
CONF_AUTO_BYPASS = "autobypass"
CONF_PANEL_DISPLAY = "panel_display"

# Per-zone configuration keys.
CONF_ZONE_NAME = "name"
CONF_ZONE_TYPE = "type"
CONF_ZONE_LOOP = "loop"
CONF_ZONE_RFID = "rfid"
CONF_ZONES = "zones"
CONF_RELAY_ADDR = "relayaddr"
CONF_RELAY_CHAN = "relaychan"
CONF_CODE_ARM_REQUIRED = "code_arm_required"

# Defaults applied when the corresponding option is omitted.
DEFAULT_DEVICE_TYPE = "socket"
DEFAULT_DEVICE_HOST = "localhost"
DEFAULT_DEVICE_PORT = 10000
DEFAULT_DEVICE_PATH = "/dev/ttyUSB0"
DEFAULT_DEVICE_BAUD = 115200
DEFAULT_AUTO_BYPASS = False
DEFAULT_PANEL_DISPLAY = False
DEFAULT_CODE_ARM_REQUIRED = True
DEFAULT_ZONE_TYPE = "opening"

# Dispatcher signal names used to fan panel events out to the platforms.
SIGNAL_PANEL_MESSAGE = "alarmdecoder.panel_message"
SIGNAL_PANEL_ARM_AWAY = "alarmdecoder.panel_arm_away"
SIGNAL_PANEL_ARM_HOME = "alarmdecoder.panel_arm_home"
SIGNAL_PANEL_DISARM = "alarmdecoder.panel_disarm"
SIGNAL_ZONE_FAULT = "alarmdecoder.zone_fault"
SIGNAL_ZONE_RESTORE = "alarmdecoder.zone_restore"
SIGNAL_RFX_MESSAGE = "alarmdecoder.rfx_message"
SIGNAL_REL_MESSAGE = "alarmdecoder.rel_message"

# Schema for a network-attached (ser2sock) AlarmDecoder.
DEVICE_SOCKET_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_DEVICE_TYPE): "socket",
        vol.Optional(CONF_HOST, default=DEFAULT_DEVICE_HOST): cv.string,
        vol.Optional(CONF_DEVICE_PORT, default=DEFAULT_DEVICE_PORT): cv.port,
    }
)
# Schema for a serial-port AlarmDecoder.
DEVICE_SERIAL_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_DEVICE_TYPE): "serial",
        vol.Optional(CONF_DEVICE_PATH, default=DEFAULT_DEVICE_PATH): cv.string,
        # NOTE(review): baud is validated as a string, not an int -- confirm
        # cv.string is intentional here.
        vol.Optional(CONF_DEVICE_BAUD, default=DEFAULT_DEVICE_BAUD): cv.string,
    }
)
DEVICE_USB_SCHEMA = vol.Schema({vol.Required(CONF_DEVICE_TYPE): "usb"})

# Schema for one zone entry (keyed by zone number in CONFIG_SCHEMA).
ZONE_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_ZONE_NAME): cv.string,
        vol.Optional(CONF_ZONE_TYPE, default=DEFAULT_ZONE_TYPE): vol.Any(
            DEVICE_CLASSES_SCHEMA
        ),
        vol.Optional(CONF_ZONE_RFID): cv.string,
        vol.Optional(CONF_ZONE_LOOP): vol.All(vol.Coerce(int), vol.Range(min=1, max=4)),
        # Relay address and channel are only meaningful as a pair.
        vol.Inclusive(
            CONF_RELAY_ADDR,
            "relaylocation",
            "Relay address and channel must exist together",
        ): cv.byte,
        vol.Inclusive(
            CONF_RELAY_CHAN,
            "relaylocation",
            "Relay address and channel must exist together",
        ): cv.byte,
    }
)
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Required(CONF_DEVICE): vol.Any(
                    DEVICE_SOCKET_SCHEMA, DEVICE_SERIAL_SCHEMA, DEVICE_USB_SCHEMA
                ),
                vol.Optional(
                    CONF_PANEL_DISPLAY, default=DEFAULT_PANEL_DISPLAY
                ): cv.boolean,
                vol.Optional(CONF_AUTO_BYPASS, default=DEFAULT_AUTO_BYPASS): cv.boolean,
                vol.Optional(
                    CONF_CODE_ARM_REQUIRED, default=DEFAULT_CODE_ARM_REQUIRED
                ): cv.boolean,
                vol.Optional(CONF_ZONES): {vol.Coerce(int): ZONE_SCHEMA},
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
    """Set up for the AlarmDecoder devices.

    Opens the configured device (socket, serial, or usb), wires the
    AlarmDecoder callbacks to dispatcher signals, and loads the dependent
    platforms. Returns True on success, False for an unknown device type.
    """
    conf = config.get(DOMAIN)

    restart = False
    device = conf[CONF_DEVICE]
    display = conf[CONF_PANEL_DISPLAY]
    auto_bypass = conf[CONF_AUTO_BYPASS]
    code_arm_required = conf[CONF_CODE_ARM_REQUIRED]
    zones = conf.get(CONF_ZONES)

    device_type = device[CONF_DEVICE_TYPE]
    host = DEFAULT_DEVICE_HOST
    port = DEFAULT_DEVICE_PORT
    path = DEFAULT_DEVICE_PATH
    baud = DEFAULT_DEVICE_BAUD

    def stop_alarmdecoder(event):
        """Handle the shutdown of AlarmDecoder."""
        _LOGGER.debug("Shutting down alarmdecoder")
        nonlocal restart
        # Clear the flag so handle_closed_connection does not reconnect.
        restart = False
        controller.close()

    def open_connection(now=None):
        """Open a connection to AlarmDecoder."""
        nonlocal restart
        try:
            controller.open(baud)
        except NoDeviceError:
            _LOGGER.debug("Failed to connect. Retrying in 5 seconds")
            hass.helpers.event.track_point_in_time(
                open_connection, dt_util.utcnow() + timedelta(seconds=5)
            )
            return
        _LOGGER.debug("Established a connection with the alarmdecoder")
        # From here on, an unexpected close should trigger a reconnect.
        restart = True

    def handle_closed_connection(event):
        """Restart after unexpected loss of connection."""
        nonlocal restart
        if not restart:
            return
        restart = False
        _LOGGER.warning("AlarmDecoder unexpectedly lost connection")
        hass.add_job(open_connection)

    def handle_message(sender, message):
        """Handle message from AlarmDecoder."""
        hass.helpers.dispatcher.dispatcher_send(SIGNAL_PANEL_MESSAGE, message)

    def handle_rfx_message(sender, message):
        """Handle RFX message from AlarmDecoder."""
        hass.helpers.dispatcher.dispatcher_send(SIGNAL_RFX_MESSAGE, message)

    def zone_fault_callback(sender, zone):
        """Handle zone fault from AlarmDecoder."""
        hass.helpers.dispatcher.dispatcher_send(SIGNAL_ZONE_FAULT, zone)

    def zone_restore_callback(sender, zone):
        """Handle zone restore from AlarmDecoder."""
        hass.helpers.dispatcher.dispatcher_send(SIGNAL_ZONE_RESTORE, zone)

    def handle_rel_message(sender, message):
        """Handle relay or zone expander message from AlarmDecoder."""
        hass.helpers.dispatcher.dispatcher_send(SIGNAL_REL_MESSAGE, message)

    controller = False
    if device_type == "socket":
        host = device[CONF_HOST]
        port = device[CONF_DEVICE_PORT]
        controller = AdExt(SocketDevice(interface=(host, port)))
    elif device_type == "serial":
        path = device[CONF_DEVICE_PATH]
        baud = device[CONF_DEVICE_BAUD]
        controller = AdExt(SerialDevice(interface=path))
    elif device_type == "usb":
        # BUG FIX: the controller was previously created without being
        # assigned, and the function then returned False unconditionally,
        # so USB devices could never be set up. Assign the controller and
        # only bail out for an unknown device type.
        controller = AdExt(USBDevice.find())
    else:
        return False

    controller.on_message += handle_message
    controller.on_rfx_message += handle_rfx_message
    controller.on_zone_fault += zone_fault_callback
    controller.on_zone_restore += zone_restore_callback
    controller.on_close += handle_closed_connection
    controller.on_expander_message += handle_rel_message

    hass.data[DATA_AD] = controller

    open_connection()

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_alarmdecoder)

    load_platform(
        hass,
        "alarm_control_panel",
        DOMAIN,
        {CONF_AUTO_BYPASS: auto_bypass, CONF_CODE_ARM_REQUIRED: code_arm_required},
        config,
    )
    if zones:
        load_platform(hass, "binary_sensor", DOMAIN, {CONF_ZONES: zones}, config)
    if display:
        load_platform(hass, "sensor", DOMAIN, conf, config)
    return True
|
|
# -*- mode: python; coding: utf-8 -*-
# Copyright 2019 the HERA Collaboration
# Licensed under the 2-clause BSD license.
from sqlalchemy import Column, Integer, String
import sqlalchemy
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy import ForeignKey
from ..db_check import is_valid_database
from ..db_check import check_connection
from .. import mc
def gen_test_model():
    """Return a throwaway declarative base plus a one-column test model."""
    base = declarative_base()

    class ValidTestModel(base):
        """Minimal model used to probe database schema validity."""

        __tablename__ = "validity_check_test"
        # Running counter used in foreign key references.
        id = Column(Integer, primary_key=True)

    return base, ValidTestModel
def gen_relation_models():
    """Return a base plus two related models to exercise relationship
    handling in the validity check."""
    base = declarative_base()

    class RelationTestModel(base):
        __tablename__ = "validity_check_test_2"
        id = Column(Integer, primary_key=True)

    class RelationTestModel2(base):
        __tablename__ = "validity_check_test_3"
        id = Column(Integer, primary_key=True)
        # Foreign key into RelationTestModel plus the mapped relationship.
        test_relationship_id = Column(ForeignKey("validity_check_test_2.id"))
        test_relationship = relationship(
            RelationTestModel,
            primaryjoin=test_relationship_id == RelationTestModel.id)

    return base, RelationTestModel, RelationTestModel2
def gen_declarative():
    """Return a base plus a model using declared_attr and hybrid_property,
    for checking that such attributes are not flagged as missing columns."""
    base = declarative_base()

    class DeclarativeTestModel(base):
        __tablename__ = "validity_check_test_4"
        id = Column(Integer, primary_key=True)

        @declared_attr
        def _password(self):
            # Backing column for the hybrid property below.
            return Column('password', String(256), nullable=False)

        @hybrid_property
        def password(self):
            return self._password

    return base, DeclarativeTestModel
def test_argparser():
    """The stock M&C argument parser should carry no description."""
    parser = mc.get_mc_argument_parser()
    assert parser.description is None
def test_validity_pass():
    """
    See database validity check completes when tables and columns are created.
    """
    engine = mc.connect_to_mc_testing_db().engine
    conn = engine.connect()
    conn.begin()
    Base, ValidTestModel = gen_test_model()
    Session = sessionmaker(bind=engine)
    session = Session()
    # Ensure the test table is absent to start from a clean slate; ignore
    # the error if it never existed.
    try:
        Base.metadata.drop_all(engine, tables=[ValidTestModel.__table__])
    except sqlalchemy.exc.NoSuchTableError:
        pass
    # A None declarative base is trivially considered valid.
    base_is_none = is_valid_database(None, session)
    assert base_is_none
    # After creating the table the schema must validate; always drop it
    # again so other tests see a clean database.
    Base.metadata.create_all(engine, tables=[ValidTestModel.__table__])
    try:
        assert is_valid_database(Base, session) is True
    finally:
        Base.metadata.drop_all(engine)
def test_validity_table_missing():
    """See check fails when there is a missing table"""
    engine = mc.connect_to_mc_testing_db().engine
    conn = engine.connect()
    conn.begin()
    Base, ValidTestModel = gen_test_model()
    Session = sessionmaker(bind=engine)
    session = Session()
    # Make sure the model's table does not exist in the database...
    try:
        Base.metadata.drop_all(engine, tables=[ValidTestModel.__table__])
    except sqlalchemy.exc.NoSuchTableError:
        pass
    # ...so the validity check must report a mismatch.
    assert is_valid_database(Base, session) is False
def test_validity_column_missing():
    """See check fails when there is a missing table"""
    engine = mc.connect_to_mc_testing_db().engine
    conn = engine.connect()
    conn.begin()
    Session = sessionmaker(bind=engine)
    session = Session()
    Base, ValidTestModel = gen_test_model()
    # Start from a clean slate, then create the table with its column.
    try:
        Base.metadata.drop_all(engine, tables=[ValidTestModel.__table__])
    except sqlalchemy.exc.NoSuchTableError:
        pass
    Base.metadata.create_all(engine, tables=[ValidTestModel.__table__])
    # Delete one of the columns
    # NOTE(review): Engine.execute was removed in SQLAlchemy 2.0 -- confirm
    # this project pins SQLAlchemy < 2.0 or migrate to conn.execute(text()).
    engine.execute("ALTER TABLE validity_check_test DROP COLUMN id")
    assert is_valid_database(Base, session) is False
def test_validity_pass_relationship():
    """
    See database validity check understands about relationships and don't
    deem them as missing column.
    """
    engine = mc.connect_to_mc_testing_db().engine
    conn = engine.connect()
    conn.begin()
    Session = sessionmaker(bind=engine)
    session = Session()
    Base, RelationTestModel, RelationTestModel2 = gen_relation_models()
    # Drop both related tables first so create_all starts clean.
    try:
        Base.metadata.drop_all(engine, tables=[RelationTestModel.__table__,
                                               RelationTestModel2.__table__])
    except sqlalchemy.exc.NoSuchTableError:
        pass
    Base.metadata.create_all(engine, tables=[RelationTestModel.__table__,
                                             RelationTestModel2.__table__])
    # The relationship attribute must not be mistaken for a missing column;
    # always drop the tables again afterwards.
    try:
        assert is_valid_database(Base, session) is True
    finally:
        Base.metadata.drop_all(engine)
def test_validity_pass_declarative():
    """
    See database validity check understands about relationships and don't deem
    them as missing column.
    """
    engine = mc.connect_to_mc_testing_db().engine
    conn = engine.connect()
    conn.begin()
    Session = sessionmaker(bind=engine)
    session = Session()
    Base, DeclarativeTestModel = gen_declarative()
    # Start from a clean slate, then create the declarative-attr table.
    try:
        Base.metadata.drop_all(engine, tables=[DeclarativeTestModel.__table__])
    except sqlalchemy.exc.NoSuchTableError:
        pass
    Base.metadata.create_all(engine, tables=[DeclarativeTestModel.__table__])
    # declared_attr/hybrid_property attributes must not register as missing
    # columns; always drop the table again afterwards.
    try:
        assert is_valid_database(Base, session) is True
    finally:
        Base.metadata.drop_all(engine)
def test_check_connection():
    """Check that a missing database raises appropriate exception."""
    # Point the declarative DB at a database that does not exist.
    bogus_db = mc.DeclarativeDB('postgresql://hera@localhost/foo')
    with bogus_db.sessionmaker() as session:
        assert check_connection(session) is False
|
|
from pandac.PandaModules import *
from toontown.toonbase.ToonBaseGlobal import *
from toontown.toonbase.ToontownGlobals import GlobalDialogColor
from DistributedMinigame import *
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownTimer
import TravelGameGlobals
import math
from pandac.PandaModules import rad2Deg
from toontown.toontowngui import TTDialog
from direct.interval.IntervalGlobal import *
import VoteResultsPanel
import VoteResultsTrolleyPanel
# Maps a minigame id to the name of its trolley-sign icon node inside the
# 'phase_4/models/minigames/mg_icons' model (see loadMinigameIcons).
# NOTE(review): 'mg_trolley_sign_theif' spelling matches the asset name.
IconDict = {ToontownGlobals.RaceGameId: 'mg_trolley_sign_race',
 ToontownGlobals.CannonGameId: 'mg_trolley_sign_cannon',
 ToontownGlobals.TagGameId: 'mg_trolley_sign_tag',
 ToontownGlobals.PatternGameId: 'mg_trolley_sign_minnie',
 ToontownGlobals.RingGameId: 'mg_trolley_sign_ring',
 ToontownGlobals.MazeGameId: 'mg_trolley_sign_maze',
 ToontownGlobals.TugOfWarGameId: 'mg_trolley_sign_tugawar',
 ToontownGlobals.CatchGameId: 'mg_trolley_sign_catch',
 ToontownGlobals.DivingGameId: 'mg_trolley_sign_dive',
 ToontownGlobals.TargetGameId: 'mg_trolley_sign_umbrella',
 ToontownGlobals.PairingGameId: 'mg_trolley_sign_card',
 ToontownGlobals.VineGameId: 'mg_trolley_sign_vine',
 ToontownGlobals.IceGameId: 'mg_trolley_sign_ice',
 ToontownGlobals.PhotoGameId: 'mg_trolley_sign_photo',
 ToontownGlobals.TwoDGameId: 'mg_trolley_sign_2d',
 ToontownGlobals.CogThiefGameId: 'mg_trolley_sign_theif'}
# Maps a minigame id to its localized display name; used for text labels
# when no icon is available and for the win-movie announcement.
MinigameNameDict = {ToontownGlobals.RaceGameId: TTLocalizer.RaceGameTitle,
 ToontownGlobals.CannonGameId: TTLocalizer.CannonGameTitle,
 ToontownGlobals.TagGameId: TTLocalizer.TagGameTitle,
 ToontownGlobals.PatternGameId: TTLocalizer.PatternGameTitle,
 ToontownGlobals.RingGameId: TTLocalizer.RingGameTitle,
 ToontownGlobals.MazeGameId: TTLocalizer.MazeGameTitle,
 ToontownGlobals.TugOfWarGameId: TTLocalizer.TugOfWarGameTitle,
 ToontownGlobals.CatchGameId: TTLocalizer.CatchGameTitle,
 ToontownGlobals.DivingGameId: TTLocalizer.DivingGameTitle,
 ToontownGlobals.TargetGameId: TTLocalizer.TargetGameTitle,
 ToontownGlobals.PairingGameId: TTLocalizer.PairingGameTitle,
 ToontownGlobals.VineGameId: TTLocalizer.VineGameTitle,
 ToontownGlobals.TravelGameId: TTLocalizer.TravelGameTitle,
 ToontownGlobals.IceGameId: TTLocalizer.IceGameTitle,
 ToontownGlobals.PhotoGameId: TTLocalizer.PhotoGameTitle,
 ToontownGlobals.TwoDGameId: TTLocalizer.TwoDGameTitle,
 ToontownGlobals.CogThiefGameId: TTLocalizer.CogThiefGameTitle}
def makeLabel(itemName, itemNum, *extraArgs):
    """Build a DirectLabel for a scroll-list entry.

    The label shows abs(int(itemName)); negatives render blue, zero black,
    positives red. itemNum and extraArgs are accepted for the
    DirectScrolledList itemMakeFunction signature but unused.
    """
    value = int(itemName)
    if value < 0:
        color = Vec4(0, 0, 1, 1)
        value = -value
    elif value == 0:
        color = Vec4(0, 0, 0, 1)
    else:
        color = Vec4(1, 0, 0, 1)
    return DirectLabel(text=str(value), text_fg=color, relief=DGG.RIDGE,
                       frameSize=(-1.2, 1.2, -0.225, 0.8), scale=1.0)
def map3dToAspect2d(node, point):
    """Project a 3-D point (in node's space) into aspect2d coordinates.

    Returns None when the point falls outside the camera frustum.
    """
    camSpace = base.cam.getRelativePoint(node, point)
    screen = Point2()
    if not base.camLens.project(camSpace, screen):
        return None
    render2dPoint = Point3(screen[0], 0, screen[1])
    return aspect2d.getRelativePoint(render2d, render2dPoint)
def invertTable(table):
    """Return a value -> key mapping for `table`.

    When several keys share the same value, the first key encountered wins
    (same first-wins behavior as the original has_key() guard).
    """
    index = {}
    # dict.has_key() is deprecated; the `in` operator is the portable form.
    for key, value in table.items():
        if value not in index:
            index[value] = key
    return index
class DistributedTravelGame(DistributedMinigame):
    """Client side of the trolley travel game: players vote on which way the
    trolley turns at each switch until it reaches a leaf (a minigame)."""
    notify = directNotify.newCategory('DistributedTravelGame')
    # minigame id -> localized display name.
    idToNames = MinigameNameDict
    # Seconds the trolley takes to travel one board link.
    TrolleyMoveDuration = 3
    # Presentation tuning flags for vote results and camera motion.
    UseTrolleyResultsPanel = True
    FlyCameraUp = True
    FocusOnTrolleyWhileMovingUp = False
    def __init__(self, cr):
        """Create the client travel game: build the game FSM, preload sound
        effects, and initialize vote/board bookkeeping."""
        DistributedMinigame.__init__(self, cr)
        self.gameFSM = ClassicFSM.ClassicFSM('DistributedTravelGame', [State.State('off', self.enterOff, self.exitOff, ['inputChoice']),
         State.State('inputChoice', self.enterInputChoice, self.exitInputChoice, ['waitServerChoices', 'displayVotes', 'cleanup']),
         State.State('waitServerChoices', self.enterWaitServerChoices, self.exitWaitServerChoices, ['displayVotes', 'cleanup']),
         State.State('displayVotes', self.enterDisplayVotes, self.exitDisplayVotes, ['moveTrolley', 'cleanup']),
         State.State('moveTrolley', self.enterMoveTrolley, self.exitMoveTrolley, ['inputChoice', 'winMovie', 'cleanup']),
         State.State('winMovie', self.enterWinMovie, self.exitWinMovie, ['cleanup']),
         State.State('cleanup', self.enterCleanup, self.exitCleanup, [])], 'off', 'cleanup')
        self.addChildGameFSM(self.gameFSM)
        # avId -> number of votes that avatar still has to spend.
        self.currentVotes = {}
        # (x, y, z, h, p, r) of the fixed overhead camera.
        self.cameraTopView = (100, -20, 280, 0, -89, 0)
        self.timer = None
        self.timerStartTime = None
        # Indices into TravelGameGlobals.BoardLayouts[self.boardIndex].
        self.currentSwitch = 0
        self.destSwitch = 0
        self.minigameLabels = []
        self.minigameIcons = []
        self.bonusLabels = []
        self.trolleyAwaySfx = base.loadSfx('phase_4/audio/sfx/SZ_trolley_away.mp3')
        self.trolleyBellSfx = base.loadSfx('phase_4/audio/sfx/SZ_trolley_bell.mp3')
        self.turntableRotateSfx = base.loadSfx('phase_4/audio/sfx/MG_sfx_travel_game_turntble_rotate_2.mp3')
        self.wonGameSfx = base.loadSfx('phase_4/audio/sfx/MG_sfx_travel_game_bonus.mp3')
        self.lostGameSfx = base.loadSfx('phase_4/audio/sfx/MG_sfx_travel_game_no_bonus_2.mp3')
        self.noWinnerSfx = base.loadSfx('phase_4/audio/sfx/MG_sfx_travel_game_no_bonus.mp3')
        self.boardIndex = 0
        self.avNames = []
        self.disconnectedAvIds = []
        return
def getTitle(self):
return TTLocalizer.TravelGameTitle
def getInstructions(self):
return TTLocalizer.TravelGameInstructions
def getMaxDuration(self):
return 0
    def load(self):
        """Load all static assets: the ground board, the trolley car (with
        animatable keys and wheels), one turntable per board switch, track
        segments for every link, tunnels at each leaf, the voting GUI, the
        intro movie, and the background music."""
        self.notify.debug('load')
        DistributedMinigame.load(self)
        self.sky = loader.loadModel('phase_3.5/models/props/TT_sky')
        self.gameBoard = loader.loadModel('phase_4/models/minigames/toon_cannon_gameground')
        self.gameBoard.setPosHpr(100, 0, 0, 0, 0, 0)
        self.gameBoard.setScale(1.0)
        station = loader.loadModel('phase_4/models/modules/trolley_station_TT.bam')
        self.trolleyCar = station.find('**/trolley_car')
        self.trolleyCar.reparentTo(hidden)
        self.trolleyCarOrigPos = self.trolleyCar.getPos()
        self.trolleyCarOrigHpr = self.trolleyCar.getHpr()
        self.trolleyCar.setPosHpr(0, 0, 0, 0, 0, 0)
        self.trolleyCar.setScale(1.0)
        # Start the car one link-length to the left of the root switch.
        self.trolleyCar.setX(self.trolleyCar.getX() - TravelGameGlobals.xInc)
        station.removeNode()
        # For the keys and both wheel sets: cache each node's rest transform
        # and attach a reference node so they can be spun during movement
        # and snapped back afterwards (see resetAnimation/animateTrolley).
        self.keys = self.trolleyCar.findAllMatches('**/key')
        self.numKeys = self.keys.getNumPaths()
        self.keyInit = []
        self.keyRef = []
        for i in range(self.numKeys):
            key = self.keys[i]
            key.setTwoSided(1)
            ref = self.trolleyCar.attachNewNode('key' + `i` + 'ref')
            ref.iPosHpr(key)
            self.keyRef.append(ref)
            self.keyInit.append(key.getTransform())
        self.frontWheels = self.trolleyCar.findAllMatches('**/front_wheels')
        self.numFrontWheels = self.frontWheels.getNumPaths()
        self.frontWheelInit = []
        self.frontWheelRef = []
        for i in range(self.numFrontWheels):
            wheel = self.frontWheels[i]
            ref = self.trolleyCar.attachNewNode('frontWheel' + `i` + 'ref')
            ref.iPosHpr(wheel)
            self.frontWheelRef.append(ref)
            self.frontWheelInit.append(wheel.getTransform())
        self.backWheels = self.trolleyCar.findAllMatches('**/back_wheels')
        self.numBackWheels = self.backWheels.getNumPaths()
        self.backWheelInit = []
        self.backWheelRef = []
        for i in range(self.numBackWheels):
            wheel = self.backWheels[i]
            ref = self.trolleyCar.attachNewNode('backWheel' + `i` + 'ref')
            ref.iPosHpr(wheel)
            self.backWheelRef.append(ref)
            self.backWheelInit.append(wheel.getTransform())
        # NOTE(review): this interval appears unused here — verify.
        trolleyAnimationReset = Func(self.resetAnimation)
        self.trainSwitches = {}
        self.trainTracks = {}
        self.tunnels = {}
        self.extraTrainTracks = []
        turnTable = loader.loadModel('phase_4/models/minigames/trolley_game_turntable')
        minPoint = Point3(0, 0, 0)
        maxPoint = Point3(0, 0, 0)
        turnTable.calcTightBounds(minPoint, maxPoint)
        # Length of one track segment, used by loadTrainTrack.
        self.fullLength = maxPoint[0]
        # One turntable per switch, plus a track model per outgoing link.
        for key in TravelGameGlobals.BoardLayouts[self.boardIndex].keys():
            info = TravelGameGlobals.BoardLayouts[self.boardIndex][key]
            switchModel = turnTable.find('**/turntable1').copyTo(render)
            switchModel.setPos(*info['pos'])
            switchModel.reparentTo(hidden)
            self.trainSwitches[key] = switchModel
            zAdj = 0
            for otherSwitch in info['links']:
                info2 = TravelGameGlobals.BoardLayouts[self.boardIndex][otherSwitch]
                x1, y1, z1 = info['pos']
                x2, y2, z2 = info2['pos']
                linkKey = (key, otherSwitch)
                trainTrack = self.loadTrainTrack(x1, y1, x2, y2, zAdj)
                trainTrack.reparentTo(hidden)
                self.trainTracks[linkKey] = trainTrack
                # Raise each successive track slightly to avoid z-fighting.
                zAdj += 0.005
        # Lead-in track before the root switch.
        rootInfo = TravelGameGlobals.BoardLayouts[self.boardIndex][0]
        rootX, rootY, rootZ = rootInfo['pos']
        startX = rootX - TravelGameGlobals.xInc
        trainTrack = self.loadTrainTrack(startX, rootY, rootX, rootY)
        self.extraTrainTracks.append(trainTrack)
        # Exit track plus a tunnel model after every leaf switch.
        tunnelX = None
        for key in TravelGameGlobals.BoardLayouts[self.boardIndex].keys():
            if self.isLeaf(key):
                info = TravelGameGlobals.BoardLayouts[self.boardIndex][key]
                switchX, switchY, switchZ = info['pos']
                endX = switchX + TravelGameGlobals.xInc
                trainTrack = self.loadTrainTrack(switchX, switchY, endX, switchY)
                self.extraTrainTracks.append(trainTrack)
                tempModel = loader.loadModel('phase_4/models/minigames/trolley_game_turntable')
                tunnel = tempModel.find('**/tunnel1')
                tunnel.reparentTo(render)
                tempModel.removeNode()
                if not tunnelX:
                    # All leaves share the same tunnel X; compute it once.
                    minTrackPoint = Point3(0, 0, 0)
                    maxTrackPoint = Point3(0, 0, 0)
                    trainTrack.calcTightBounds(minTrackPoint, maxTrackPoint)
                    tunnelX = maxTrackPoint[0]
                tunnel.setPos(tunnelX, switchY, 0)
                tunnel.wrtReparentTo(trainTrack)
                self.tunnels[key] = tunnel
        turnTable.removeNode()
        self.loadGui()
        self.introMovie = self.getIntroMovie()
        self.music = base.loadMusic('phase_4/audio/bgm/MG_Travel.mid')
        self.flashWinningBeansTrack = None
        return
def loadTrainTrack(self, x1, y1, x2, y2, zAdj = 0):
turnTable = loader.loadModel('phase_4/models/minigames/trolley_game_turntable')
trainPart = turnTable.find('**/track_a2')
trackHeight = 0.03
trainTrack = render.attachNewNode('trainTrack%d%d%d%d' % (x1,
y1,
x2,
y2))
trainTrack.setPos(x1, y1, trackHeight)
xDiff = abs(x2 - x1)
yDiff = abs(y2 - y1)
angleInRadians = math.atan((float(y2) - y1) / (x2 - x1))
angle = rad2Deg(angleInRadians)
desiredLength = math.sqrt(xDiff * xDiff + yDiff * yDiff)
lengthToGo = desiredLength
partIndex = 0
lengthCovered = 0
while lengthToGo > self.fullLength / 2.0:
onePart = trainPart.copyTo(trainTrack)
onePart.setX(lengthCovered)
lengthToGo -= self.fullLength
lengthCovered += self.fullLength
trainTrack.setH(angle)
newX = x1 + (x2 - x1) / 2.0
newY = y1 + (y2 - y1) / 2.0
trainTrack.setPos(x1, y1, trackHeight + zAdj)
turnTable.removeNode()
return trainTrack
    def loadGui(self):
        """Build the voting GUI: the remaining-votes readout, the choice
        frame with its scrolling vote list and direction labels, the Vote!
        button, and the hidden 'waiting for choices' label."""
        scoreText = [str(self.currentVotes[self.localAvId])]
        self.gui = DirectFrame()
        self.remainingVotesFrame = DirectFrame(parent=self.gui, relief=None, geom=DGG.getDefaultDialogGeom(), geom_color=GlobalDialogColor, geom_scale=(7, 1, 1), pos=(-0.9, 0, 0.8), scale=0.1, text=TTLocalizer.TravelGameRemainingVotes, text_align=TextNode.ALeft, text_scale=TTLocalizer.DTGremainingVotesFrame, text_pos=(-3.4, -0.1, 0.0))
        self.localVotesRemaining = DirectLabel(parent=self.remainingVotesFrame, relief=None, text=scoreText, text_fg=VBase4(0, 0.5, 0, 1), text_align=TextNode.ARight, text_scale=0.7, pos=(3.2, 0, -0.15))
        guiModel = loader.loadModel('phase_3.5/models/gui/friendslist_gui')
        self.choiceFrame = DirectFrame(parent=self.gui, relief=None, pos=(-0.55, 0, -0.85), image=DGG.getDefaultDialogGeom(), image_scale=(1.4, 1, 0.225), image_color=GlobalDialogColor)
        # The period/to-go and up/down labels overlap; makeTextMatchChoice
        # shows whichever combination matches the current selection.
        self.useLabel = DirectLabel(text=TTLocalizer.TravelGameUse, parent=self.choiceFrame, pos=(-0.59, 0, -0.01), text_scale=TTLocalizer.DTGuseLabel, relief=None)
        self.votesPeriodLabel = DirectLabel(text=TTLocalizer.TravelGameVotesWithPeriod, parent=self.choiceFrame, pos=(-0.21, 0, -0.01), text_scale=TTLocalizer.DTGvotesPeriodLabel, relief=None, text_align=TextNode.ALeft)
        self.votesToGoLabel = DirectLabel(text=TTLocalizer.TravelGameVotesToGo, parent=self.choiceFrame, pos=(-0.21, 0, -0.01), text_scale=TTLocalizer.DTGvotesToGoLabel, relief=None, text_align=TextNode.ALeft)
        self.upLabel = DirectLabel(text=TTLocalizer.TravelGameUp, parent=self.choiceFrame, pos=(0.31, 0, -0.01), text_scale=TTLocalizer.DTGupLabel, text_fg=Vec4(0, 0, 1, 1), relief=None, text_align=TextNode.ALeft)
        self.downLabel = DirectLabel(text=TTLocalizer.TravelGameDown, parent=self.choiceFrame, pos=(0.31, 0, -0.01), text_scale=TTLocalizer.DTGdownLabel, text_fg=Vec4(1, 0, 0, 1), relief=None, text_align=TextNode.ALeft)
        self.scrollList = DirectScrolledList(parent=self.choiceFrame, relief=None, pos=(-0.36, 0, -0.02), incButton_image=(guiModel.find('**/FndsLst_ScrollUp'),
         guiModel.find('**/FndsLst_ScrollDN'),
         guiModel.find('**/FndsLst_ScrollUp_Rllvr'),
         guiModel.find('**/FndsLst_ScrollUp')), incButton_relief=None, incButton_pos=(0.0, 0.0, -0.04), incButton_image3_color=Vec4(0.6, 0.6, 0.6, 0.6), incButton_scale=(1.0, 1.0, -1.0), decButton_image=(guiModel.find('**/FndsLst_ScrollUp'),
         guiModel.find('**/FndsLst_ScrollDN'),
         guiModel.find('**/FndsLst_ScrollUp_Rllvr'),
         guiModel.find('**/FndsLst_ScrollUp')), decButton_relief=None, decButton_pos=(0.0, 0.0, 0.095), decButton_image3_color=Vec4(0.6, 0.6, 0.6, 0.6), itemFrame_pos=(0.0, 0.0, 0.0), itemFrame_relief=DGG.GROOVE, numItemsVisible=1, itemMakeFunction=makeLabel, items=[], scrollSpeed=3.0, itemFrame_scale=0.1, command=self.scrollChoiceChanged)
        self.putChoicesInScrollList()
        buttons = loader.loadModel('phase_3/models/gui/dialog_box_buttons_gui')
        okImageList = (buttons.find('**/ChtBx_OKBtn_UP'), buttons.find('**/ChtBx_OKBtn_DN'), buttons.find('**/ChtBx_OKBtn_Rllvr'))
        self.voteButton = DirectButton(parent=self.choiceFrame, relief=None, image=okImageList, image_scale=3.0, pos=(0.85, 0, 0.0), text=TTLocalizer.TravelGameVoteWithExclamation, text_scale=TTLocalizer.DTGvoteButton, text_pos=(0, 0), command=self.handleInputChoice)
        self.waitingChoicesLabel = DirectLabel(text=TTLocalizer.TravelGameWaitingChoices, text_fg=VBase4(1, 1, 1, 1), relief=None, pos=(-0.2, 0, -0.85), scale=0.075)
        self.waitingChoicesLabel.hide()
        self.gui.hide()
        return
    def unload(self):
        """Release every asset created in load(): models, tracks, GUI,
        running intervals, labels/icons, and the child game FSM."""
        self.notify.debug('unload')
        DistributedMinigame.unload(self)
        self.introMovie.finish()
        del self.introMovie
        self.gameBoard.removeNode()
        del self.gameBoard
        self.sky.removeNode()
        del self.sky
        self.trolleyCar.removeNode()
        del self.trolleyCar
        # keys() returns a list in Python 2, so deleting entries while
        # iterating it is safe here.
        for key in self.trainSwitches.keys():
            self.trainSwitches[key].removeNode()
            del self.trainSwitches[key]
        self.trainSwitches = {}
        for key in self.tunnels.keys():
            self.tunnels[key].removeNode()
            del self.tunnels[key]
        self.tunnels = {}
        for key in self.trainTracks.keys():
            self.trainTracks[key].removeNode()
            del self.trainTracks[key]
        self.trainTracks = {}
        for trainTrack in self.extraTrainTracks:
            trainTrack.removeNode()
            del trainTrack  # only unbinds the loop variable, not the list entry
        self.extraTrainTracks = []
        self.gui.removeNode()
        del self.gui
        self.waitingChoicesLabel.destroy()
        del self.waitingChoicesLabel
        if self.flashWinningBeansTrack:
            self.flashWinningBeansTrack.finish()
            del self.flashWinningBeansTrack
        for label in self.minigameLabels:
            label.destroy()
            del label  # only unbinds the loop variable
        self.minigameLabels = []
        for icon in self.minigameIcons:
            icon.destroy()
            icon.removeNode()
        self.minigameIcons = []
        if hasattr(self, 'mg_icons'):
            del self.mg_icons
        for label in self.bonusLabels:
            label.destroy()
            del label  # only unbinds the loop variable
        self.bonusLabels = []
        self.scrollList.destroy()
        del self.scrollList
        self.voteButton.destroy()
        del self.voteButton
        self.removeChildGameFSM(self.gameFSM)
        del self.gameFSM
        del self.music
def moveCameraToTop(self):
camera.reparentTo(render)
p = self.cameraTopView
camera.setPosHpr(p[0], p[1], p[2], p[3], p[4], p[5])
def moveCameraToTrolley(self):
camera.reparentTo(self.trolleyCar)
camera.setPos(-25, 0, 7.5)
camera.setHpr(-90, 0, 0)
def onstage(self):
self.notify.debug('onstage')
NametagGlobals.setOnscreenChatForced(1)
DistributedMinigame.onstage(self)
self.gameBoard.reparentTo(render)
self.sky.reparentTo(render)
self.moveCameraToTop()
self.trolleyCar.reparentTo(render)
for key in self.trainSwitches.keys():
self.trainSwitches[key].reparentTo(render)
for key in self.trainTracks.keys():
self.trainTracks[key].reparentTo(render)
for trainTrack in self.extraTrainTracks:
trainTrack.reparentTo(render)
base.transitions.irisIn(0.4)
base.setBackgroundColor(0.1875, 0.7929, 0)
base.playMusic(self.music, looping=1, volume=0.9)
self.introMovie.start()
def offstage(self):
self.notify.debug('offstage')
NametagGlobals.setOnscreenChatForced(0)
base.setBackgroundColor(ToontownGlobals.DefaultBackgroundColor)
self.introMovie.finish()
self.gameBoard.hide()
self.sky.hide()
self.trolleyCar.hide()
self.gui.hide()
self.hideMinigamesAndBonuses()
for key in self.trainSwitches.keys():
self.trainSwitches[key].hide()
for key in self.trainTracks.keys():
self.trainTracks[key].hide()
for trainTrack in self.extraTrainTracks:
trainTrack.hide()
DistributedMinigame.offstage(self)
if base.localAvatar.laffMeter:
base.localAvatar.laffMeter.start()
self.music.stop()
def setGameReady(self):
if not self.hasLocalToon:
return
self.notify.debug('setGameReady')
if DistributedMinigame.setGameReady(self):
return
for index in range(self.numPlayers):
avId = self.avIdList[index]
name = ''
avatar = self.getAvatar(avId)
if avatar:
avatar.reparentTo(self.trolleyCar)
avatar.animFSM.request('Sit')
avatar.setPosHpr(-4, -4.5 + index * 3, 2.8, 90, 0, 0)
name = avatar.getName()
self.avNames.append(name)
self.trolleyCar.setH(90)
def setGameStart(self, timestamp):
if not self.hasLocalToon:
return
self.notify.debug('setGameStart')
DistributedMinigame.setGameStart(self, timestamp)
self.introMovie.finish()
self.gameFSM.request('inputChoice')
def enterOff(self):
self.notify.debug('enterOff')
def exitOff(self):
pass
def enterInputChoice(self):
self.notify.debug('enterInputChoice')
NametagGlobals.setOnscreenChatForced(1)
self.timer = ToontownTimer.ToontownTimer()
self.timer.hide()
if self.timerStartTime != None:
self.startTimer()
if base.localAvatar.laffMeter:
base.localAvatar.laffMeter.stop()
self.gui.show()
self.showMinigamesAndBonuses()
return
def exitInputChoice(self):
NametagGlobals.setOnscreenChatForced(0)
if self.timer != None:
self.timer.destroy()
self.timer = None
self.timerStartTime = None
self.gui.hide()
return
def enterWaitServerChoices(self):
self.notify.debug('enterWaitServerChoices')
self.waitingChoicesLabel.show()
self.gui.hide()
def exitWaitServerChoices(self):
self.waitingChoicesLabel.hide()
def enterDisplayVotes(self, votes, directions, directionToGo, directionReason):
if self.UseTrolleyResultsPanel:
self.moveCameraToTrolley()
self.hideMinigamesAndBonuses()
else:
self.moveCameraToTop()
self.resultVotes = votes
self.resultDirections = directions
self.directionToGo = directionToGo
self.directionReason = directionReason
self.resultsStr = ''
directionTotals = [0] * TravelGameGlobals.MaxDirections
for index in range(len(votes)):
if index < len(self.avNames):
avId = self.avIdList[index]
dir = directions[index]
numVotes = votes[index]
directionTotals[dir] += numVotes
curStr = TTLocalizer.TravelGameOneToonVote % {'name': self.avNames[index],
'numVotes': numVotes,
'dir': TTLocalizer.TravelGameDirections[dir]}
if not (numVotes == 0 and avId in self.disconnectedAvIds):
self.resultsStr += curStr
directionStr = TTLocalizer.TravelGameTotals
for index in range(len(directionTotals)):
directionStr += ' ' + TTLocalizer.TravelGameDirections[index] + ':'
directionStr += str(directionTotals[index])
directionStr += '\n'
self.resultsStr += directionStr
reasonStr = ''
if directionReason == TravelGameGlobals.ReasonVote:
if directionToGo == 0:
losingDirection = 1
else:
losingDirection = 0
diffVotes = directionTotals[directionToGo] - directionTotals[losingDirection]
reasonStr = ''
if diffVotes > 1:
reasonStr = TTLocalizer.TravelGameReasonVotesPlural % {'dir': TTLocalizer.TravelGameDirections[directionToGo],
'numVotes': diffVotes}
else:
reasonStr = TTLocalizer.TravelGameReasonVotesSingular % {'dir': TTLocalizer.TravelGameDirections[directionToGo],
'numVotes': diffVotes}
elif directionReason == TravelGameGlobals.ReasonRandom:
reasonStr = TTLocalizer.TravelGameReasonRandom % {'dir': TTLocalizer.TravelGameDirections[directionToGo],
'numVotes': directionTotals[directionToGo]}
elif directionReason == TravelGameGlobals.ReasonPlaceDecider:
reasonStr = TravelGameReasonPlace % {'name': 'TODO NAME',
'dir': TTLocalizer.TravelGameDirections[directionToGo]}
self.resultsStr += reasonStr
self.dialog = TTDialog.TTDialog(text=self.resultsStr, command=self.__cleanupDialog, style=TTDialog.NoButtons, pos=(0, 0, 1))
self.dialog.hide()
if self.UseTrolleyResultsPanel:
self.votesPanel = VoteResultsTrolleyPanel.VoteResultsTrolleyPanel(len(self.avIdList), self.avIdList, votes, directions, self.avNames, self.disconnectedAvIds, directionToGo, directionReason, directionTotals)
else:
self.votesPanel = VoteResultsPanel.VoteResultsPanel(len(self.avIdList), self.avIdList, votes, directions, self.avNames, self.disconnectedAvIds, directionToGo, directionReason, directionTotals)
self.votesPanel.startMovie()
numPlayers = len(self.avIdList)
if TravelGameGlobals.SpoofFour:
numPlayers = 4
delay = TravelGameGlobals.DisplayVotesTimePerPlayer * (numPlayers + 1)
taskMgr.doMethodLater(delay, self.displayVotesTimeoutTask, self.taskName('displayVotes-timeout'))
curSwitch = TravelGameGlobals.BoardLayouts[self.boardIndex][self.currentSwitch]
self.destSwitch = curSwitch['links'][directionToGo]
self.updateCurrentVotes()
def exitDisplayVotes(self):
taskMgr.remove(self.taskName('displayVotes-timeout'))
self.__cleanupDialog(0)
if not self.UseTrolleyResultsPanel:
self.showMinigamesAndBonuses()
self.votesPanel.destroy()
    def enterMoveTrolley(self):
        """Animate the trolley from currentSwitch to destSwitch: rotate both
        turntables into line, roll the car along the link while spinning its
        keys and wheels, rotate everything back, optionally fly the camera
        back to the top view, then request the next FSM state."""
        self.notify.debug('enterMoveTrolley')
        camera.wrtReparentTo(render)
        keyAngle = round(self.TrolleyMoveDuration) * 360
        dist = Vec3(self.trainSwitches[self.destSwitch].getPos() - self.trainSwitches[self.currentSwitch].getPos()).length()
        # Wheel rotation (degrees) needed to roll `dist` with radius 0.95.
        wheelAngle = dist / (2.0 * math.pi * 0.95) * 360
        trolleyAnimateInterval = LerpFunctionInterval(self.animateTrolley, duration=self.TrolleyMoveDuration, blendType='easeInOut', extraArgs=[keyAngle, wheelAngle], name='TrolleyAnimate')
        moveTrolley = Sequence()
        moveTrolley.append(Func(self.resetAnimation))
        newPos = self.trainSwitches[self.destSwitch].getPos()
        linkKey = (self.currentSwitch, self.destSwitch)
        origHeading = self.trainTracks[linkKey].getH()
        heading = origHeading + 90
        # Turn the car and both turntables to face along the track.
        firstTurn = Parallel()
        firstTurn.append(LerpHprInterval(self.trolleyCar, 1, Vec3(heading, 0, 0)))
        firstTurn.append(LerpHprInterval(self.trainSwitches[self.currentSwitch], 1, Vec3(origHeading, 0, 0)))
        firstTurn.append(LerpHprInterval(self.trainSwitches[self.destSwitch], 1, Vec3(origHeading, 0, 0)))
        moveTrolley.append(firstTurn)
        moveTrolley.append(Parallel(LerpPosInterval(self.trolleyCar, self.TrolleyMoveDuration, newPos, blendType='easeInOut'), trolleyAnimateInterval))
        # Rotate the car and turntables back to the rest orientation.
        secondTurn = Parallel()
        secondTurn.append(LerpHprInterval(self.trolleyCar, 1, Vec3(90, 0, 0)))
        secondTurn.append(LerpHprInterval(self.trainSwitches[self.currentSwitch], 1, Vec3(0, 0, 0)))
        secondTurn.append(LerpHprInterval(self.trainSwitches[self.destSwitch], 1, Vec3(0, 0, 0)))
        moveTrolley.append(secondTurn)
        soundTrack = Sequence()
        trolleyExitBellInterval = Parallel(SoundInterval(self.trolleyBellSfx, duration=1), SoundInterval(self.turntableRotateSfx, duration=1, volume=0.5))
        trolleyExitAwayInterval = SoundInterval(self.trolleyAwaySfx, duration=3)
        soundTrack.append(trolleyExitBellInterval)
        soundTrack.append(trolleyExitAwayInterval)
        soundTrack.append(trolleyExitBellInterval)
        self.moveTrolleyIval = Parallel(moveTrolley, soundTrack)
        duration = self.moveTrolleyIval.getDuration()
        def focusOnTrolley(t, self = self):
            # Keep the camera aimed at a point just above the car.
            pos = self.trolleyCar.getPos()
            pos.setZ(pos.getZ() + 7.5)
            camera.lookAt(pos)
            self.lastFocusHpr = camera.getHpr()
        setRightHprTime = 0
        if self.FlyCameraUp:
            setRightHprTime = 1.0
        camIval1 = Parallel()
        camIval1.append(LerpFunc(focusOnTrolley, duration - setRightHprTime, name='focusOnTrolley'))
        finalPos = Vec3(self.cameraTopView[0], self.cameraTopView[1], self.cameraTopView[2])
        finalHpr = Vec3(self.cameraTopView[3], self.cameraTopView[4], self.cameraTopView[5])
        if self.FlyCameraUp:
            if self.FocusOnTrolleyWhileMovingUp:
                camIval1.append(LerpPosInterval(camera, duration - setRightHprTime, finalPos, name='cameraMove'))
                camIval2 = Sequence(LerpHprInterval(camera, setRightHprTime, finalHpr, name='cameraHpr'))
            else:
                camIval2 = Sequence(LerpPosHprInterval(camera, setRightHprTime, finalPos, finalHpr, blendType='easeIn', name='cameraHpr'))
            camIval = Sequence(camIval1, camIval2)
        else:
            camIval = Sequence(camIval1)
        if self.UseTrolleyResultsPanel:
            self.moveTrolleyIval.append(camIval)
            # Wrap in a Sequence so the FSM-request Func below runs after
            # the whole Parallel completes, not in parallel with it.
            temp = self.moveTrolleyIval
            self.moveTrolleyIval = Sequence(temp)
        if self.isLeaf(self.destSwitch):
            self.moveTrolleyIval.append(Func(self.gameFSM.request, 'winMovie'))
        else:
            self.moveTrolleyIval.append(Func(self.gameFSM.request, 'inputChoice'))
        self.moveTrolleyIval.start()
def exitMoveTrolley(self):
self.notify.debug('exitMoveTrolley')
self.currentSwitch = self.destSwitch
self.moveTrolleyIval.finish()
self.moveCameraToTop()
self.showMinigamesAndBonuses()
    def enterWinMovie(self):
        """Arrived at a leaf: announce the minigame to be played, report any
        secret-goal bonus winners, play win/lose stingers, and roll the
        trolley off-screen before ending the game."""
        resultStr = TTLocalizer.TravelGamePlaying % {'game': self.idToNames[self.switchToMinigameDict[self.currentSwitch]]}
        numToons = 0
        for avId in self.avIdList:
            if avId not in self.disconnectedAvIds:
                numToons += 1
        if numToons <= 1:
            resultStr = TTLocalizer.TravelGameGoingBackToShop
        reachedGoalStr = None
        # NOTE(review): localAvatarWon/localAvatarLost are never used here.
        localAvatarWon = False
        localAvatarLost = False
        noWinner = True
        for avId in self.avIdBonuses.keys():
            name = ''
            avatar = self.getAvatar(avId)
            if avatar:
                name = avatar.getName()
            if self.avIdBonuses[avId][0] == self.currentSwitch:
                # This avatar's secret switch matches where we stopped.
                noWinner = False
                reachedGoalStr = TTLocalizer.TravelGameGotBonus % {'name': name,
                 'numBeans': self.avIdBonuses[avId][1]}
                if avId == base.localAvatar.doId:
                    if not TravelGameGlobals.ReverseWin:
                        self.wonGameSfx.play()
                        bonusLabel = self.switchToBonusLabelDict[self.currentSwitch]
                        # Pulse the winning bonus label green.
                        self.flashWinningBeansTrack = Sequence(LerpColorScaleInterval(bonusLabel, 0.75, Vec4(0.5, 1, 0.5, 1)), LerpColorScaleInterval(bonusLabel, 0.75, Vec4(1, 1, 1, 1)))
                        self.flashWinningBeansTrack.loop()
                    else:
                        self.lostGameSfx.play()
                elif not TravelGameGlobals.ReverseWin:
                    self.lostGameSfx.play()
                else:
                    self.wonGameSfx.play()
        if noWinner:
            self.noWinnerSfx.play()
            resultStr += '\n\n'
            resultStr += TTLocalizer.TravelGameNoOneGotBonus
        if reachedGoalStr:
            resultStr += '\n\n'
            resultStr += reachedGoalStr
        self.winDialog = TTDialog.TTDialog(text=resultStr, command=self.__cleanupWinDialog, style=TTDialog.NoButtons)
        info = TravelGameGlobals.BoardLayouts[self.boardIndex][self.currentSwitch]
        leafX, leafY, leafZ = info['pos']
        endX = leafX + TravelGameGlobals.xInc
        heading = 90
        # Drive the trolley past the tunnel and off the board.
        moveTrolley = Sequence()
        moveTrolley.append(LerpHprInterval(self.trolleyCar, 1, Vec3(heading, 0, 0)))
        moveTrolley.append(LerpPosInterval(self.trolleyCar, 3, Vec3(endX + 20, leafY, 0)))
        soundTrack = Sequence()
        trolleyExitBellInterval = SoundInterval(self.trolleyBellSfx, duration=1)
        trolleyExitAwayInterval = SoundInterval(self.trolleyAwaySfx, duration=3)
        soundTrack.append(trolleyExitBellInterval)
        soundTrack.append(trolleyExitAwayInterval)
        soundTrack.append(trolleyExitBellInterval)
        self.moveTrolleyIval = Parallel(moveTrolley, soundTrack)
        self.moveTrolleyIval.start()
        delay = 8
        taskMgr.doMethodLater(delay, self.gameOverCallback, self.taskName('playMovie'))
        return
def exitWinMovie(self):
taskMgr.remove(self.taskName('playMovie'))
self.moveTrolleyIval.finish()
def enterCleanup(self):
self.notify.debug('enterCleanup')
def exitCleanup(self):
pass
def setStartingVotes(self, startingVotesArray):
if not len(startingVotesArray) == len(self.avIdList):
self.notify.error('length does not match, startingVotes=%s, avIdList=%s' % (startingVotesArray, self.avIdList))
return
for index in range(len(self.avIdList)):
avId = self.avIdList[index]
self.startingVotes[avId] = startingVotesArray[index]
if not self.currentVotes.has_key(avId):
self.currentVotes[avId] = startingVotesArray[index]
self.notify.debug('starting votes = %s' % self.startingVotes)
def startTimer(self):
now = globalClock.getFrameTime()
elapsed = now - self.timerStartTime
self.timer.setPos(1.16, 0, -0.83)
self.timer.setTime(TravelGameGlobals.InputTimeout)
self.timer.countdown(TravelGameGlobals.InputTimeout - elapsed, self.handleChoiceTimeout)
self.timer.show()
def setTimerStartTime(self, timestamp):
if not self.hasLocalToon:
return
self.timerStartTime = globalClockDelta.networkToLocalTime(timestamp)
if self.timer != None:
self.startTimer()
return
def handleChoiceTimeout(self):
self.sendUpdate('setAvatarChoice', [0, 0])
self.gameFSM.request('waitServerChoices')
def putChoicesInScrollList(self):
available = self.currentVotes[self.localAvId]
if len(self.scrollList['items']) > 0:
self.scrollList.removeAllItems()
self.indexToVotes = {}
index = 0
for vote in range(available)[::-1]:
self.scrollList.addItem(str(-(vote + 1)))
self.indexToVotes[index] = vote + 1
index += 1
self.scrollList.addItem(str(0))
self.indexToVotes[index] = 0
self.zeroVoteIndex = index
index += 1
for vote in range(available):
self.scrollList.addItem(str(vote + 1))
self.indexToVotes[index] = vote + 1
index += 1
self.scrollList.scrollTo(self.zeroVoteIndex)
def getAbsVoteChoice(self):
available = self.currentVotes[self.localAvId]
retval = 0
if hasattr(self, 'scrollList'):
selectedIndex = self.scrollList.getSelectedIndex()
if self.indexToVotes.has_key(selectedIndex):
retval = self.indexToVotes[selectedIndex]
return retval
def getAbsDirectionChoice(self):
selectedIndex = self.scrollList.getSelectedIndex()
if selectedIndex < self.zeroVoteIndex:
retval = 0
elif selectedIndex == self.zeroVoteIndex:
retval = 0
else:
retval = 1
return retval
def makeTextMatchChoice(self):
self.votesPeriodLabel.hide()
self.votesToGoLabel.hide()
self.upLabel.hide()
self.downLabel.hide()
if not hasattr(self, 'scrollList') or not hasattr(self, 'zeroVoteIndex'):
return
selectedIndex = self.scrollList.getSelectedIndex()
if selectedIndex < self.zeroVoteIndex:
self.votesToGoLabel.show()
self.upLabel.show()
elif selectedIndex == self.zeroVoteIndex:
self.votesPeriodLabel.show()
else:
self.votesToGoLabel.show()
self.downLabel.show()
def scrollChoiceChanged(self):
choiceVotes = self.getAbsVoteChoice()
if choiceVotes == 1:
self.votesToGoLabel['text'] = TTLocalizer.TravelGameVoteToGo
else:
self.votesToGoLabel['text'] = TTLocalizer.TravelGameVotesToGo
available = self.currentVotes[self.localAvId]
self.localVotesRemaining['text'] = str(available - choiceVotes)
self.makeTextMatchChoice()
def setAvatarChose(self, avId):
if not self.hasLocalToon:
return
self.notify.debug('setAvatarChose: avatar: ' + str(avId) + ' choose a number')
def handleInputChoice(self):
numVotes = self.getAbsVoteChoice()
direction = self.getAbsDirectionChoice()
self.sendUpdate('setAvatarChoice', [numVotes, direction])
self.gameFSM.request('waitServerChoices')
def setServerChoices(self, votes, directions, directionToGo, directionReason):
if not self.hasLocalToon:
return
self.notify.debug('requesting displayVotes, curState=%s' % self.gameFSM.getCurrentState().getName())
self.gameFSM.request('displayVotes', [votes,
directions,
directionToGo,
directionReason])
def __cleanupDialog(self, value):
if self.dialog:
self.dialog.cleanup()
self.dialog = None
return
def displayVotesTimeoutTask(self, task):
self.notify.debug('Done waiting for display votes')
self.gameFSM.request('moveTrolley')
return Task.done
def updateCurrentVotes(self):
for index in range(len(self.resultVotes)):
avId = self.avIdList[index]
oldCurrentVotes = self.currentVotes[avId]
self.currentVotes[avId] -= self.resultVotes[index]
self.putChoicesInScrollList()
self.makeTextMatchChoice()
def isLeaf(self, switchIndex):
retval = False
links = TravelGameGlobals.BoardLayouts[self.boardIndex][switchIndex]['links']
if len(links) == 0:
retval = True
return retval
def __cleanupWinDialog(self, value):
if hasattr(self, 'winDialog') and self.winDialog:
self.winDialog.cleanup()
self.winDialog = None
return
def gameOverCallback(self, task):
self.__cleanupWinDialog(0)
self.gameOver()
return Task.done
def setMinigames(self, switches, minigames):
if not self.hasLocalToon:
return
self.switchToMinigameDict = {}
for index in range(len(switches)):
switch = switches[index]
minigame = minigames[index]
self.switchToMinigameDict[switch] = minigame
self.notify.debug('minigameDict = %s' % self.switchToMinigameDict)
self.loadMinigameIcons()
def loadMinigameIcons(self):
    # Load the shared icon model once; individual minigame icons are looked
    # up from it per switch below.
    self.mg_icons = loader.loadModel('phase_4/models/minigames/mg_icons')
    for switch in self.switchToMinigameDict.keys():
        minigame = self.switchToMinigameDict[switch]
        switchPos = self.trainSwitches[switch].getPos()
        # Project the 3-D switch position into the 2-D aspect2d plane so a
        # label/button can be placed over it on screen.
        labelPos = map3dToAspect2d(render, switchPos)
        useText = True
        iconName = None
        if minigame in IconDict.keys():
            iconName = IconDict[minigame]
        icon = None
        if self.mg_icons:
            # find() yields an empty NodePath when iconName is None or the
            # node is missing; we then fall back to a text label.
            icon = self.mg_icons.find('**/%s' % iconName)
            if not icon.isEmpty():
                useText = False
        if labelPos:
            if useText:
                # Text fallback: nudge the label down slightly.
                labelPos.setZ(labelPos.getZ() - 0.1)
                label = DirectLabel(text=self.idToNames[minigame], relief=None, scale=0.1, pos=labelPos, text_fg=(1.0, 1.0, 1.0, 1.0))
                label.hide()
                self.minigameLabels.append(label)
            else:
                # Icon available: show it with the minigame name as rollover
                # text (index 2 of the DirectButton text tuple).
                placeHolder = DirectButton(image=icon, relief=None, text=('',
                 '',
                 self.idToNames[minigame],
                 ''), text_scale=0.3, text_pos=(0, -0.7, 0), text_fg=(1, 1, 1, 1), clickSound=None, pressEffect=0)
                placeHolder.setPos(labelPos)
                placeHolder.setScale(0.2)
                placeHolder.hide()
                self.minigameIcons.append(placeHolder)
        # Also paste the icon onto a sign inside the destination tunnel.
        # NOTE(review): `icon` can be None (if mg_icons failed to load) or an
        # empty NodePath (text-fallback case) here — confirm copyTo on an
        # empty/None icon is intended.
        tunnel = self.tunnels[switch]
        sign = tunnel.attachNewNode('sign')
        icon.copyTo(sign)
        sign.setH(-90)
        sign.setZ(26)
        sign.setScale(10)
    return
def showMinigamesAndBonuses(self):
    """Reveal every minigame label, bonus label and minigame icon."""
    for widget in self.minigameLabels + self.bonusLabels + self.minigameIcons:
        widget.show()
def hideMinigamesAndBonuses(self):
    """Hide every minigame label, bonus label and minigame icon."""
    for widget in self.minigameLabels + self.bonusLabels + self.minigameIcons:
        widget.hide()
def loadBonuses(self):
    # Build the on-screen label for the local toon's secret bean bonus.
    # Only the local avatar's bonus is shown; other avatars' bonuses stay
    # hidden, hence the break after the match.
    self.switchToBonusLabelDict = {}
    for avId in self.avIdBonuses.keys():
        if avId == self.localAvId:
            # avIdBonuses maps avId -> (switch index, bean count).
            switch = self.avIdBonuses[avId][0]
            beans = self.avIdBonuses[avId][1]
            switchPos = self.trainSwitches[switch].getPos()
            # Project the switch's 3-D position onto the 2-D overlay plane.
            labelPos = map3dToAspect2d(render, switchPos)
            if labelPos:
                # Offset the label so it does not sit on top of the
                # minigame label at the same switch.
                labelPos.setX(labelPos.getX() + 0.1)
                labelPos.setZ(labelPos.getZ() - 0.02)
                bonusStr = TTLocalizer.TravelGameBonusBeans % {'numBeans': beans}
                label = DirectLabel(text=bonusStr, relief=None, scale=0.1, pos=labelPos, text_fg=(1.0, 1.0, 1.0, 1.0), text_align=TextNode.ALeft)
                label.hide()
                self.bonusLabels.append(label)
                self.switchToBonusLabelDict[switch] = label
            break
    return
def setBonuses(self, switches, beans):
    """Distributed update: record each avatar's secret bean bonus.

    Args:
        switches: switch index per avatar, parallel to self.avIdList.
        beans: bean count per avatar, parallel to self.avIdList.
    """
    if not self.hasLocalToon:
        return
    # Index loop replaced by a comprehension over zip(); same mapping of
    # avId -> (switch, bean count).
    self.avIdBonuses = {
        avId: (switch, bean)
        for avId, switch, bean in zip(self.avIdList, switches, beans)
    }
    self.notify.debug('self.avIdBonuses = %s' % self.avIdBonuses)
    self.loadBonuses()
def handleDisabledAvatar(self, avId):
    """Handle another player disconnecting mid-game.

    Logs the event and records the avId so later vote/result processing
    can skip the disconnected player.
    """
    # Fixed typo in the warning text: 'Distrbuted' -> 'Distributed'.
    self.notify.warning('DistributedTravelGame: handleDisabledAvatar: disabled avId: ' + str(avId))
    self.disconnectedAvIds.append(avId)
def setBoardIndex(self, boardIndex):
    """Distributed update: select which board layout this game round uses."""
    self.boardIndex = boardIndex
def getIntroMovie(self):
    # Build the intro interval: the trolley rolls in from one x-increment
    # left of the board's root node, then turns to its travel heading,
    # while the "away" and bell sounds play alongside the motion.
    rootInfo = TravelGameGlobals.BoardLayouts[self.boardIndex][0]
    rootX, rootY, rootZ = rootInfo['pos']
    startX = rootX - TravelGameGlobals.xInc
    heading = 90
    moveTrolley = Sequence()
    moveTrolley.append(Func(self.trolleyCar.setH, 90))
    # 3 seconds of travel followed by a 1 second turn.
    moveTrolley.append(LerpPosInterval(self.trolleyCar, 3, Vec3(rootX, rootY, 0), startPos=Vec3(startX, rootY, 0)))
    moveTrolley.append(LerpHprInterval(self.trolleyCar, 1, Vec3(heading, 0, 0)))
    soundTrack = Sequence()
    trolleyExitAwayInterval = SoundInterval(self.trolleyAwaySfx, duration=3)
    trolleyExitBellInterval = SoundInterval(self.trolleyBellSfx, duration=1)
    soundTrack.append(trolleyExitAwayInterval)
    soundTrack.append(trolleyExitBellInterval)
    # Motion and sound run in parallel; caller is expected to play this.
    retval = Parallel(moveTrolley, soundTrack)
    return retval
def animateTrolley(self, t, keyAngle, wheelAngle):
    """Rotate the trolley's wind-up keys and wheels for animation frame t.

    t is the interval parameter; keyAngle/wheelAngle are the rotations (in
    degrees) reached when t == 1.  Headings are set relative to the stored
    reference nodes.
    """
    for idx in range(self.numKeys):
        self.keys[idx].setH(self.keyRef[idx], t * keyAngle)
    for idx in range(self.numFrontWheels):
        self.frontWheels[idx].setH(self.frontWheelRef[idx], t * wheelAngle)
    for idx in range(self.numBackWheels):
        self.backWheels[idx].setH(self.backWheelRef[idx], t * wheelAngle)
def resetAnimation(self):
    """Snap the keys and wheels back to their captured initial transforms."""
    parts_and_inits = (
        (self.keys[:self.numKeys], self.keyInit),
        (self.frontWheels[:self.numFrontWheels], self.frontWheelInit),
        (self.backWheels[:self.numBackWheels], self.backWheelInit),
    )
    for parts, inits in parts_and_inits:
        for part, initialTransform in zip(parts, inits):
            part.setTransform(initialTransform)
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utility functions for probing experiments."""
import collections
import random
from typing import Dict, Generator, Iterator, List, Set, Text, Tuple
from absl import logging
import numpy as np
import pandas as pd
from scipy import stats
from sklearn import feature_extraction
from sklearn import neighbors
def split_by_length_threshold(df, test_set_size):
    """Splits off a test set containing the longest texts.

    Walks candidate thresholds downward from the maximum text length and
    stops at the largest threshold for which the texts of length <= threshold
    would no longer fit the budget; everything strictly longer than the
    returned threshold forms the test set (so its size is at most
    `test_set_size`). The exact size cannot be guaranteed.

    Args:
      df: DataFrame with 'text_len' column containing the length of every
        text.
      test_set_size: Number of elements the test set must contain.

    Returns:
      1. Length threshold.
      2. Lengths that exist in the dataset and that are larger than the
         threshold.
      3. Binary mask in the size of the original data, where True indicates
         that the example must be part of the test set.

    Raises:
      ValueError: In case no threshold can be found such that `test_set_size`
        elements end up in the test set.
    """
    longest = max(df['text_len'])
    cumulative = 0
    # Accumulate counts from the longest texts downwards.
    for threshold in range(longest, 0, -1):
        cumulative += int((df['text_len'] == threshold).sum())
        if cumulative > test_set_size:
            observed_longer = set(df['text_len']) & set(
                range(threshold + 1, longest + 1))
            return threshold, observed_longer, df.loc[:, 'text_len'] > threshold
    raise ValueError(
        'No length threshold found to create a test set with size {}.'.format(
            test_set_size))
def split_by_random_length(
    df,
    test_set_size,
    size_tolerance = 0.01,
    max_attempts = 100):
    """Creates new train/dev/test split for SentEval data based on text length.

    Finds a random subset of text lengths such that the number of examples
    having these lengths is within an accepted window around `test_set_size`.
    The accepted window is:
      test_set_size * (1 - size_tolerance) < window_size
                                           < test_set_size * (1 + size_tolerance)
    Since this is a random process, the script can generate multiple such
    samples. Note that the exact size cannot be guaranteed.

    Args:
      df: DataFrame with 'text_len' column containing the length of every
        text.
      test_set_size: Number of elements the test set must contain. This is the
        middle of the window in which the number of test set examples must
        fall.
      size_tolerance: Indicates the width of the window according to the above
        formula. This should likely be in [0, 1).
      max_attempts: Number of attempts to try to sample a test set whose size
        is accepted.

    Returns:
      1. Lengths that are reserved for the test set.
      2. Binary mask in the size of the original data, where True indicates
         that the example must be part of the test set.

    Raises:
      RuntimeError: In case no proper length subset can be found such that the
        number of test set examples falls into the window.
    """
    all_lengths = set(df['text_len'])
    for _ in range(max_attempts):
        test_mask = pd.Series([False] * len(df), dtype=bool)
        remaining_lengths = all_lengths.copy()
        while remaining_lengths:
            # Move one randomly chosen length from the remaining pool into
            # the test set.
            selected_length = random.choice(tuple(remaining_lengths))
            remaining_lengths.remove(selected_length)
            test_mask.loc[df['text_len'] == selected_length] = True
            # Keep adding lengths until we have a minimum of test examples.
            if test_mask.sum() < test_set_size * (1 - size_tolerance):
                continue
            # If we are still within the tolerance we take this configuration.
            if test_mask.sum() < test_set_size * (1 + size_tolerance):
                return all_lengths - remaining_lengths, test_mask
            # Otherwise we need to start over again.
            break
    raise RuntimeError(
        'No proper split found. Consider increasing the number of attempts or '
        'the size tolerance.')
def split_with_wasserstein(texts, test_set_size,
                           no_of_trials, min_df,
                           leaf_size):
    """Samples test sets by nearest-neighbor search in Wasserstein space.

    All texts are turned into document-term count vectors and indexed in a
    ball tree using the Wasserstein distance. For each trial a random
    centroid is drawn and its `test_set_size` nearest neighbors become one
    candidate test set.

    Args:
      texts: Texts to split into training/dev and test sets.
      test_set_size: Number of elements the new test set should contain.
      no_of_trials: Number of test sets requested.
      min_df: Mainly for speed-up and memory efficiency. All tokens must
        occur at least this many times to be considered.
      leaf_size: Leaf size parameter of the nearest neighbor search. Higher
        values are slower but less memory-heavy.

    Returns:
      A list with one array of test-set indices (into `texts`) per trial.
    """
    count_vectorizer = feature_extraction.text.CountVectorizer(
        dtype=np.int8, min_df=min_df)
    logging.info('Creating count vectors.')
    count_matrix = count_vectorizer.fit_transform(texts).todense()
    logging.info('Count vector shape %s.', count_matrix.shape)
    logging.info('Creating tree structure.')
    neighbor_index = neighbors.NearestNeighbors(
        n_neighbors=test_set_size,
        algorithm='ball_tree',
        leaf_size=leaf_size,
        metric=stats.wasserstein_distance)
    neighbor_index.fit(count_matrix)
    logging.info('Sampling test sets.')
    test_set_indices = []
    for trial in range(no_of_trials):
        logging.info('Trial set: %d.', trial)
        # Sample a random test centroid with entries in [0, max observed
        # count].
        sampled_point = np.random.randint(
            count_matrix.max().max() + 1, size=(1, count_matrix.shape[1]))
        # We queried for only one datapoint, so take row 0.
        nearest = neighbor_index.kneighbors(
            sampled_point, return_distance=False)[0]
        logging.info(nearest[:10])
        test_set_indices.append(nearest)
    return test_set_indices
def get_target_word_to_sentence_mapping(
    target_words, ignore_sentences,
    sentence_iter):
    """Groups sentences by the single target word each one contains.

    A sentence is kept only when exactly one of `target_words` occurs in it
    and it is not in `ignore_sentences` (typically the original SentEval
    sentences, since entirely new test sets are wanted).

    Args:
      target_words: Tokens to find within sentences; exactly one may occur.
      ignore_sentences: Sentences to skip during iteration.
      sentence_iter: Provider for sentences to filter.

    Returns:
      Mapping from target word to all sentences from `sentence_iter` that
      contain that target word and no other.
    """
    sentences_by_word = collections.defaultdict(list)
    duplicate_count = 0
    for i, sentence in enumerate(sentence_iter):
        logging.log_every_n(logging.INFO, f'Sentences analyzed: {i}.', 100000)
        if sentence in ignore_sentences:
            logging.warning('Sentence already exists and will be ignored: "%s".',
                            sentence)
            duplicate_count += 1
            continue
        matched_words = set(sentence.split()).intersection(target_words)
        # Usable only when exactly one target word occurs in the sentence.
        if len(matched_words) != 1:
            continue
        sentences_by_word[matched_words.pop()].append(sentence)
    logging.info('Duplicate sentences found: %d.', duplicate_count)
    return dict(sentences_by_word)
def read_senteval_data(senteval_path, task_name):
    """Loads one official SentEval data file into a pandas DataFrame.

    Args:
      senteval_path: base directory of the original SentEval data. Most likely
        this is the "probing" directory of the original directory structure.
      task_name: name of the task whose data file exists in `senteval_path`.

    Returns:
      DataFrame with columns 'set', 'target' and 'text' holding all content
      from the input file.
    """
    filename = '{}/{}'.format(senteval_path, task_name)
    # Setting quotechar to the delimiter prevents pandas from mistreating
    # quotes. The original data isn't properly escaped csv in this sense.
    # (An unreachable `raise ValueError` after the return was removed; open()
    # already raises OSError for a missing file.)
    with open(filename) as handle:
        return pd.read_csv(
            handle,
            sep='\t',
            header=None,
            names=['set', 'target', 'text'],
            encoding='utf-8',
            quotechar='\t')
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import functools
import six
from heat.common import exception
from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources.openstack.heat import software_config as sc
from heat.engine.resources.openstack.heat import software_deployment as sd
from heat.engine import rsrc_defn
from heat.engine import support
class StructuredConfig(sc.SoftwareConfig):
    """A resource which has same logic with OS::Heat::SoftwareConfig.

    This resource is like OS::Heat::SoftwareConfig except that the config
    property is represented by a Map rather than a String.

    This is useful for configuration tools which use YAML or JSON as their
    configuration syntax. The resulting configuration is transferred,
    stored and returned by the software_configs API as parsed JSON.
    """

    support_status = support.SupportStatus(version='2014.1')

    # Re-export the parent's property keys so the schema below can refer to
    # them by their short local names.
    PROPERTIES = (
        GROUP,
        CONFIG,
        OPTIONS,
        INPUTS,
        OUTPUTS
    ) = (
        sc.SoftwareConfig.GROUP,
        sc.SoftwareConfig.CONFIG,
        sc.SoftwareConfig.OPTIONS,
        sc.SoftwareConfig.INPUTS,
        sc.SoftwareConfig.OUTPUTS
    )

    # Identical to the parent schema except CONFIG, which is a Map here
    # instead of a String.
    properties_schema = {
        GROUP: sc.SoftwareConfig.properties_schema[GROUP],
        OPTIONS: sc.SoftwareConfig.properties_schema[OPTIONS],
        INPUTS: sc.SoftwareConfig.properties_schema[INPUTS],
        OUTPUTS: sc.SoftwareConfig.properties_schema[OUTPUTS],
        CONFIG: properties.Schema(
            properties.Schema.MAP,
            _('Map representing the configuration data structure which will '
              'be serialized to JSON format.')
        )
    }
class StructuredDeployment(sd.SoftwareDeployment):
    """A resource which has same logic with OS::Heat::SoftwareDeployment.

    A deployment resource like OS::Heat::SoftwareDeployment, but which
    performs input value substitution on the config defined by a
    OS::Heat::StructuredConfig resource.

    Some configuration tools have no concept of inputs, so the input value
    substitution needs to occur in the deployment resource. An example of this
    is the JSON metadata consumed by the cfn-init tool.

    Where the config contains {get_input: input_name} this will be substituted
    with the value of input_name in this resource's input_values. If get_input
    needs to be passed through to the substituted configuration then a
    different input_key property value can be specified.
    """

    support_status = support.SupportStatus(version='2014.1')

    PROPERTIES = (
        CONFIG,
        SERVER,
        INPUT_VALUES,
        DEPLOY_ACTIONS,
        NAME,
        SIGNAL_TRANSPORT,
        INPUT_KEY,
        INPUT_VALUES_VALIDATE
    ) = (
        sd.SoftwareDeployment.CONFIG,
        sd.SoftwareDeployment.SERVER,
        sd.SoftwareDeployment.INPUT_VALUES,
        sd.SoftwareDeployment.DEPLOY_ACTIONS,
        sd.SoftwareDeployment.NAME,
        sd.SoftwareDeployment.SIGNAL_TRANSPORT,
        'input_key',
        'input_values_validate'
    )

    _sd_ps = sd.SoftwareDeployment.properties_schema

    # Parent schema plus the two substitution-specific properties.
    properties_schema = {
        CONFIG: _sd_ps[CONFIG],
        SERVER: _sd_ps[SERVER],
        INPUT_VALUES: _sd_ps[INPUT_VALUES],
        DEPLOY_ACTIONS: _sd_ps[DEPLOY_ACTIONS],
        SIGNAL_TRANSPORT: _sd_ps[SIGNAL_TRANSPORT],
        NAME: _sd_ps[NAME],
        INPUT_KEY: properties.Schema(
            properties.Schema.STRING,
            _('Name of key to use for substituting inputs during deployment.'),
            default='get_input',
        ),
        INPUT_VALUES_VALIDATE: properties.Schema(
            properties.Schema.STRING,
            _('Perform a check on the input values passed to verify that '
              'each required input has a corresponding value. '
              'When the property is set to STRICT and no value is passed, '
              'an exception is raised.'),
            default='LAX',
            constraints=[
                constraints.AllowedValues(['LAX', 'STRICT']),
            ],
        )
    }

    def empty_config(self):
        """Return the derived config used when no config is supplied."""
        return {}

    def _build_derived_config(self, action, source,
                              derived_inputs, derived_options):
        """Build the derived config by substituting inputs into the source."""
        cfg = source.get(sc.SoftwareConfig.CONFIG)
        input_key = self.properties[self.INPUT_KEY]
        check_input_val = self.properties[self.INPUT_VALUES_VALIDATE]
        inputs = dict(i.input_data() for i in derived_inputs)
        return self.parse(inputs, input_key, cfg, check_input_val)

    @staticmethod
    def get_input_key_arg(snippet, input_key):
        """Return the input name if snippet is a single {input_key: name} map.

        Returns None when the snippet does not match that shape.
        """
        if len(snippet) != 1:
            return None
        fn_name, fn_arg = next(six.iteritems(snippet))
        if (fn_name == input_key and isinstance(fn_arg, six.string_types)):
            return fn_arg

    @staticmethod
    def get_input_key_value(fn_arg, inputs, check_input_val='LAX'):
        """Look up an input value; raise in STRICT mode when it is missing."""
        if check_input_val == 'STRICT' and fn_arg not in inputs:
            raise exception.UserParameterMissing(key=fn_arg)
        return inputs.get(fn_arg)

    @staticmethod
    def parse(inputs, input_key, snippet, check_input_val='LAX'):
        """Recursively replace {input_key: name} nodes with input values."""
        parse = functools.partial(
            StructuredDeployment.parse,
            inputs,
            input_key,
            check_input_val=check_input_val)

        # Bug fix: the container ABCs moved to collections.abc in Python 3.3
        # and the top-level collections aliases were removed in Python 3.10,
        # so isinstance(x, collections.Mapping) raises AttributeError on
        # modern interpreters. Fall back to the old location on Python 2.
        coll_abc = getattr(collections, 'abc', collections)

        if isinstance(snippet, coll_abc.Mapping):
            fn_arg = StructuredDeployment.get_input_key_arg(snippet, input_key)
            if fn_arg is not None:
                return StructuredDeployment.get_input_key_value(
                    fn_arg, inputs, check_input_val)
            return dict((k, parse(v)) for k, v in six.iteritems(snippet))
        elif (not isinstance(snippet, six.string_types) and
              isinstance(snippet, coll_abc.Iterable)):
            return [parse(v) for v in snippet]
        else:
            return snippet
class StructuredDeploymentGroup(sd.SoftwareDeploymentGroup):
    """This resource associates a group of servers with some configuration.

    This resource works similar as OS::Heat::SoftwareDeploymentGroup, but for
    structured resources.
    """

    # Parent property keys plus the two StructuredDeployment-specific ones.
    PROPERTIES = (
        SERVERS,
        CONFIG,
        INPUT_VALUES,
        DEPLOY_ACTIONS,
        NAME,
        SIGNAL_TRANSPORT,
        INPUT_KEY,
        INPUT_VALUES_VALIDATE,
    ) = (
        sd.SoftwareDeploymentGroup.SERVERS,
        sd.SoftwareDeploymentGroup.CONFIG,
        sd.SoftwareDeploymentGroup.INPUT_VALUES,
        sd.SoftwareDeploymentGroup.DEPLOY_ACTIONS,
        sd.SoftwareDeploymentGroup.NAME,
        sd.SoftwareDeploymentGroup.SIGNAL_TRANSPORT,
        StructuredDeployment.INPUT_KEY,
        StructuredDeployment.INPUT_VALUES_VALIDATE
    )

    _sds_ps = sd.SoftwareDeploymentGroup.properties_schema

    properties_schema = {
        SERVERS: _sds_ps[SERVERS],
        CONFIG: _sds_ps[CONFIG],
        INPUT_VALUES: _sds_ps[INPUT_VALUES],
        DEPLOY_ACTIONS: _sds_ps[DEPLOY_ACTIONS],
        SIGNAL_TRANSPORT: _sds_ps[SIGNAL_TRANSPORT],
        NAME: _sds_ps[NAME],
        INPUT_KEY: StructuredDeployment.properties_schema[INPUT_KEY],
        INPUT_VALUES_VALIDATE:
            StructuredDeployment.properties_schema[INPUT_VALUES_VALIDATE],
    }

    def build_resource_definition(self, res_name, res_defn):
        """Build a per-server StructuredDeployment definition.

        Replaces the group's SERVERS map with the single server belonging to
        res_name.
        """
        props = copy.deepcopy(res_defn)
        servers = props.pop(self.SERVERS)
        props[StructuredDeployment.SERVER] = servers.get(res_name)
        return rsrc_defn.ResourceDefinition(res_name,
                                            'OS::Heat::StructuredDeployment',
                                            props, None)
class StructuredDeployments(StructuredDeploymentGroup):
    # Deprecated alias kept only for backwards compatibility; hidden from
    # the catalogue since 7.0.0.
    hidden_msg = _('Please use OS::Heat::StructuredDeploymentGroup instead.')

    support_status = support.SupportStatus(
        status=support.HIDDEN,
        message=hidden_msg,
        version='7.0.0',
        previous_status=support.SupportStatus(
            status=support.DEPRECATED,
            version='2014.2'))
def resource_mapping():
    """Map Heat resource type names to their implementing classes."""
    mapping = {}
    mapping['OS::Heat::StructuredConfig'] = StructuredConfig
    mapping['OS::Heat::StructuredDeployment'] = StructuredDeployment
    mapping['OS::Heat::StructuredDeploymentGroup'] = StructuredDeploymentGroup
    mapping['OS::Heat::StructuredDeployments'] = StructuredDeployments
    return mapping
|
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import webkitpy.thirdparty.unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.checkout.baselineoptimizer import BaselineOptimizer
from webkitpy.common.net.buildbot.buildbot_mock import MockBuilder
from webkitpy.common.net.layouttestresults import LayoutTestResults
from webkitpy.common.system.executive_mock import MockExecutive2
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.commands.rebaseline import *
from webkitpy.tool.mocktool import MockTool, MockOptions
class _BaseTestCase(unittest.TestCase):
    """Shared fixture for rebaseline-command tests.

    Subclasses set `command_constructor` to the command class under test;
    setUp() binds a fresh instance of it to a MockTool.
    """
    MOCK_WEB_RESULT = 'MOCK Web result, convert 404 to None=True'
    WEB_PREFIX = 'http://example.com/f/builders/WebKit Mac10.7/results/layout-test-results'
    command_constructor = None

    def setUp(self):
        self.tool = MockTool()
        self.command = self.command_constructor()  # lint warns that command_constructor might not be set, but this is intentional; pylint: disable=E1102
        self.command.bind_to_tool(self.tool)
        self.lion_port = self.tool.port_factory.get_from_builder_name("WebKit Mac10.7")
        self.lion_expectations_path = self.lion_port.path_to_generic_test_expectations_file()
        # FIXME: crbug.com/279494. We should override builders._exact_matches
        # here to point to a set of test ports and restore the value in
        # tearDown(), and that way the individual tests wouldn't have to worry
        # about it.

    def _expand(self, path):
        # Absolute paths pass through untouched; relative paths are resolved
        # against the port's LayoutTests directory.
        if self.tool.filesystem.isabs(path):
            return path
        return self.tool.filesystem.join(self.lion_port.layout_tests_dir(), path)

    def _read(self, path):
        return self.tool.filesystem.read_text_file(self._expand(path))

    def _write(self, path, contents):
        self.tool.filesystem.write_text_file(self._expand(path), contents)

    def _zero_out_test_expectations(self):
        # Empty every expectations file of every known port, then reset the
        # mock filesystem's written-files record.
        for port_name in self.tool.port_factory.all_port_names():
            port = self.tool.port_factory.get(port_name)
            for path in port.expectations_files():
                self._write(path, '')
        self.tool.filesystem.written_files = {}

    def _setup_mock_builder_data(self):
        # Canned layout-test results shared by several builders below.
        data = LayoutTestResults.results_from_string("""ADD_RESULTS({
    "tests": {
        "userscripts": {
            "first-test.html": {
                "expected": "PASS",
                "actual": "IMAGE+TEXT"
            },
            "second-test.html": {
                "expected": "FAIL",
                "actual": "IMAGE+TEXT"
            }
        }
    }
});""")
        # FIXME: crbug.com/279494 - we shouldn't be mixing mock and real builder names.
        for builder in ['MOCK builder', 'MOCK builder (Debug)', 'WebKit Mac10.7']:
            self.command._builder_data[builder] = data
class TestCopyExistingBaselinesInternal(_BaseTestCase):
    """Tests for the copy-existing-baselines-internal command.

    Each test temporarily replaces the global builders._exact_matches table
    with a small mock mapping and restores it in a finally block.
    """
    command_constructor = CopyExistingBaselinesInternal

    def setUp(self):
        super(TestCopyExistingBaselinesInternal, self).setUp()

    def test_copying_overwritten_baseline(self):
        self.tool.executive = MockExecutive2()
        # FIXME: crbug.com/279494. it's confusing that this is the test- port, and not the regular lion port. Really all of the tests should be using the test ports.
        port = self.tool.port_factory.get('test-mac-snowleopard')
        self._write(port._filesystem.join(port.layout_tests_dir(), 'platform/test-mac-snowleopard/failures/expected/image-expected.txt'), 'original snowleopard result')
        old_exact_matches = builders._exact_matches
        oc = OutputCapture()
        try:
            builders._exact_matches = {
                "MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
                "MOCK SnowLeopard": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
            }
            options = MockOptions(builder="MOCK SnowLeopard", suffixes="txt", verbose=True, test="failures/expected/image.html", results_directory=None)
            oc.capture_output()
            self.command.execute(options, [], self.tool)
        finally:
            out, _, _ = oc.restore_output()
            builders._exact_matches = old_exact_matches
        # The SnowLeopard baseline must have been copied down to Leopard.
        self.assertMultiLineEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-mac-leopard/failures/expected/image-expected.txt')), 'original snowleopard result')
        self.assertMultiLineEqual(out, '{"add": [], "remove-lines": []}\n')

    def test_copying_overwritten_baseline_to_multiple_locations(self):
        self.tool.executive = MockExecutive2()
        # FIXME: crbug.com/279494. it's confusing that this is the test- port, and not the regular win port. Really all of the tests should be using the test ports.
        port = self.tool.port_factory.get('test-win-win7')
        self._write(port._filesystem.join(port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt'), 'original win7 result')
        old_exact_matches = builders._exact_matches
        oc = OutputCapture()
        try:
            builders._exact_matches = {
                "MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
                "MOCK Linux": {"port_name": "test-linux-x86_64", "specifiers": set(["mock-specifier"])},
                "MOCK Win7": {"port_name": "test-win-win7", "specifiers": set(["mock-specifier"])},
            }
            options = MockOptions(builder="MOCK Win7", suffixes="txt", verbose=True, test="failures/expected/image.html", results_directory=None)
            oc.capture_output()
            self.command.execute(options, [], self.tool)
        finally:
            out, _, _ = oc.restore_output()
            builders._exact_matches = old_exact_matches
        # Copied to the Linux fallback, but not to an unrelated mac path.
        self.assertMultiLineEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-linux-x86_64/failures/expected/image-expected.txt')), 'original win7 result')
        self.assertFalse(self.tool.filesystem.exists(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/mac-leopard/userscripts/another-test-expected.txt')))
        self.assertMultiLineEqual(out, '{"add": [], "remove-lines": []}\n')

    def test_no_copy_existing_baseline(self):
        self.tool.executive = MockExecutive2()
        # FIXME: it's confusing that this is the test- port, and not the regular win port. Really all of the tests should be using the test ports.
        port = self.tool.port_factory.get('test-win-win7')
        self._write(port._filesystem.join(port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt'), 'original win7 result')
        old_exact_matches = builders._exact_matches
        oc = OutputCapture()
        try:
            builders._exact_matches = {
                "MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
                "MOCK Linux": {"port_name": "test-linux-x86_64", "specifiers": set(["mock-specifier"])},
                "MOCK Win7": {"port_name": "test-win-win7", "specifiers": set(["mock-specifier"])},
            }
            options = MockOptions(builder="MOCK Win7", suffixes="txt", verbose=True, test="failures/expected/image.html", results_directory=None)
            oc.capture_output()
            self.command.execute(options, [], self.tool)
        finally:
            out, _, _ = oc.restore_output()
            builders._exact_matches = old_exact_matches
        # The source baseline must be left untouched.
        self.assertMultiLineEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-linux-x86_64/failures/expected/image-expected.txt')), 'original win7 result')
        self.assertMultiLineEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt')), 'original win7 result')
        self.assertFalse(self.tool.filesystem.exists(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/mac-leopard/userscripts/another-test-expected.txt')))
        self.assertMultiLineEqual(out, '{"add": [], "remove-lines": []}\n')

    def test_no_copy_skipped_test(self):
        self.tool.executive = MockExecutive2()
        port = self.tool.port_factory.get('test-win-win7')
        fs = self.tool.filesystem
        self._write(fs.join(port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt'), 'original win7 result')
        # Skip the test on Linux; the baseline must then not be copied there.
        expectations_path = fs.join(port.path_to_generic_test_expectations_file())
        self._write(expectations_path, (
            "[ Win ] failures/expected/image.html [ Failure ]\n"
            "[ Linux ] failures/expected/image.html [ Skip ]\n"))
        old_exact_matches = builders._exact_matches
        oc = OutputCapture()
        try:
            builders._exact_matches = {
                "MOCK Linux": {"port_name": "test-linux-x86_64", "specifiers": set(["mock-specifier"])},
                "MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
                "MOCK Win7": {"port_name": "test-win-win7", "specifiers": set(["mock-specifier"])},
            }
            options = MockOptions(builder="MOCK Win7", suffixes="txt", verbose=True, test="failures/expected/image.html", results_directory=None)
            oc.capture_output()
            self.command.execute(options, [], self.tool)
        finally:
            out, _, _ = oc.restore_output()
            builders._exact_matches = old_exact_matches
        self.assertFalse(fs.exists(fs.join(port.layout_tests_dir(), 'platform/test-linux-x86_64/failures/expected/image-expected.txt')))
        self.assertEqual(self._read(fs.join(port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt')),
                         'original win7 result')
class TestRebaselineTest(_BaseTestCase):
command_constructor = RebaselineTest # AKA webkit-patch rebaseline-test-internal
def setUp(self):
super(TestRebaselineTest, self).setUp()
self.options = MockOptions(builder="WebKit Mac10.7", test="userscripts/another-test.html", suffixes="txt", results_directory=None)
def test_baseline_directory(self):
command = self.command
self.assertMultiLineEqual(command._baseline_directory("WebKit Mac10.7"), "/mock-checkout/third_party/WebKit/LayoutTests/platform/mac-lion")
self.assertMultiLineEqual(command._baseline_directory("WebKit Mac10.6"), "/mock-checkout/third_party/WebKit/LayoutTests/platform/mac-snowleopard")
def test_rebaseline_updates_expectations_file_noop(self):
self._zero_out_test_expectations()
self._write(self.lion_expectations_path, """Bug(B) [ Mac Linux XP Debug ] fast/dom/Window/window-postmessage-clone-really-deep-array.html [ Pass ]
Bug(A) [ Debug ] : fast/css/large-list-of-rules-crash.html [ Failure ]
""")
self._write("fast/dom/Window/window-postmessage-clone-really-deep-array.html", "Dummy test contents")
self._write("fast/css/large-list-of-rules-crash.html", "Dummy test contents")
self._write("userscripts/another-test.html", "Dummy test contents")
self.options.suffixes = "png,wav,txt"
self.command._rebaseline_test_and_update_expectations(self.options)
self.assertItemsEqual(self.tool.web.urls_fetched,
[self.WEB_PREFIX + '/userscripts/another-test-actual.png',
self.WEB_PREFIX + '/userscripts/another-test-actual.wav',
self.WEB_PREFIX + '/userscripts/another-test-actual.txt'])
new_expectations = self._read(self.lion_expectations_path)
self.assertMultiLineEqual(new_expectations, """Bug(B) [ Mac Linux XP Debug ] fast/dom/Window/window-postmessage-clone-really-deep-array.html [ Pass ]
Bug(A) [ Debug ] : fast/css/large-list-of-rules-crash.html [ Failure ]
""")
def test_rebaseline_test(self):
self.command._rebaseline_test("WebKit Linux", "userscripts/another-test.html", "txt", self.WEB_PREFIX)
self.assertItemsEqual(self.tool.web.urls_fetched, [self.WEB_PREFIX + '/userscripts/another-test-actual.txt'])
    def test_rebaseline_test_with_results_directory(self):
        """With --results-directory set, baselines are read from the local
        directory via file:// URLs rather than fetched from the bots."""
        self._write("userscripts/another-test.html", "test data")
        self._write(self.lion_expectations_path, "Bug(x) [ Mac ] userscripts/another-test.html [ ImageOnlyFailure ]\nbug(z) [ Linux ] userscripts/another-test.html [ ImageOnlyFailure ]\n")
        self.options.results_directory = '/tmp'
        self.command._rebaseline_test_and_update_expectations(self.options)
        self.assertItemsEqual(self.tool.web.urls_fetched, ['file:///tmp/userscripts/another-test-actual.txt'])
    def test_rebaseline_reftest(self):
        """Reftests cannot be rebaselined: expect an error log and no pending
        SCM changes."""
        self._write("userscripts/another-test.html", "test data")
        # The -expected.html file marks the test as a reftest.
        self._write("userscripts/another-test-expected.html", "generic result")
        OutputCapture().assert_outputs(self, self.command._rebaseline_test_and_update_expectations, args=[self.options],
            expected_logs="Cannot rebaseline reftest: userscripts/another-test.html\n")
        self.assertDictEqual(self.command._scm_changes, {'add': [], 'remove-lines': []})
    def test_rebaseline_test_and_print_scm_changes(self):
        """When the baseline file does not yet exist in the SCM, rebaselining
        should record it as an 'add' in _scm_changes."""
        self.command._print_scm_changes = True
        self.command._scm_changes = {'add': [], 'delete': []}
        # Pretend nothing exists in the SCM so the new baseline counts as an add.
        self.tool._scm.exists = lambda x: False
        self.command._rebaseline_test("WebKit Linux", "userscripts/another-test.html", "txt", None)
        self.assertDictEqual(self.command._scm_changes, {'add': ['/mock-checkout/third_party/WebKit/LayoutTests/platform/linux/userscripts/another-test-expected.txt'], 'delete': []})
    def test_rebaseline_test_internal_with_port_that_lacks_buildbot(self):
        """End-to-end run of the command: the win7 baseline is overwritten
        with the mock web result, no stale baseline is created for the xp
        port, and the removed expectation line is reported as JSON on stdout."""
        self.tool.executive = MockExecutive2()
        # FIXME: it's confusing that this is the test- port, and not the regular win port. Really all of the tests should be using the test ports.
        port = self.tool.port_factory.get('test-win-win7')
        self._write(port._filesystem.join(port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt'), 'original win7 result')
        # Temporarily install a minimal builder table; restored in 'finally'.
        old_exact_matches = builders._exact_matches
        oc = OutputCapture()
        try:
            builders._exact_matches = {
                "MOCK XP": {"port_name": "test-win-xp"},
                "MOCK Win7": {"port_name": "test-win-win7"},
            }
            options = MockOptions(optimize=True, builder="MOCK Win7", suffixes="txt",
                verbose=True, test="failures/expected/image.html", results_directory=None)
            oc.capture_output()
            self.command.execute(options, [], self.tool)
        finally:
            out, _, _ = oc.restore_output()
            builders._exact_matches = old_exact_matches
        # The win7 baseline was replaced by the mocked web fetch result...
        self.assertMultiLineEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt')), 'MOCK Web result, convert 404 to None=True')
        # ...and nothing was written for the xp port.
        self.assertFalse(self.tool.filesystem.exists(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-win-xp/failures/expected/image-expected.txt')))
        self.assertMultiLineEqual(out, '{"add": [], "remove-lines": [{"test": "failures/expected/image.html", "builder": "MOCK Win7"}]}\n')
class TestAbstractParallelRebaselineCommand(_BaseTestCase):
    """Tests for AbstractParallelRebaselineCommand helpers."""
    command_constructor = AbstractParallelRebaselineCommand

    def test_builders_to_fetch_from(self):
        """Only one builder per port should be selected, and a release
        builder wins over its (dbg) variants for the same port."""
        saved_matches = builders._exact_matches
        try:
            builders._exact_matches = {
                "MOCK XP": {"port_name": "test-win-xp"},
                "MOCK Win7": {"port_name": "test-win-win7"},
                "MOCK Win7 (dbg)(1)": {"port_name": "test-win-win7"},
                "MOCK Win7 (dbg)(2)": {"port_name": "test-win-win7"},
            }
            requested = ["MOCK XP", "MOCK Win7 (dbg)(1)", "MOCK Win7 (dbg)(2)", "MOCK Win7"]
            selected = self.command._builders_to_fetch_from(requested)
            self.assertEqual(selected, ["MOCK XP", "MOCK Win7"])
        finally:
            builders._exact_matches = saved_matches
class TestRebaselineJson(_BaseTestCase):
    """Tests for the rebaseline-json command, which drives the
    copy-existing-baselines-internal / rebaseline-test-internal /
    optimize-baselines subcommands from a test-to-builder mapping."""
    command_constructor = RebaselineJson

    def setUp(self):
        super(TestRebaselineJson, self).setUp()
        self.tool.executive = MockExecutive2()
        # Install a minimal builder table; restored in tearDown().
        self.old_exact_matches = builders._exact_matches
        builders._exact_matches = {
            "MOCK builder": {"port_name": "test-mac-snowleopard"},
            "MOCK builder (Debug)": {"port_name": "test-mac-snowleopard"},
        }

    def tearDown(self):
        builders._exact_matches = self.old_exact_matches
        super(TestRebaselineJson, self).tearDown()

    def test_rebaseline_test_passes_on_all_builders(self):
        """If the bot reports the test now passes, no baselines are copied or
        fetched; only the optimize step runs (with an empty suffix list)."""
        self._setup_mock_builder_data()

        def builder_data():
            # The builder reports PASS for a test expected NEEDSREBASELINE.
            self.command._builder_data['MOCK builder'] = LayoutTestResults.results_from_string("""ADD_RESULTS({
"tests": {
"userscripts": {
"first-test.html": {
"expected": "NEEDSREBASELINE",
"actual": "PASS"
}
}
}
});""")
            return self.command._builder_data
        self.command.builder_data = builder_data
        options = MockOptions(optimize=True, verbose=True, results_directory=None)
        self._write("userscripts/first-test.html", "Dummy test contents")
        self.command._rebaseline(options, {"userscripts/first-test.html": {"MOCK builder": ["txt", "png"]}})
        # Note that we have one run_in_parallel() call followed by a run_command()
        self.assertEqual(self.tool.executive.calls,
            [['echo', '--verbose', 'optimize-baselines', '--suffixes', '', 'userscripts/first-test.html']])

    def test_rebaseline_all(self):
        """Normal flow: one parallel copy pass, one parallel fetch pass, then
        a single optimize-baselines invocation."""
        self._setup_mock_builder_data()
        options = MockOptions(optimize=True, verbose=True, results_directory=None)
        self._write("userscripts/first-test.html", "Dummy test contents")
        self.command._rebaseline(options, {"userscripts/first-test.html": {"MOCK builder": ["txt", "png"]}})
        # Note that we have one run_in_parallel() call followed by a run_command()
        self.assertEqual(self.tool.executive.calls,
            [[['echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--verbose']],
             [['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--verbose']],
             ['echo', '--verbose', 'optimize-baselines', '--suffixes', 'txt,png', 'userscripts/first-test.html']])

    def test_rebaseline_debug(self):
        """Same flow as test_rebaseline_all, but against a (Debug) builder."""
        self._setup_mock_builder_data()
        options = MockOptions(optimize=True, verbose=True, results_directory=None)
        self._write("userscripts/first-test.html", "Dummy test contents")
        self.command._rebaseline(options, {"userscripts/first-test.html": {"MOCK builder (Debug)": ["txt", "png"]}})
        # Note that we have one run_in_parallel() call followed by a run_command()
        self.assertEqual(self.tool.executive.calls,
            [[['echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder (Debug)', '--test', 'userscripts/first-test.html', '--verbose']],
             [['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder (Debug)', '--test', 'userscripts/first-test.html', '--verbose']],
             ['echo', '--verbose', 'optimize-baselines', '--suffixes', 'txt,png', 'userscripts/first-test.html']])

    def test_no_optimize(self):
        """With optimize=False the optimize-baselines step is skipped."""
        self._setup_mock_builder_data()
        options = MockOptions(optimize=False, verbose=True, results_directory=None)
        self._write("userscripts/first-test.html", "Dummy test contents")
        self.command._rebaseline(options, {"userscripts/first-test.html": {"MOCK builder (Debug)": ["txt", "png"]}})
        # Note that we have only one run_in_parallel() call
        self.assertEqual(self.tool.executive.calls,
            [[['echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder (Debug)', '--test', 'userscripts/first-test.html', '--verbose']],
             [['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder (Debug)', '--test', 'userscripts/first-test.html', '--verbose']]])

    def test_results_directory(self):
        """--results-directory is forwarded to the internal subcommands."""
        self._setup_mock_builder_data()
        options = MockOptions(optimize=False, verbose=True, results_directory='/tmp')
        self._write("userscripts/first-test.html", "Dummy test contents")
        self.command._rebaseline(options, {"userscripts/first-test.html": {"MOCK builder": ["txt", "png"]}})
        # Note that we have only one run_in_parallel() call
        self.assertEqual(self.tool.executive.calls,
            [[['echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--results-directory', '/tmp', '--verbose']],
             [['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--results-directory', '/tmp', '--verbose']]])
class TestRebaselineJsonUpdatesExpectationsFiles(_BaseTestCase):
    """Tests that rebaseline-json rewrites the expectations files based on the
    'remove-lines' entries reported back by the internal subcommands."""
    command_constructor = RebaselineJson

    def setUp(self):
        super(TestRebaselineJsonUpdatesExpectationsFiles, self).setUp()
        self.tool.executive = MockExecutive2()

        # Every internal command reports that the 'WebKit Mac10.7' line for
        # first-test.html should be removed from the expectations file.
        def mock_run_command(args,
                             cwd=None,
                             input=None,
                             error_handler=None,
                             return_exit_code=False,
                             return_stderr=True,
                             decode_output=False,
                             env=None):
            return '{"add": [], "remove-lines": [{"test": "userscripts/first-test.html", "builder": "WebKit Mac10.7"}]}\n'
        self.tool.executive.run_command = mock_run_command

    def test_rebaseline_updates_expectations_file(self):
        """The Mac line is narrowed to the Mac versions that were not
        rebaselined; the Linux line is left untouched."""
        options = MockOptions(optimize=False, verbose=True, results_directory=None)
        self._write(self.lion_expectations_path, "Bug(x) [ Mac ] userscripts/first-test.html [ ImageOnlyFailure ]\nbug(z) [ Linux ] userscripts/first-test.html [ ImageOnlyFailure ]\n")
        self._write("userscripts/first-test.html", "Dummy test contents")
        self._setup_mock_builder_data()
        self.command._rebaseline(options, {"userscripts/first-test.html": {"WebKit Mac10.7": ["txt", "png"]}})
        new_expectations = self._read(self.lion_expectations_path)
        self.assertMultiLineEqual(new_expectations, "Bug(x) [ Mavericks MountainLion Retina SnowLeopard ] userscripts/first-test.html [ ImageOnlyFailure ]\nbug(z) [ Linux ] userscripts/first-test.html [ ImageOnlyFailure ]\n")

    def test_rebaseline_updates_expectations_file_all_platforms(self):
        """An unqualified line gets qualified with every platform except the
        one that was just rebaselined."""
        options = MockOptions(optimize=False, verbose=True, results_directory=None)
        self._write(self.lion_expectations_path, "Bug(x) userscripts/first-test.html [ ImageOnlyFailure ]\n")
        self._write("userscripts/first-test.html", "Dummy test contents")
        self._setup_mock_builder_data()
        self.command._rebaseline(options, {"userscripts/first-test.html": {"WebKit Mac10.7": ["txt", "png"]}})
        new_expectations = self._read(self.lion_expectations_path)
        self.assertMultiLineEqual(new_expectations, "Bug(x) [ Android Linux Mavericks MountainLion Retina SnowLeopard Win ] userscripts/first-test.html [ ImageOnlyFailure ]\n")

    def test_rebaseline_handles_platform_skips(self):
        # This test is just like test_rebaseline_updates_expectations_file_all_platforms(),
        # except that if a particular port happens to SKIP a test in an overrides file,
        # we count that as passing, and do not think that we still need to rebaseline it.
        options = MockOptions(optimize=False, verbose=True, results_directory=None)
        self._write(self.lion_expectations_path, "Bug(x) userscripts/first-test.html [ ImageOnlyFailure ]\n")
        self._write("NeverFixTests", "Bug(y) [ Android ] userscripts [ Skip ]\n")
        self._write("userscripts/first-test.html", "Dummy test contents")
        self._setup_mock_builder_data()
        self.command._rebaseline(options, {"userscripts/first-test.html": {"WebKit Mac10.7": ["txt", "png"]}})
        new_expectations = self._read(self.lion_expectations_path)
        # Android is absent below because of the Skip in NeverFixTests.
        self.assertMultiLineEqual(new_expectations, "Bug(x) [ Linux Mavericks MountainLion Retina SnowLeopard Win ] userscripts/first-test.html [ ImageOnlyFailure ]\n")

    def test_rebaseline_handles_skips_in_file(self):
        # This test is like test_rebaseline_handles_platform_skips, except that the
        # Skip is in the same (generic) file rather than a platform file. In this case,
        # the Skip line should be left unmodified. Note that the first line is now
        # qualified as "[Linux Mac Win]"; if it was unqualified, it would conflict with
        # the second line.
        options = MockOptions(optimize=False, verbose=True, results_directory=None)
        self._write(self.lion_expectations_path,
            ("Bug(x) [ Linux Mac Win ] userscripts/first-test.html [ ImageOnlyFailure ]\n"
             "Bug(y) [ Android ] userscripts/first-test.html [ Skip ]\n"))
        self._write("userscripts/first-test.html", "Dummy test contents")
        self._setup_mock_builder_data()
        self.command._rebaseline(options, {"userscripts/first-test.html": {"WebKit Mac10.7": ["txt", "png"]}})
        new_expectations = self._read(self.lion_expectations_path)
        self.assertMultiLineEqual(new_expectations,
            ("Bug(x) [ Linux Mavericks MountainLion Retina SnowLeopard Win ] userscripts/first-test.html [ ImageOnlyFailure ]\n"
             "Bug(y) [ Android ] userscripts/first-test.html [ Skip ]\n"))

    def test_rebaseline_handles_smoke_tests(self):
        # This test is just like test_rebaseline_handles_platform_skips, except that we check for
        # a test not being in the SmokeTests file, instead of using overrides files.
        # If a test is not part of the smoke tests, we count that as passing on ports that only
        # run smoke tests, and do not think that we still need to rebaseline it.
        options = MockOptions(optimize=False, verbose=True, results_directory=None)
        self._write(self.lion_expectations_path, "Bug(x) userscripts/first-test.html [ ImageOnlyFailure ]\n")
        self._write("SmokeTests", "fast/html/article-element.html")
        self._write("userscripts/first-test.html", "Dummy test contents")
        self._setup_mock_builder_data()
        self.command._rebaseline(options, {"userscripts/first-test.html": {"WebKit Mac10.7": ["txt", "png"]}})
        new_expectations = self._read(self.lion_expectations_path)
        self.assertMultiLineEqual(new_expectations, "Bug(x) [ Linux Mavericks MountainLion Retina SnowLeopard Win ] userscripts/first-test.html [ ImageOnlyFailure ]\n")
class TestRebaseline(_BaseTestCase):
    # This command shares most of its logic with RebaselineJson, so these tests just test what is different.
    command_constructor = Rebaseline  # AKA webkit-patch rebaseline

    def test_rebaseline(self):
        """Rebaselining one explicit test runs a copy pass then a fetch pass
        for that test against the builder being pulled from."""
        self.command._builders_to_pull_from = lambda: [MockBuilder('MOCK builder')]
        self._write("userscripts/first-test.html", "test data")
        self._zero_out_test_expectations()
        self._setup_mock_builder_data()
        # Install a one-builder table; restored in 'finally'.
        old_exact_matches = builders._exact_matches
        try:
            builders._exact_matches = {
                "MOCK builder": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
            }
            self.command.execute(MockOptions(results_directory=False, optimize=False, builders=None, suffixes="txt,png", verbose=True), ['userscripts/first-test.html'], self.tool)
        finally:
            builders._exact_matches = old_exact_matches

        # Drop incidental qmake/perl probing calls before comparing.
        calls = filter(lambda x: x != ['qmake', '-v'] and x[0] != 'perl', self.tool.executive.calls)
        self.assertEqual(calls,
            [[['echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--verbose']],
             [['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--verbose']]])

    def test_rebaseline_directory(self):
        """Passing a directory rebaselines every test under it."""
        self.command._builders_to_pull_from = lambda: [MockBuilder('MOCK builder')]
        self._write("userscripts/first-test.html", "test data")
        self._write("userscripts/second-test.html", "test data")
        self._setup_mock_builder_data()
        old_exact_matches = builders._exact_matches
        try:
            builders._exact_matches = {
                "MOCK builder": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
            }
            self.command.execute(MockOptions(results_directory=False, optimize=False, builders=None, suffixes="txt,png", verbose=True), ['userscripts'], self.tool)
        finally:
            builders._exact_matches = old_exact_matches

        # Drop incidental qmake/perl probing calls before comparing.
        calls = filter(lambda x: x != ['qmake', '-v'] and x[0] != 'perl', self.tool.executive.calls)
        self.assertEqual(calls,
            [[['echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--verbose'],
              ['echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/second-test.html', '--verbose']],
             [['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--verbose'],
              ['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/second-test.html', '--verbose']]])
class TestRebaselineExpectations(_BaseTestCase):
    """Tests for the rebaseline-expectations command, which rebaselines every
    test marked [ Rebaseline ] in the expectations files."""
    command_constructor = RebaselineExpectations

    def setUp(self):
        super(TestRebaselineExpectations, self).setUp()
        self.options = MockOptions(optimize=False, builders=None, suffixes=['txt'], verbose=False, platform=None, results_directory=None)

    def test_rebaseline_expectations(self):
        """Each (test, suffix) pair is copied and fetched once per builder;
        a test the bots report as not actually failing is skipped."""
        self._zero_out_test_expectations()
        self.tool.executive = MockExecutive2()

        def builder_data():
            # Both builders report the same two failures;
            # not-actually-failing.html is absent, so it must be skipped.
            self.command._builder_data['MOCK SnowLeopard'] = self.command._builder_data['MOCK Leopard'] = LayoutTestResults.results_from_string("""ADD_RESULTS({
"tests": {
"userscripts": {
"another-test.html": {
"expected": "PASS",
"actual": "PASS TEXT"
},
"images.svg": {
"expected": "FAIL",
"actual": "IMAGE+TEXT"
}
}
}
});""")
            return self.command._builder_data
        self.command.builder_data = builder_data
        self._write("userscripts/another-test.html", "Dummy test contents")
        self._write("userscripts/images.svg", "Dummy test contents")
        self.command._tests_to_rebaseline = lambda port: {
            'userscripts/another-test.html': set(['txt']),
            'userscripts/images.svg': set(['png']),
            'userscripts/not-actually-failing.html': set(['txt', 'png', 'wav']),
        }
        old_exact_matches = builders._exact_matches
        try:
            builders._exact_matches = {
                "MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
                "MOCK SnowLeopard": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
            }
            self.command.execute(self.options, [], self.tool)
        finally:
            builders._exact_matches = old_exact_matches

        # FIXME: change this to use the test- ports.
        # Drop incidental qmake probing calls and assert on the filtered list.
        # (The previous code computed 'calls' but then asserted on the raw
        # self.tool.executive.calls, leaving the filter as dead code; the
        # sibling TestRebaseline tests assert on the filtered list.)
        calls = list(filter(lambda x: x != ['qmake', '-v'], self.tool.executive.calls))
        self.assertEqual(calls, [
            [
                ['echo', 'copy-existing-baselines-internal', '--suffixes', 'txt', '--builder', 'MOCK Leopard', '--test', 'userscripts/another-test.html'],
                ['echo', 'copy-existing-baselines-internal', '--suffixes', 'txt', '--builder', 'MOCK SnowLeopard', '--test', 'userscripts/another-test.html'],
                ['echo', 'copy-existing-baselines-internal', '--suffixes', 'png', '--builder', 'MOCK Leopard', '--test', 'userscripts/images.svg'],
                ['echo', 'copy-existing-baselines-internal', '--suffixes', 'png', '--builder', 'MOCK SnowLeopard', '--test', 'userscripts/images.svg']
            ],
            [
                ['echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'MOCK Leopard', '--test', 'userscripts/another-test.html'],
                ['echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'MOCK SnowLeopard', '--test', 'userscripts/another-test.html'],
                ['echo', 'rebaseline-test-internal', '--suffixes', 'png', '--builder', 'MOCK Leopard', '--test', 'userscripts/images.svg'],
                ['echo', 'rebaseline-test-internal', '--suffixes', 'png', '--builder', 'MOCK SnowLeopard', '--test', 'userscripts/images.svg']
            ]
        ])

    def test_rebaseline_expectations_noop(self):
        """With nothing marked Rebaseline, no files are written and a log
        message explains why."""
        self._zero_out_test_expectations()
        oc = OutputCapture()
        try:
            oc.capture_output()
            self.command.execute(self.options, [], self.tool)
        finally:
            _, _, logs = oc.restore_output()
            self.assertEqual(self.tool.filesystem.written_files, {})
            self.assertEqual(logs, 'Did not find any tests marked Rebaseline.\n')

    def disabled_test_overrides_are_included_correctly(self):
        # This tests that any tests marked as REBASELINE in the overrides are found, but
        # that the overrides do not get written into the main file.
        self._zero_out_test_expectations()
        self._write(self.lion_expectations_path, '')
        self.lion_port.expectations_dict = lambda: {
            self.lion_expectations_path: '',
            'overrides': ('Bug(x) userscripts/another-test.html [ Failure Rebaseline ]\n'
                          'Bug(y) userscripts/test.html [ Crash ]\n')}
        self._write('/userscripts/another-test.html', '')
        self.assertDictEqual(self.command._tests_to_rebaseline(self.lion_port), {'userscripts/another-test.html': set(['png', 'txt', 'wav'])})
        self.assertEqual(self._read(self.lion_expectations_path), '')

    def test_rebaseline_without_other_expectations(self):
        """A bare [ Rebaseline ] line implies all three baseline suffixes."""
        self._write("userscripts/another-test.html", "Dummy test contents")
        self._write(self.lion_expectations_path, "Bug(x) userscripts/another-test.html [ Rebaseline ]\n")
        self.assertDictEqual(self.command._tests_to_rebaseline(self.lion_port), {'userscripts/another-test.html': ('png', 'wav', 'txt')})
class _FakeOptimizer(BaselineOptimizer):
    """Stub optimizer: pretends only a single generic text baseline exists."""

    def read_results_by_directory(self, baseline_name):
        # Only text baselines have a (fake) result; everything else is empty.
        if not baseline_name.endswith('txt'):
            return {}
        return {'LayoutTests/passes/text.html': '123456'}
class TestAnalyzeBaselines(_BaseTestCase):
    """Tests for the analyze-baselines command's output formatting."""
    command_constructor = AnalyzeBaselines

    def setUp(self):
        super(TestAnalyzeBaselines, self).setUp()
        self.port = self.tool.port_factory.get('test')
        # Force every port lookup to return the same test port.
        self.tool.port_factory.get = (lambda port_name=None, options=None: self.port)
        self.lines = []
        self.command._optimizer_class = _FakeOptimizer
        # Capture the command's output lines. Use the bound method directly
        # instead of the unnecessary lambda the original comment flagged
        # (pylint W0108).
        self.command._write = self.lines.append

    def test_default(self):
        """A text baseline with one result prints its directory and checksum."""
        self.command.execute(MockOptions(suffixes='txt', missing=False, platform=None), ['passes/text.html'], self.tool)
        self.assertEqual(self.lines,
            ['passes/text-expected.txt:',
             ' (generic): 123456'])

    def test_missing_baselines(self):
        """With --missing, suffixes that have no baselines are reported too."""
        self.command.execute(MockOptions(suffixes='png,txt', missing=True, platform=None), ['passes/text.html'], self.tool)
        self.assertEqual(self.lines,
            ['passes/text-expected.png: (no baselines found)',
             'passes/text-expected.txt:',
             ' (generic): 123456'])
class TestAutoRebaseline(_BaseTestCase):
    """Tests for the auto-rebaseline command; these tests drive it by stubbing
    scm blame output and per-builder results."""
    command_constructor = AutoRebaseline
def _write_test_file(self, port, path, contents):
abs_path = self.tool.filesystem.join(port.layout_tests_dir(), path)
self.tool.filesystem.write_text_file(abs_path, contents)
    def setUp(self):
        super(TestAutoRebaseline, self).setUp()
        # Pretend every bot has cycled through r9000, so blame entries at or
        # below that revision count as fully processed.
        self.command.latest_revision_processed_on_all_bots = lambda log_server: 9000
        self.command.bot_revision_data = lambda log_server: [{"builder": "Mock builder", "revision": "9000"}]
    def test_tests_to_rebaseline(self):
        """Only NeedsRebaseline lines from commits that every bot has cycled
        through (revision <= min_revision, non-zero hash) are selected."""
        def blame(path):
            # Mock 'blame' output: hash, file, (author date line#), content.
            # Lines include a non-NeedsRebaseline entry, entries with and
            # without bug numbers/modifiers, a newer revision, an uncycled
            # commit, and a locally modified (all-zero hash) line.
            return """
624c3081c0 path/to/TestExpectations (foobarbaz1@chromium.org 2013-06-14 20:18:46 +0000 11) crbug.com/24182 [ Debug ] path/to/norebaseline.html [ ImageOnlyFailure ]
624c3081c0 path/to/TestExpectations (foobarbaz1@chromium.org 2013-04-28 04:52:41 +0000 13) Bug(foo) path/to/rebaseline-without-bug-number.html [ NeedsRebaseline ]
624c3081c0 path/to/TestExpectations (foobarbaz1@chromium.org 2013-06-14 20:18:46 +0000 11) crbug.com/24182 [ Debug ] path/to/rebaseline-with-modifiers.html [ NeedsRebaseline ]
624c3081c0 path/to/TestExpectations (foobarbaz1@chromium.org 2013-04-28 04:52:41 +0000 12) crbug.com/24182 crbug.com/234 path/to/rebaseline-without-modifiers.html [ NeedsRebaseline ]
6469e754a1 path/to/TestExpectations (foobarbaz1@chromium.org 2013-04-28 04:52:41 +0000 12) crbug.com/24182 path/to/rebaseline-new-revision.html [ NeedsRebaseline ]
624caaaaaa path/to/TestExpectations (foo@chromium.org 2013-04-28 04:52:41 +0000 12) crbug.com/24182 path/to/not-cycled-through-bots.html [ NeedsRebaseline ]
0000000000 path/to/TestExpectations (foo@chromium.org 2013-04-28 04:52:41 +0000 12) crbug.com/24182 path/to/locally-changed-lined.html [ NeedsRebaseline ]
"""
        self.tool.scm().blame = blame

        min_revision = 9000
        # Expected tuple: (tests, revision, author, bug numbers, has_any_needs_rebaseline)
        # -- revision 5678 presumably comes from the mocked scm; TODO confirm.
        self.assertEqual(self.command.tests_to_rebaseline(self.tool, min_revision, print_revisions=False, log_server=None), (
            set(['path/to/rebaseline-without-bug-number.html', 'path/to/rebaseline-with-modifiers.html', 'path/to/rebaseline-without-modifiers.html']),
            5678,
            'foobarbaz1@chromium.org',
            set(['24182', '234']),
            True))
def test_tests_to_rebaseline_over_limit(self):
def blame(path):
result = ""
for i in range(0, self.command.MAX_LINES_TO_REBASELINE + 1):
result += "624c3081c0 path/to/TestExpectations (foobarbaz1@chromium.org 2013-04-28 04:52:41 +0000 13) crbug.com/24182 path/to/rebaseline-%s.html [ NeedsRebaseline ]\n" % i
return result
self.tool.scm().blame = blame
expected_list_of_tests = []
for i in range(0, self.command.MAX_LINES_TO_REBASELINE):
expected_list_of_tests.append("path/to/rebaseline-%s.html" % i)
min_revision = 9000
self.assertEqual(self.command.tests_to_rebaseline(self.tool, min_revision, print_revisions=False, log_server=None), (
set(expected_list_of_tests),
5678,
'foobarbaz1@chromium.org',
set(['24182']),
True))
    def test_commit_message(self):
        """The commit message should carry the revision URL and a TBR= line,
        plus a BUG= line only when bug numbers are present."""
        author = "foo@chromium.org"
        revision = 1234
        bugs = set()
        # No bugs: no BUG= line.
        self.assertEqual(self.command.commit_message(author, revision, bugs),
            """Auto-rebaseline for r1234
http://src.chromium.org/viewvc/blink?view=revision&revision=1234
TBR=foo@chromium.org
""")

        # With bugs: a comma-separated BUG= line is inserted before TBR=.
        bugs = set(["234", "345"])
        self.assertEqual(self.command.commit_message(author, revision, bugs),
            """Auto-rebaseline for r1234
http://src.chromium.org/viewvc/blink?view=revision&revision=1234
BUG=234,345
TBR=foo@chromium.org
""")
    def test_no_needs_rebaseline_lines(self):
        """If blame contains no NeedsRebaseline lines, nothing is executed."""
        def blame(path):
            return """
6469e754a1 path/to/TestExpectations (foobarbaz1@chromium.org 2013-06-14 20:18:46 +0000 11) crbug.com/24182 [ Debug ] path/to/norebaseline.html [ ImageOnlyFailure ]
"""
        self.tool.scm().blame = blame

        self.command.execute(MockOptions(optimize=True, verbose=False, move_overwritten_baselines=False, results_directory=False, log_server=None), [], self.tool)
        self.assertEqual(self.tool.executive.calls, [])
    def test_execute(self):
        """Full auto-rebaseline run: a closed tree does nothing; an open tree
        rebaselines each failing (test, suffix) pair per builder, optimizes,
        pulls, and rewrites the NeedsRebaseline lines so that only the ports
        NOT covered by the mocked builders keep them."""
        def blame(path):
            return """
6469e754a1 path/to/TestExpectations (foobarbaz1@chromium.org 2013-06-14 20:18:46 +0000 11) # Test NeedsRebaseline being in a comment doesn't bork parsing.
6469e754a1 path/to/TestExpectations (foobarbaz1@chromium.org 2013-06-14 20:18:46 +0000 11) crbug.com/24182 [ Debug ] path/to/norebaseline.html [ ImageOnlyFailure ]
6469e754a1 path/to/TestExpectations (foobarbaz1@chromium.org 2013-04-28 04:52:41 +0000 13) Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
6469e754a1 path/to/TestExpectations (foobarbaz1@chromium.org 2013-06-14 20:18:46 +0000 11) crbug.com/24182 [ SnowLeopard ] fast/dom/prototype-strawberry.html [ NeedsRebaseline ]
6469e754a1 path/to/TestExpectations (foobarbaz1@chromium.org 2013-04-28 04:52:41 +0000 12) crbug.com/24182 fast/dom/prototype-chocolate.html [ NeedsRebaseline ]
624caaaaaa path/to/TestExpectations (foo@chromium.org 2013-04-28 04:52:41 +0000 12) crbug.com/24182 path/to/not-cycled-through-bots.html [ NeedsRebaseline ]
0000000000 path/to/TestExpectations (foo@chromium.org 2013-04-28 04:52:41 +0000 12) crbug.com/24182 path/to/locally-changed-lined.html [ NeedsRebaseline ]
"""
        self.tool.scm().blame = blame

        test_port = self.tool.port_factory.get('test')

        original_get = self.tool.port_factory.get

        def get_test_port(port_name=None, options=None, **kwargs):
            if not port_name:
                return test_port
            return original_get(port_name, options, **kwargs)
        # Need to make sure all the ports grabbed use the test checkout path instead of the mock checkout path.
        # FIXME: crbug.com/279494 - we shouldn't be doing this.
        self.tool.port_factory.get = get_test_port

        old_builder_data = self.command.builder_data

        def builder_data():
            old_builder_data()
            # have prototype-chocolate only fail on "MOCK Leopard".
            self.command._builder_data['MOCK SnowLeopard'] = LayoutTestResults.results_from_string("""ADD_RESULTS({
"tests": {
"fast": {
"dom": {
"prototype-taco.html": {
"expected": "PASS",
"actual": "PASS TEXT",
"is_unexpected": true
},
"prototype-chocolate.html": {
"expected": "FAIL",
"actual": "PASS"
},
"prototype-strawberry.html": {
"expected": "PASS",
"actual": "IMAGE PASS",
"is_unexpected": true
}
}
}
}
});""")
            return self.command._builder_data
        self.command.builder_data = builder_data

        self.tool.filesystem.write_text_file(test_port.path_to_generic_test_expectations_file(), """
crbug.com/24182 [ Debug ] path/to/norebaseline.html [ Rebaseline ]
Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
crbug.com/24182 [ SnowLeopard ] fast/dom/prototype-strawberry.html [ NeedsRebaseline ]
crbug.com/24182 fast/dom/prototype-chocolate.html [ NeedsRebaseline ]
crbug.com/24182 path/to/not-cycled-through-bots.html [ NeedsRebaseline ]
crbug.com/24182 path/to/locally-changed-lined.html [ NeedsRebaseline ]
""")

        self._write_test_file(test_port, 'fast/dom/prototype-taco.html', "Dummy test contents")
        self._write_test_file(test_port, 'fast/dom/prototype-strawberry.html', "Dummy test contents")
        self._write_test_file(test_port, 'fast/dom/prototype-chocolate.html', "Dummy test contents")

        # Install a two-builder table; restored in 'finally'.
        old_exact_matches = builders._exact_matches
        try:
            builders._exact_matches = {
                "MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
                "MOCK SnowLeopard": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
            }

            # A closed tree must suppress the whole run.
            self.command.tree_status = lambda: 'closed'
            self.command.execute(MockOptions(optimize=True, verbose=False, move_overwritten_baselines=False, results_directory=False, log_server=None), [], self.tool)
            self.assertEqual(self.tool.executive.calls, [])

            # An open tree runs copy, fetch, optimize, then git pull.
            self.command.tree_status = lambda: 'open'
            self.tool.executive.calls = []
            self.command.execute(MockOptions(optimize=True, verbose=False, move_overwritten_baselines=False, results_directory=False, log_server=None), [], self.tool)

            self.assertEqual(self.tool.executive.calls, [
                [
                    ['echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK Leopard', '--test', 'fast/dom/prototype-chocolate.html'],
                    ['echo', 'copy-existing-baselines-internal', '--suffixes', 'png', '--builder', 'MOCK SnowLeopard', '--test', 'fast/dom/prototype-strawberry.html'],
                    ['echo', 'copy-existing-baselines-internal', '--suffixes', 'txt', '--builder', 'MOCK Leopard', '--test', 'fast/dom/prototype-taco.html'],
                    ['echo', 'copy-existing-baselines-internal', '--suffixes', 'txt', '--builder', 'MOCK SnowLeopard', '--test', 'fast/dom/prototype-taco.html'],
                ],
                [
                    ['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK Leopard', '--test', 'fast/dom/prototype-chocolate.html'],
                    ['echo', 'rebaseline-test-internal', '--suffixes', 'png', '--builder', 'MOCK SnowLeopard', '--test', 'fast/dom/prototype-strawberry.html'],
                    ['echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'MOCK Leopard', '--test', 'fast/dom/prototype-taco.html'],
                    ['echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'MOCK SnowLeopard', '--test', 'fast/dom/prototype-taco.html'],
                ],
                ['echo', 'optimize-baselines', '--suffixes', 'txt,png', 'fast/dom/prototype-chocolate.html'],
                ['echo', 'optimize-baselines', '--suffixes', 'png', 'fast/dom/prototype-strawberry.html'],
                ['echo', 'optimize-baselines', '--suffixes', 'txt', 'fast/dom/prototype-taco.html'],
                ['git', 'pull'],
            ])

            # The mac ports should both be removed since they're the only ones in builders._exact_matches.
            self.assertEqual(self.tool.filesystem.read_text_file(test_port.path_to_generic_test_expectations_file()), """
crbug.com/24182 [ Debug ] path/to/norebaseline.html [ Rebaseline ]
Bug(foo) [ Linux Win ] fast/dom/prototype-taco.html [ NeedsRebaseline ]
crbug.com/24182 [ Linux Win ] fast/dom/prototype-chocolate.html [ NeedsRebaseline ]
crbug.com/24182 path/to/not-cycled-through-bots.html [ NeedsRebaseline ]
crbug.com/24182 path/to/locally-changed-lined.html [ NeedsRebaseline ]
""")
        finally:
            builders._exact_matches = old_exact_matches
def test_execute_test_passes_everywhere(self):
    """When every builder reports an unexpected PASS, the test should be
    rebaselined (copy + rebaseline per builder, then optimize) and its
    NeedsRebaseline line dropped from the generic expectations file for
    the ports covered by builders._exact_matches."""
    def blame(path):
        # Canned `git blame` output: one NeedsRebaseline line, dated old
        # enough to have cycled through the bots.
        return """
6469e754a1 path/to/TestExpectations (foobarbaz1@chromium.org 2013-04-28 04:52:41 +0000 13) Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
"""
    self.tool.scm().blame = blame

    test_port = self.tool.port_factory.get('test')
    original_get = self.tool.port_factory.get

    def get_test_port(port_name=None, options=None, **kwargs):
        if not port_name:
            return test_port
        return original_get(port_name, options, **kwargs)
    # Need to make sure all the ports grabbed use the test checkout path instead of the mock checkout path.
    # FIXME: crbug.com/279494 - we shouldn't be doing this.
    self.tool.port_factory.get = get_test_port

    old_builder_data = self.command.builder_data

    def builder_data():
        # Both builders return the same unexpected-PASS result for the test.
        self.command._builder_data['MOCK Leopard'] = self.command._builder_data['MOCK SnowLeopard'] = LayoutTestResults.results_from_string("""ADD_RESULTS({
"tests": {
"fast": {
"dom": {
"prototype-taco.html": {
"expected": "PASS",
"actual": "PASS TEXT",
"is_unexpected": true
}
}
}
}
});""")
        return self.command._builder_data
    self.command.builder_data = builder_data

    self.tool.filesystem.write_text_file(test_port.path_to_generic_test_expectations_file(), """
Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
""")

    self._write_test_file(test_port, 'fast/dom/prototype-taco.html', "Dummy test contents")

    old_exact_matches = builders._exact_matches
    try:
        builders._exact_matches = {
            "MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
            "MOCK SnowLeopard": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
        }
        self.command.tree_status = lambda: 'open'
        self.command.execute(MockOptions(optimize=True, verbose=False, move_overwritten_baselines=False, results_directory=False, log_server=None), [], self.tool)

        # One copy + one rebaseline call per builder, then a single optimize
        # pass for the txt suffix, then the final `git pull`.
        self.assertEqual(self.tool.executive.calls, [
            [
                ['echo', 'copy-existing-baselines-internal', '--suffixes', 'txt', '--builder', 'MOCK Leopard', '--test', 'fast/dom/prototype-taco.html'],
                ['echo', 'copy-existing-baselines-internal', '--suffixes', 'txt', '--builder', 'MOCK SnowLeopard', '--test', 'fast/dom/prototype-taco.html'],
            ],
            [
                ['echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'MOCK Leopard', '--test', 'fast/dom/prototype-taco.html'],
                ['echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'MOCK SnowLeopard', '--test', 'fast/dom/prototype-taco.html'],
            ],
            ['echo', 'optimize-baselines', '--suffixes', 'txt', 'fast/dom/prototype-taco.html'],
            ['git', 'pull'],
        ])

        # The mac ports should both be removed since they're the only ones in builders._exact_matches.
        self.assertEqual(self.tool.filesystem.read_text_file(test_port.path_to_generic_test_expectations_file()), """
Bug(foo) [ Linux Win ] fast/dom/prototype-taco.html [ NeedsRebaseline ]
""")
    finally:
        builders._exact_matches = old_exact_matches
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import datetime
import hashlib
import random
import uuid
from action import base_manager
from bottle import redirect
from entity import user as user_entity
from helper import hint
from helper import translator
from helper import url
class UserManager(base_manager.BaseManager):
    """ Handle user related actions.

    Constants:
        SESSION_COOKIE -- The session cookie (string).
        USER_NAME_COOKIE -- The user name cookie (string).

    Member:
        db -- The database connection.
        hints -- List of hints which occurred during action handling (list hint).
        _current_user -- Cache for current user (user).
    """

    SESSION_COOKIE = 'session'
    USER_NAME_COOKIE = 'user'

    def __init__(self, db):
        self.db = db
        self.hints = []
        self._current_user = None

    def action(self, language, pw_hash_iterations, admin_user):
        """ Handle actions. Returns users or redirects.

        Dispatches to the admin or regular-user action handler depending on
        whether the logged-in user's name matches `admin_user`.
        """
        # Validate user and initialize admin user if necessary.
        self.validate_login(pw_hash_iterations, admin_user)
        is_admin = (self.current_user().name == admin_user)
        if is_admin:
            users = self.__action_admin(language, pw_hash_iterations)
        else:
            users = self.__action_user(language, pw_hash_iterations)
        return users

    def current_user(self):
        """ Returns the current user or None.

        The lookup (by the user-name cookie) is cached in _current_user for
        the lifetime of this manager instance.
        """
        if not self._current_user:
            user_name = self.get_cookie(self.USER_NAME_COOKIE)
            if user_name:
                self._current_user = user_entity.User.find_name(self.db, user_name)
        return self._current_user

    def login(self, language, pw_hash_iterations, admin_user):
        """ Handle login. Returns hints or redirects.

        Special case: when the user table is empty, logging in with the
        configured admin user name bootstraps an admin session (cookie only,
        no DB row yet).
        """
        _ = translator.Translator.instance(language)
        if self.get_form('action') == 'login':
            user_name = self.get_form('name')
            password = self.get_form('password', False)
            if not user_name or not password:
                hint_text = _('Please provide your user name and password.')
                self.hints.append(hint.Hint(hint_text))
                return self.hints
            user = user_entity.User.find_name(self.db, user_name)
            # Deliberately the same message for unknown user and wrong
            # password so user names cannot be enumerated.
            hint_text = _('Given user name or password is wrong. Please '
                          'try again.')
            redirect_url = url.Url.from_path([''])
            if not user:
                user_count = user_entity.User.count_all(self.db)
                if not user_count and user_name == admin_user:
                    # First-ever login: grant the bootstrap admin session.
                    admin_hash = self.__admin_hash(admin_user,
                                                   pw_hash_iterations)
                    self.set_cookie(self.SESSION_COOKIE, admin_hash)
                    redirect(redirect_url)
                else:
                    self.hints.append(hint.Hint(hint_text))
                return self.hints
            pw_hash = self.__generate_pw_hash(password, user.salt,
                                              pw_hash_iterations)
            # NOTE(review): `!=` is not a constant-time comparison; consider
            # hmac.compare_digest for hash comparison.
            if pw_hash != user.pw_hash:
                self.hints.append(hint.Hint(hint_text))
                return self.hints
            else:
                # Fresh session token, stored both in a cookie and in the DB.
                session_salt = self.__generate_salt()
                session = self.__generate_pw_hash(session_salt, user.salt,
                                                  pw_hash_iterations)
                expires = datetime.datetime.now() + datetime.timedelta(days=365)
                self.set_cookie(self.USER_NAME_COOKIE, user.name, expires=expires)
                self.set_cookie(self.SESSION_COOKIE, session, expires=expires)
                user.session = session
                user.save(self.db)
                redirect(redirect_url)
        return self.hints

    def logout(self):
        """ Log user out.

        Clears the DB-side session (for real users; the bootstrap admin has
        no DB row) and both cookies, then redirects to the start page.
        """
        user = self.current_user()
        if user and user.id:
            user.session = None
            user.save(self.db)
        self.delete_cookie(self.USER_NAME_COOKIE)
        self.delete_cookie(self.SESSION_COOKIE)
        redirect(url.Url.from_path(['']))

    def validate_login(self, pw_hash_iterations, admin_user):
        """ Validates if a user is logged in.

        Redirects to the login page unless the session cookie matches the
        user's stored session, or (with an empty user table) matches the
        bootstrap admin hash.
        """
        session = self.get_cookie(self.SESSION_COOKIE)
        user = self.current_user()
        if not user or not user.session or user.session != session:
            user_count = user_entity.User.count_all(self.db)
            admin_hash = self.__admin_hash(admin_user, pw_hash_iterations)
            is_admin = (session == admin_hash)
            if is_admin:
                # Create fake user. Dangerous! Always check "user and user.id".
                self._current_user = user_entity.User(name=admin_user)
            if user_count or not is_admin:
                redirect(url.Url.from_path(['login']))

    def __action_admin(self, language, pw_hash_iterations):
        """ Handle admin actions. Returns users or redirects.

        Supports 'new' (create user) and 'edit' (delete or reset password).
        """
        _ = translator.Translator.instance(language)
        action = self.get_form('action')
        if action == 'new':
            user_name = self.get_form('name')
            password = self.get_form('password', False)
            password_confirm = self.get_form('password-confirm', False)
            if not user_name or not password:
                hint_text = _('Please provide user name and password.')
                self.hints.append(hint.Hint(hint_text))
            elif password != password_confirm:
                hint_text = _('Both passwords are not the same.')
                self.hints.append(hint.Hint(hint_text))
            elif user_entity.User.find_name(self.db, user_name):
                hint_text = _('User name is already taken.')
                self.hints.append(hint.Hint(hint_text))
            else:
                user = user_entity.User(name=user_name)
                user.salt = self.__generate_salt()
                user.pw_hash = self.__generate_pw_hash(password, user.salt,
                                                       pw_hash_iterations)
                user.save(self.db)
                hint_text = _('User "{}" has been created.').format(user_name)
                self.hints.append(hint.Hint(hint_text))
        elif action == 'edit':
            id = self.get_form('id')
            # NOTE(review): find_pk may return None for an unknown id; the
            # attribute accesses below would then fail — verify upstream.
            user = user_entity.User.find_pk(self.db, id)
            is_delete = self.get_form('delete') is not None
            if is_delete:
                hint_text = _('User "{}" has been deleted.').format(user.name)
                user.delete(self.db)
                self.hints.append(hint.Hint(hint_text))
            else:
                password = self.get_form('password', False)
                password_confirm = self.get_form('password-confirm', False)
                if not password:
                    hint_text = _('Please provide the password.')
                    self.hints.append(hint.Hint(hint_text))
                elif password != password_confirm:
                    hint_text = _('Both passwords are not the same.')
                    self.hints.append(hint.Hint(hint_text))
                else:
                    # Password reset invalidates the user's session.
                    user.salt = self.__generate_salt()
                    user.pw_hash = self.__generate_pw_hash(password, user.salt,
                                                           pw_hash_iterations)
                    user.session = None
                    user.save(self.db)
                    hint_text = _('User "{}" has been updated.').format(user.name)
                    self.hints.append(hint.Hint(hint_text))
        return user_entity.User.find_all(self.db)

    def __action_user(self, language, pw_hash_iterations):
        """ Handle user actions. Returns users or redirects.

        Regular users can only edit their own profile (change password);
        doing so invalidates their session so they must log in again.
        """
        _ = translator.Translator.instance(language)
        action = self.get_form('action')
        if action == 'edit-profile':
            password = self.get_form('password', False)
            password_confirm = self.get_form('password-confirm', False)
            if not password:
                hint_text = _('Please provide the password.')
                self.hints.append(hint.Hint(hint_text))
            elif password != password_confirm:
                hint_text = _('Both passwords are not the same.')
                self.hints.append(hint.Hint(hint_text))
            else:
                user = self.current_user()
                user.salt = self.__generate_salt()
                user.pw_hash = self.__generate_pw_hash(password, user.salt,
                                                       pw_hash_iterations)
                user.session = None
                user.save(self.db)
                hint_text = _('Your profile has been updated. Please login '
                              'again.')
                self.hints.append(hint.Hint(hint_text))
        return []

    def __admin_hash(self, admin_user, iterations):
        """ Calculate admin hash for first login. """
        # Uses the admin name as both password and salt; only ever valid
        # while the user table is empty (see validate_login/login).
        return self.__generate_pw_hash(admin_user, admin_user,
                                       iterations)

    def __generate_pw_hash(self, password, salt, iterations):
        """ Returns the password hash. """
        # NOTE(review): iterated bare SHA-512; hashlib.pbkdf2_hmac would be
        # the standard KDF, but changing it would invalidate stored hashes.
        pw_hash = hashlib.sha512(bytes(password + salt, 'utf-8')).hexdigest()
        for x in range(0, iterations):
            pw_hash = hashlib.sha512(bytes(pw_hash, 'utf-8')).hexdigest()
        return pw_hash

    def __generate_salt(self):
        """ Generates a random salt. """
        return uuid.uuid4().hex
|
|
# External Module Dependencies
from xml.dom import minidom
import re
from time import time as current_time
import os
# Internal Module Dependencies
from misc4rings import isCCW, closestColor, isApproxClosedPath
from andysmod import format_time, Radius, consecutive_pairs
from andysSVGpathTools import polylineStr2pathStr, isClosedPathStr
from rings4rings import Ring
from svgpathtools import parse_path, Path, disvg, wsvg
# Options
from options4rings import colordict
import options4rings as opt
disvg = disvg if opt.try_to_open_svgs_in_browser else wsvg
def askUserOrientation():
    """Interactively ask the user for a path's orientation.

    Returns:
        True for 'y', False for 'n', the string 'remove' for 'r'.
    Raises:
        Exception: when the user enters 'e' (forced exit).

    Any other input re-prompts.  Bug fix: the recursive re-prompt's answer
    is now returned (the original dropped it and returned None after an
    invalid first response).
    """
    dec = input("Enter 'y' or 'n' to specify orientation, or \n"
                "enter 'r' to ignore this path and not include it in the "
                "fixed svg that will be output, or\n"
                "enter 'e' to terminate this session: ")
    if dec == 'y':
        return True
    elif dec == 'n':
        return False
    elif dec == 'r':
        return 'remove'
    elif dec == 'e':
        raise Exception("User-forced exit.")
    else:
        return askUserOrientation()
def get_stroke(elem):
    """Return the stroke color of an SVG element, uppercased.

    Reads the 'stroke' attribute directly when present; otherwise falls back
    to scanning the inline 'style' attribute (e.g. "fill:none;stroke:#ff00aa")
    for a hex color after 'stroke'.  Returns '' (and warns once, globally)
    when no color can be extracted.

    Fixes: the bare `except:` around the regex is replaced by an explicit
    check for a failed match (re.search returning None), and the pattern is
    a raw string (the old '\\#' escape is deprecated syntax).
    """
    troubleFlag = False
    stroke = elem.getAttribute('stroke')  # sometimes this works
    if stroke == '':
        style = elem.getAttribute('style')
        hexstart = style.find('stroke')
        if hexstart == -1:
            troubleFlag = True
        else:
            temp = style[hexstart:]
            match = re.search(r'#[a-fA-F0-9]*', temp)
            if match is None:
                troubleFlag = True
                stroke = ''
            else:
                stroke = match.group()
    if troubleFlag:
        global already_warned_having_trouble_extracting_ring_colors
        if not already_warned_having_trouble_extracting_ring_colors:
            already_warned_having_trouble_extracting_ring_colors = True
            opt.warnings_output_on.dprint(
                "Warning: Having trouble extracting hex colors from svg. "
                "Hopefully this will not matter as the palette check "
                "will fix the colors.")
    return stroke.upper()
def get_center(doc):
    """Find the center mark/line and return its centroid.

    The sample's center must be marked with a `line` element stroked exactly
    in colordict['center'].  Fallbacks, in order: a single `path` of that
    color; then any `line` whose color is merely *closest* to the center
    color.  If all of that fails, raises with instructions for fixing the
    SVG by hand.  The returned centroid is a complex number (x + y*1j).
    """
    counter = 0  # number of <line> elements NOT matching the center color
    centerFound = False
    for elem in doc.getElementsByTagName('line'):
        if get_stroke(elem) == colordict['center']:
            # Midpoint of the line's endpoints, encoded as x + y*1j.
            center = 0.5 * float(elem.getAttribute('x1')) + \
                     0.5 * float(elem.getAttribute('x2')) + \
                     0.5 * float(elem.getAttribute('y1')) * 1j + \
                     0.5 * float(elem.getAttribute('y2')) * 1j
            centerFound = True
            break
        else:
            counter += 1

    if not centerFound:
        # Fallback 1: exactly one <path> drawn in the center color.
        potential_center_paths = [parse_path(elem.getAttribute('d'))
                                  for elem in doc.getElementsByTagName('path')
                                  if get_stroke(elem) == colordict['center']]
        if len(potential_center_paths) == 1:
            center_path = potential_center_paths[0]
            center = 0.5 * center_path.start + 0.5 * center_path.end
            centerFound = True
        elif len(potential_center_paths) > 1:
            raise Exception("Multiple paths found with center color {}."
                            "".format(colordict['center']))

    if not centerFound and counter > 0:
        # Fallback 2: a <line> whose color is closest to the center color.
        # NOTE(review): the %s below is filled with `counter`, but the text
        # reads as if the center color was intended — verify.
        opt.warnings_output_on.dprint(
            "[Warning:] No line objects in the svg were found matching "
            "the center color (%s). Now searching for lines of a color "
            "closer to center color than other colors." % counter)
        for elem in doc.getElementsByTagName('line'):
            elem_stroke = get_stroke(elem)
            if len(elem_stroke) == 0:
                opt.warnings_output_on.dprint(
                    '[Warning:] stroke has no length -- make a "stroke" '
                    'attribute is included and no CSS classes are being used.')
            elif closestColor(get_stroke(elem), colordict) == colordict['center']:
                center = 0.5 * float(elem.getAttribute('x1')) + \
                         0.5 * float(elem.getAttribute('x2')) + \
                         0.5 * float(elem.getAttribute('y1')) * 1j + \
                         0.5 * float(elem.getAttribute('y2')) * 1j
                centerFound = True
                counter -= 1
                break
        if counter > 0:  # center found but counter>0
            opt.warnings_output_on.dprint(
                "[Warning:] There are %s disconnected lines in this SVG not "
                "matching the center color. They will be ignored." % counter)

    if not centerFound:
        # All fallbacks failed: diagnose WHY and raise with a fix-it hint.
        # tell user
        example_center = \
            r'<line fill="none" stroke="#0000FF" stroke-width="0.15" ' \
            r'x1="246.143" y1="380.017" x2="246.765" y2="380.856"/>'
        # NOTE(review): the bare `except:` below also catches the detailed
        # Exceptions raised inside this try-block and replaces them with the
        # generic message — confirm that is intended.
        try:
            if counter == 0:
                # Is there a path with the center color?
                other_pathlike_elements = doc.getElementsByTagName('path') + \
                                          doc.getElementsByTagName('polyline') + \
                                          doc.getElementsByTagName('polygon')
                for elem in other_pathlike_elements:
                    if get_stroke(elem) == colordict['center']:
                        if elem in doc.getElementsByTagName('path'):
                            obtype = 'path'
                            pathstr = elem.getAttribute('d')
                        elif elem in doc.getElementsByTagName('polyline'):
                            obtype = 'polyline'
                            pathstr = polylineStr2pathStr(elem.getAttribute('points'))
                        else:
                            obtype = 'polygon'
                            pathstr = polylineStr2pathStr(elem.getAttribute('points')) + 'z'
                        # Suggest a <line> through the quarter-points of the
                        # offending path as a replacement center mark.
                        centerpath = parse_path(pathstr)
                        start, end = centerpath.point(0.25), centerpath.point(0.75)
                        x1, x2, y1, y2 = start.real, end.real, start.imag, end.imag
                        newelem = \
                            r'<line fill="none" stroke="%s" stroke-width="0.05" ' \
                            r'stroke-miterlimit="10" x1="%s" y1="%s" x2="%s" y2="%s"/>' \
                            r'' % (colordict['center'], x1, y1, x2, y2)
                        raise Exception(
                            "Center of sample should be marked by line of "
                            "color %s, but no lines are present in svg. "
                            "There is a %s with the center color, however. "
                            "Open the svg file in a text editor and you "
                            "should be able to find '%s' somewhere... "
                            "replace it with '%s'" % (
                                colordict['center'], obtype, elem, newelem))
            else:
                # Same diagnosis, but matching by *closest* color instead.
                for elem in doc.getElementsByTagName('path') + doc.getElementsByTagName(
                        'polyline') + doc.getElementsByTagName('polygon'):
                    if closestColor(get_stroke(elem), colordict) == colordict['center']:
                        if elem in doc.getElementsByTagName('path'):
                            obtype = 'path'
                            pathstr = elem.getAttribute('d')
                        elif elem in doc.getElementsByTagName('polyline'):
                            obtype = 'polyline'
                            pathstr = polylineStr2pathStr(elem.getAttribute('points'))
                        else:
                            obtype = 'polygon'
                            pathstr = polylineStr2pathStr(elem.getAttribute('points')) + 'z'
                        centerpath = parse_path(pathstr)
                        start, end = centerpath.point(0.25), centerpath.point(0.75)
                        x1, x2, y1, y2 = start.real, end.real, start.imag, end.imag
                        newelem = \
                            r'<line fill="none" stroke="%s" ' \
                            r'stroke-width="0.05" stroke-miterlimit="10" ' \
                            r'x1="%s" y1="%s" x2="%s" y2="%s"/>' \
                            r'' % (colordict['center'], x1, y1, x2, y2)
                        raise Exception(
                            "Center of sample should be marked by "
                            "line of color %s, but no lines are present "
                            "in svg. There is a path with color close "
                            "to the center color, however. Open the svg "
                            "file in a text editor and you should be able "
                            "to find '%s' somewhere... replace it with '%s'"
                            "" % (colordict['center'], obtype, elem, newelem))
                else:
                    raise Exception(
                        'Center of sample should be marked by line '
                        'of color %s, but no lines are present in svg. '
                        'There were no paths or polylines or polygons '
                        'of a similar color either. Looks like you '
                        'did not mark the center. Open your svg in '
                        'a text editor and search for something that '
                        'looks like (with different x1, x2, y1, y2 values) \n%s\n'
                        '' % (colordict['center'], example_center))
        except:
            raise Exception(
                'No center found searching line element with (color) '
                'stroke = %s. Open your svg in a text editor and search '
                'for something that looks like (with different '
                'x1, x2, y1, y2 values) \n%s\n'
                '' % (colordict['center'], example_center))
    return center
def svg2rings(filename):
    """Parse an SVG file into its center mark and a list of Ring objects.

    Returns:
        (center, ring_list): `center` is the centroid of the center mark as
        a complex number; `ring_list` holds one Ring per path/polyline/
        polygon (excluding the center mark), each cleaned of degenerate and
        duplicate segments and re-oriented to be CCW around the center.
    """
    global already_warned_having_trouble_extracting_ring_colors
    already_warned_having_trouble_extracting_ring_colors = False
    doc = minidom.parse(filename)  # parseString also exists

    # find the center mark/line and get its centroid
    center = get_center(doc)

    # ##################################################################
    # get path data as tuples of form:
    #     (d-string, stroke, tag, xml)
    # ##################################################################
    # Use minidom to extract path strings from input SVG
    opt.basic_output_on.dprint("Extracting path_data from SVG... ", 'nr')
    path_data = [(p.getAttribute('d'), get_stroke(p),
                  p.parentNode.getAttribute('id'), p.toxml())
                 for p in doc.getElementsByTagName('path')
                 if get_stroke(p) != colordict['center']]

    # extract polylines (converted to path d-strings)
    path_data += [
        (polylineStr2pathStr(p.getAttribute('points')),
         get_stroke(p), p.parentNode.getAttribute('id'), p.toxml())
        for p in doc.getElementsByTagName('polyline')]

    # extract polygons (converted to closed path d-strings)
    path_data += [
        (polylineStr2pathStr(p.getAttribute('points')) + 'z',
         get_stroke(p), p.parentNode.getAttribute('id'), p.toxml())
        for p in doc.getElementsByTagName('polygon')]
    doc.unlink()
    opt.basic_output_on.dprint("Done.")

    # Convert path_data to ring objects
    opt.basic_output_on.dprint(
        "Converting path strings to Ring objects. "
        "This could take a minute... ", 'nr')
    path2ring_start_time = current_time()
    ring_list = []
    paths_of_unknown_orientation = []
    for k, (dstring, stroke, tag, xml) in enumerate(path_data):
        # skip center line
        if stroke == opt.colordict['center']:
            continue

        path = parse_path(dstring)
        if len(path) == 0:
            continue  # path with single point

        # remove small and repeated segments
        assert path.iscontinuous()
        path = remove_degenerate_segments(path)
        path = remove_duplicate_segments(path)
        if not path.iscontinuous():
            # Cleanup broke continuity: dump a debug SVG marking the gaps.
            discontinuities = \
                [s1.start for s0, s1 in consecutive_pairs(path)
                 if s0.end != s1.start]
            dbfn = os.path.join(opt.output_directory_debug, 'problem.svg')
            wsvg(path, nodes=discontinuities, filename=dbfn)
            raise Exception(
                'Something went wrong wil trying remove small and '
                f'repeated segments.  Please see "{dbfn}" -- there '
                f'should be nodes highlighting where the problem '
                f'occurred though the problem may not be visually '
                f'obvious (e.g. might be related to '
                f'overlapping/repeated path segments).')

        # check that not too many segments were removed
        # NOTE(review): when len(path) == 0 and the original path was short,
        # execution falls through to isCCW with an empty path — confirm.
        if len(path) == 0:
            original_path = parse_path(dstring)
            if original_path.length() > opt.appropriate_ring_length_minimum:
                raise Exception(
                    "A path that was acceptable is no longer long enough")

        # fix the orientation if path is not CCW (w.r.t. center)
        path_is_ccw = 'unknown'
        try:
            path_is_ccw = isCCW(path, center)
        except:
            if opt.manually_fix_orientations:
                path_is_ccw = \
                    resolve_orientation_manually(path, center, ring_list)
            else:
                paths_of_unknown_orientation.append((dstring, stroke, tag, xml))
                if opt.when_orientation_cannot_be_determined_assume_CCW:
                    path_is_ccw = True
        if not path_is_ccw:
            path = path.reversed()
            opt.full_output_on.dprint(
                "Path %s was not oriented CCW, but is now." % k)
        elif path_is_ccw == 'remove':
            continue  # don't include this path in ring_list
        elif path_is_ccw == 'unknown':
            pass  # include this path and hope everything is ok

        ring_list.append(Ring(path_string=dstring,
                              color=stroke,
                              brook_tag=tag,
                              rad=Radius(center),
                              path=path,
                              xml=xml))
        opt.full_output_on.dprint("Ring %s ok" % k)

    if len(paths_of_unknown_orientation) > 0:
        if opt.when_orientation_cannot_be_determined_assume_CCW:
            orientation = 'Counterclockwise'
        else:
            orientation = 'Clockwise'
        ccw_warning = "[Warning:] Unable to determine orientation of %s " \
                      "paths. This is likely because some paths in this " \
                      "sample are far from being convex. I assumed that " \
                      "these paths were traced in a %s fashion (to " \
                      "change this assumption, set 'when_orientation_cannot_" \
                      "be_determined_assume_CCW = %s' in options. " \
                      "If this assumption is false, either the program " \
                      "will crash or the transect will be visibly messed " \
                      "up in the output 'xxx_transects.svg' (where xxx " \
                      "is the input svg's filename sans extension)." \
                      "" % (len(paths_of_unknown_orientation), orientation,
                            not opt.when_orientation_cannot_be_determined_assume_CCW)
        opt.warnings_output_on.dprint(ccw_warning)

    if len(paths_of_unknown_orientation) > 1:
        opt.warnings_output_on.dprint(
            "If think you were not consistent tracing in either CCW or "
            "CW fashion (or don't get good output from this file) then "
            "set 'manually_fix_orientations = True' in options.")

    # done extracting rings from svg
    opt.basic_output_on.dprint(
        "Done (in %s)." % format_time(current_time() - path2ring_start_time))
    opt.basic_output_on.dprint(
        "Completed extracting rings from SVG. %s rings detected." % len(ring_list))
    return center, ring_list
def remove_degenerate_segments(path):
    """Remove degenerate (near-zero-length) segments, keeping path continuous.

    Args:
        path: A Path object or list of segment objects

    Returns:
        Path object with degenerate segments removed; the neighbours of each
        removed segment are stitched together so no gap is introduced.

    Fix: warning text corrected to "Found and removing" for consistency with
    remove_duplicate_segments.
    """
    degenerate_segment_indices = []
    for k, seg in enumerate(path):
        if abs(seg.start - seg.end) < opt.min_absolute_segment_length:
            degenerate_segment_indices.append(k)

            # fix discontinuities caused by removing degenerate segment
            prev_seg_index = (k - 1) % len(path)
            next_seg_index = (k + 1) % len(path)
            prev_seg, next_seg = path[prev_seg_index], path[next_seg_index]
            if prev_seg.end == seg.start and seg.end == next_seg.start:
                # continuous from prev_seg through next_seg
                prev_seg.end = next_seg.start
            elif prev_seg.end == seg.start:
                prev_seg.end = seg.end
            elif seg.end == next_seg.start:
                next_seg.start = seg.start

    if len(degenerate_segment_indices) > 0:
        opt.full_output_on.dprint(
            "[Warning:] Found and removing the following degenerate "
            "segments:\n%s"
            "" % '\n'.join(str(path[k]) for k in degenerate_segment_indices)
        )
        path = Path(*[seg for k, seg in enumerate(path)
                      if k not in degenerate_segment_indices])
    return path
def remove_duplicate_segments(path):
    """Drop segments that repeat, or double back over, their predecessor.

    Args:
        path: A Path object or list of segment objects

    Returns:
        Path object with duplicate (or reversed-duplicate) segments removed.
    """
    if len(path) <= 1:
        return path

    # Mark the *follower* of every repeated / doubled-over pair for removal.
    doomed = []
    for idx, segment in enumerate(path):
        follower_idx = (idx + 1) % len(path)
        follower = path[follower_idx]
        if segment == follower or segment == follower.reversed():
            doomed.append(follower_idx)

    if doomed:
        descriptions = '\n'.join(str(path[i]) for i in doomed)
        opt.full_output_on.dprint(
            "[Warning:] Found and removing the following duplicate or "
            "doubled-over segments:\n%s"
            "" % descriptions
        )
        survivors = [segment for i, segment in enumerate(path)
                     if i not in doomed]
        path = Path(*survivors)
    return path
def resolve_orientation_manually(path, center, ring_list):
    """Ask the user whether `path` is drawn CCW, via a temporary SVG.

    Writes 'temporary_4manualOrientation.svg' with the path's first segment
    in green, last in red, interior in blue, and all existing rings in
    black, then prompts with askUserOrientation().

    Returns:
        True/False for CCW/CW, or 'remove' to exclude the path.
    """
    print("\n[Manually Fix Orientations:] As currently drawn, the "
          "path starts at the green node/segment and ends at the "
          "red (if you don't see one of these nodes, it's likely "
          "cause the path is very short and thus they are on top "
          "of each other). Does the path in "
          "'temporary_4manualOrientation.svg' appear to be drawn "
          "in a clockwise fashion?")

    if len(path) == 1:
        disp_paths = [path]
        disp_path_colors = ['blue']
    elif len(path) == 2:
        disp_paths = [Path(path[0]), Path(path[1])]
        disp_path_colors = ['green', 'red']
    elif len(path) > 2:
        # NOTE(review): Path(path[1:-1]) passes a slice as one argument —
        # confirm the installed svgpathtools accepts this (vs. Path(*...)).
        disp_paths = [Path(path[0]),
                      Path(path[1:-1]),
                      Path(path[-1])]
        disp_path_colors = ['green', 'blue', 'red']
    else:
        raise Exception("This path is empty... this should never "
                        "happen. Tell Andy.")
    for ring in ring_list:
        disp_paths.append(ring.path)
        disp_path_colors.append('black')
    nodes = [path[0].start, path[-1].end] + [center]
    node_colors = ['green', 'red'] + [colordict['center']]
    disvg(disp_paths, disp_path_colors, nodes=nodes, node_colors=node_colors,
          filename='temporary_4manualOrientation.svg')

    # svg display reverses orientation so a response of
    # 'yes' means path is actually ccw and thus sets
    # path_is_ccw = True
    path_is_ccw = askUserOrientation()
    if path_is_ccw == 'remove':
        print("OK, this path will be ignored... moving onto the rest.")
    return path_is_ccw
def palette_check(ring_list):
    """Ensure every ring's color is a colordict color, fixing near-misses.

    Also recolors 'safe' rings: the outermost closed ring gets the boundary
    color, every other safe ring the complete color.

    Returns:
        The (mutated) ring_list.

    Fixes: `r.isClosed` is a method (it is *called* elsewhere, e.g. in
    visual_test_of_closed_ring_sort), so the bare attribute was always
    truthy and every ring — closed or not — competed to be the boundary
    ring; it is now called.  The summary message is emitted once after the
    loop instead of once per non-safe ring.
    """
    opt.basic_output_on.dprint("Palette check running... ", 'nr')
    # Outermost closed ring is the sample boundary.
    boundary_ring = max([r for r in ring_list if r.isClosed()],
                        key=lambda r: r.maxR)
    fixed_count = 0
    for ring in ring_list:
        color = ring.color
        if color not in colordict.values():
            if opt.auto_fix_ring_colors:
                # Snap unknown colors to the closest palette color.
                newcolor = closestColor(color, colordict)
                ring.color = newcolor
                opt.colorcheck_output_on.dprint('C' * 70)
                opt.colorcheck_output_on.dprint(
                    'WARNING: SVG-creator used a color used that is not '
                    'in dictionary.')
                opt.colorcheck_output_on.dprint(
                    '...changing this ring from %s to %s.' % (color, newcolor))
                opt.colorcheck_output_on.dprint(
                    'newcolor in colordict = %s' % (newcolor in colordict))
                fixed_count += 1
            else:
                raise Exception('color used that is not in dictionary')

        if color in [colordict['safe1'], colordict['safe2']]:
            if ring == boundary_ring:
                ring.color = colordict['boundary']
            else:
                ring.color = colordict['complete']
    opt.basic_output_on.dprint(
        "Palette check passed after fixing %s rings.\n" % fixed_count)
    return ring_list
def closedness_consistency_check(ring_list):
    """Check that each ring's three 'closedness' signals agree, fixing some.

    The three signals are: the d-string ends with 'z' (ztest), the path's
    endpoints approximately coincide (atest), and the ring carries a
    'closed' color (ctest).  A ring passes when all three agree; otherwise
    it is recorded as failed and, when auto-fixing is enabled, reconciled
    using atest (the geometry) as ground truth.

    Returns:
        The (possibly mutated) ring_list.
    """
    opt.basic_output_on.dprint("Closedness consistency check running... ", 'nr')
    failed_rings = []
    for ring in ring_list:
        # checks for z at end of path string
        ztest = isClosedPathStr(ring.string)
        # equivalent to isApproxClosedPath(ring.path)
        atest = isApproxClosedPath(ring.path)
        ctest = ring.color in {colordict['complete'], colordict['boundary']}
        if not ((ztest and atest and ctest) or (not ztest and not atest and not ctest)):
            # import ipdb; ipdb.set_trace()
            failed_rings.append(ring)
            opt.closednessCheck_output_on.dprint(
                "A ring failed the closedness consistency check.")
            opt.closednessCheck_output_on.dprint("Path String: " + str(ring.string))
            opt.closednessCheck_output_on.dprint("color of path: " + str(ring.color))
            opt.closednessCheck_output_on.dprint("closed by z: " + str(ztest))
            opt.closednessCheck_output_on.dprint("approx closed: " + str(atest))
            opt.closednessCheck_output_on.dprint("closed color: " + str(ctest))
            opt.closednessCheck_output_on.dprint("")
            if opt.auto_fix_ring_colors:
                # Reconcile the disagreeing signals; geometry (atest) wins.
                if ztest and not atest:  # for ctest or not ctest
                    # A 'z'-closed string whose endpoints don't meet is
                    # unfixable here.
                    raise Exception("ztest and not atest")
                elif ztest and atest:  # and not ctest
                    ring.color = colordict["complete"]
                    opt.closednessCheck_output_on.dprint(
                        "Fixed: changed color to complete color.")
                elif (not ztest) and atest:
                    if ctest:
                        ring.string += 'z'
                        opt.closednessCheck_output_on.dprint(
                            "Fixed: appended z to end of string.")
                    else:
                        opt.closednessCheck_output_on.dprint(
                            "Maybe this ring is fine as it. I've fixed "
                            "nothing. case: (not ztest) and atest and "
                            "(not ctest)")
                elif (not ztest) and (not atest):  # and ctest
                    ring.color = colordict["incomplete"]
                    opt.closednessCheck_output_on.dprint(
                        "Fixed: changed color to incomplete color.")
                else:
                    raise Exception(
                        "This case should not logically occur. "
                        "There's a bug in my logic.")
    if len(failed_rings) > 0:
        opt.basic_output_on.dprint(
            "Warning:Closedness consistency check failed for %s "
            "(out of %s) rings. " % (len(failed_rings), len(ring_list)), 'nr')
        opt.basic_output_on.dprint(
            "I did my best to fix them; set closednessCheck_output_on "
            "to True and run again to see the details of my fixes.\n")
    else:
        opt.basic_output_on.dprint(
            "Closedness Consistency check passed with no failed rings.\n")
    return ring_list
def visual_test_of_closed_ring_sort(ring_list):
    """Run the ring-sort slideshow restricted to the closed rings only."""
    closed_rings = [ring for ring in ring_list if ring.isClosed()]
    visual_test_of_ring_sort(closed_rings)
def visual_test_of_ring_sort(ring_list):
    """Write an SVG slideshow visualizing the ring sort order.

    Slide k shows all rings with sort_index < k-1 in blue, the ring with
    sort_index k-1 in red, and the ring with sort_index k in green — so the
    sort order can be eyeballed one ring at a time.  Output goes to
    <output_directory>/debug/ring_sort_slideshow.
    """
    from andysSVGpathTools import svgSlideShow
    from os import path as os_path
    fileloc = os_path.join(
        opt.output_directory, 'debug', 'ring_sort_slideshow')
    opt.basic_output_on.dprint(
        "Creating SVG slideshow showing ring sorting...", 'nr')

    # create svg for each image in slideshow
    pathcolortuplelist = []
    for slide in range(len(ring_list)):
        if slide == 0:
            continue
        elif slide == 1:
            second_to_outer_most_path = \
                next(r.path for r in ring_list if r.sort_index == 0)
        else:
            # carry over the previous slide's newest (green) ring as red
            second_to_outer_most_path = outer_most_path
        outer_most_path = \
            next(r.path for r in ring_list if r.sort_index == slide)
        paths = [ring.path for ring in ring_list if ring.sort_index < slide - 1] + \
                [second_to_outer_most_path, outer_most_path]
        colors = ['blue'] * (len(paths) - 2) + ['red'] + ['green']
        pathcolortuplelist.append((paths, colors))
    svgSlideShow(pathcolortuplelist, save_directory=fileloc,
                 clear_directory=True, suppressOutput=True)
    opt.basic_output_on.dprint("Done.")
|
|
"""
Basically a parser that is faster, because it tries to parse only parts and if
anything changes, it only reparses the changed parts. But because it's not
finished (and still not working as I want), I won't document it any further.
"""
import re
from jedi._compatibility import use_metaclass
from jedi import settings
from jedi import parsing
from jedi import parsing_representation as pr
from jedi import cache
from jedi import common
SCOPE_CONTENTS = ['asserts', 'subscopes', 'imports', 'statements', 'returns']
class Module(pr.Simple, pr.Module):
    """Facade module that merges the modules of several sub-parsers.

    Attribute access that this class does not define is delegated to the
    first sub-parser's module via __getattr__.
    """
    def __init__(self, parsers):
        super(Module, self).__init__(self, (1, 0))
        self.parsers = parsers
        self.reset_caches()

        self.start_pos = 1, 0
        self.end_pos = None, None

    def reset_caches(self):
        """ This module does a whole lot of caching, because it uses different
        parsers. """
        self._used_names = None
        for p in self.parsers:
            p.user_scope = None
            p.user_stmt = None

    def __getattr__(self, name):
        # Refuse dunder lookups so protocols like copy/pickle don't recurse
        # through the delegation; everything else comes from parser 0.
        if name.startswith('__'):
            raise AttributeError('Not available!')
        else:
            return getattr(self.parsers[0].module, name)

    @property
    def used_names(self):
        """Union of `used_names` over all sub-parsers' modules (cached)."""
        if self._used_names is None:
            dct = {}
            for p in self.parsers:
                for k, statement_set in p.module.used_names.items():
                    if k in dct:
                        dct[k] |= statement_set
                    else:
                        dct[k] = set(statement_set)
            self._used_names = dct
        return self._used_names

    def __repr__(self):
        return "<%s: %s@%s-%s>" % (type(self).__name__, self.name,
                                   self.start_pos[0], self.end_pos[0])
class CachedFastParser(type):
    """ This is a metaclass for caching `FastParser`. """

    def __call__(self, source, module_path=None, user_position=None):
        # With fast parsing disabled, hand back an ordinary parser.
        if not settings.fast_parser:
            return parsing.Parser(source, module_path, user_position)

        cached = cache.parser_cache.get(module_path, None)
        no_reusable_parser = (cached is None
                              or isinstance(cached.parser, parsing.Parser))
        if no_reusable_parser:
            # Nothing usable in the cache -> construct a fresh FastParser.
            return super(CachedFastParser, self).__call__(source, module_path,
                                                          user_position)

        # `cached` is a `cache.ParserCacheItem`; refresh its fast parser.
        parser = cached.parser
        parser.update(source, user_position)
        return parser
class ParserNode(object):
    """A node in the tree of cached sub-parsers used by the fast parser.

    Each node owns one parser for a chunk of source `code`, plus a snapshot
    of that parser's scope contents so they can be restored before other
    parsers' results are merged back in on re-parse.
    """
    def __init__(self, parser, code, parent=None):
        self.parent = parent

        self.code = code
        self.hash = hash(code)  # cheap equality check for unchanged chunks

        self.children = []
        # must be created before new things are added to it.
        self.save_contents(parser)

    def save_contents(self, parser):
        """Snapshot the parser's scope contents for later restoration."""
        self.parser = parser

        try:
            # with fast_parser we have either 1 subscope or only statements.
            self.content_scope = parser.module.subscopes[0]
        except IndexError:
            self.content_scope = parser.module

        scope = self.content_scope
        self._contents = {}
        for c in SCOPE_CONTENTS:
            self._contents[c] = list(getattr(scope, c))
        self._is_generator = scope.is_generator

        self.old_children = self.children
        self.children = []

    def reset_contents(self):
        """Restore the snapshotted contents, recursively for all children."""
        scope = self.content_scope
        for key, c in self._contents.items():
            setattr(scope, key, list(c))
        scope.is_generator = self._is_generator
        self.parser.user_scope = self.parser.module

        if self.parent is None:
            # Global vars of the first one can be deleted, in the global scope
            # they make no sense.
            self.parser.module.global_vars = []

        for c in self.children:
            c.reset_contents()

    def parent_until_indent(self, indent=None):
        # Walk up to the ancestor whose indent is below `indent` (or to the
        # root when indent is None).
        # NOTE(review): precedence here is `indent is None or (self.indent >=
        # indent and self.parent)` — confirm that grouping is intended.
        if indent is None or self.indent >= indent and self.parent:
            self.old_children = []
            if self.parent is not None:
                return self.parent.parent_until_indent(indent)
        return self

    @property
    def indent(self):
        """Indent column of this node's first element (0 for the root)."""
        if not self.parent:
            return 0
        module = self.parser.module
        # Probe the module's content lists in priority order for an element
        # whose start position gives us the indent.
        try:
            el = module.subscopes[0]
        except IndexError:
            try:
                el = module.statements[0]
            except IndexError:
                try:
                    el = module.imports[0]
                except IndexError:
                    try:
                        el = [r for r in module.returns if r is not None][0]
                    except IndexError:
                        return self.parent.indent + 1
        return el.start_pos[1]

    def _set_items(self, parser, set_parent=False):
        # insert parser objects into current structure
        scope = self.content_scope
        for c in SCOPE_CONTENTS:
            content = getattr(scope, c)
            items = getattr(parser.module, c)
            if set_parent:
                for i in items:
                    if i is None:
                        continue  # happens with empty returns
                    i.parent = scope.use_as_parent
                    if isinstance(i, (pr.Function, pr.Class)):
                        for d in i.decorators:
                            d.parent = scope.use_as_parent
            content += items

        # global_vars are always accumulated on the root node's module
        cur = self
        while cur.parent is not None:
            cur = cur.parent
        cur.parser.module.global_vars += parser.module.global_vars

        scope.is_generator |= parser.module.is_generator

    def add_node(self, node, set_parent=False):
        """Adding a node means adding a node that was already added earlier"""
        self.children.append(node)
        self._set_items(node.parser, set_parent=set_parent)

        node.old_children = node.children
        node.children = []
        return node

    def add_parser(self, parser, code):
        return self.add_node(ParserNode(parser, code, self), True)
class FastParser(use_metaclass(CachedFastParser)):
    """Parser that splits a module into parts and caches each part.

    Only the parts whose source changed are re-parsed; unchanged parts
    are re-used through `ParserNode` bookkeeping.
    """
    def __init__(self, code, module_path=None, user_position=None):
        # set values like `pr.Module`.
        self.module_path = module_path
        self.user_position = user_position
        self._user_scope = None
        self.current_node = None
        self.parsers = []
        self.module = Module(self.parsers)
        self.reset_caches()
        try:
            self._parse(code)
        except:
            # FastParser is cached, be careful with exceptions
            self.parsers[:] = []
            raise

    @property
    def user_scope(self):
        # Lazily pick the user scope from the sub-parsers; SubModule scopes
        # are skipped and the whole module is the fallback.
        if self._user_scope is None:
            for p in self.parsers:
                if p.user_scope:
                    if isinstance(p.user_scope, pr.SubModule):
                        continue
                    self._user_scope = p.user_scope
        if isinstance(self._user_scope, pr.SubModule) \
                or self._user_scope is None:
            self._user_scope = self.module
        return self._user_scope

    @property
    def user_stmt(self):
        # First statement under the user position found in any sub-parser.
        if self._user_stmt is None:
            for p in self.parsers:
                if p.user_stmt:
                    self._user_stmt = p.user_stmt
                    break
        return self._user_stmt

    def update(self, code, user_position=None):
        """Re-parse `code`, re-using cached parts where possible."""
        self.user_position = user_position
        self.reset_caches()
        try:
            self._parse(code)
        except:
            # FastParser is cached, be careful with exceptions
            self.parsers[:] = []
            raise

    def _scan_user_scope(self, sub_module):
        """ Scan with self.user_position. """
        for scope in sub_module.statements + sub_module.subscopes:
            if isinstance(scope, pr.Scope):
                if scope.start_pos <= self.user_position <= scope.end_pos:
                    return self._scan_user_scope(scope) or scope
        return None

    def _split_parts(self, code):
        """
        Split the code into different parts. This makes it possible to parse
        each part separately and therefore cache parts of the file and not
        everything.
        """
        def add_part():
            # Flush `current_lines` into `parts`; decorators are glued to
            # the following part via `add_to_last`.
            txt = '\n'.join(current_lines)
            if txt:
                if add_to_last and parts:
                    parts[-1] += '\n' + txt
                else:
                    parts.append(txt)
                current_lines[:] = []

        r_keyword = '^[ \t]*(def|class|@|%s)' % '|'.join(common.FLOWS)
        lines = code.splitlines()
        current_lines = []
        parts = []
        is_decorator = False
        current_indent = 0
        old_indent = 0
        new_indent = False
        in_flow = False
        add_to_last = False
        # All things within flows are simply being ignored.
        for i, l in enumerate(lines):
            # check for dedents
            m = re.match('^([\t ]*)(.?)', l)
            indent = len(m.group(1))
            if m.group(2) in ['', '#']:
                current_lines.append(l)  # just ignore comments and blank lines
                continue
            if indent < current_indent:  # -> dedent
                current_indent = indent
                new_indent = False
                if not in_flow or indent < old_indent:
                    add_part()
                    add_to_last = False
                in_flow = False
            elif new_indent:
                current_indent = indent
                new_indent = False
            # Check lines for functions/classes and split the code there.
            if not in_flow:
                m = re.match(r_keyword, l)
                if m:
                    in_flow = m.group(1) in common.FLOWS
                    if not is_decorator and not in_flow:
                        add_part()
                        add_to_last = False
                    is_decorator = '@' == m.group(1)
                    if not is_decorator:
                        old_indent = current_indent
                        current_indent += 1  # it must be higher
                        new_indent = True
                elif is_decorator:
                    is_decorator = False
                    add_to_last = True
            current_lines.append(l)
        add_part()
        return parts

    def _parse(self, code):
        """ :type code: str """
        def empty_parser():
            # A parser over the empty string, used as a neutral base scope.
            new, temp = self._get_parser('', '', 0, [], False)
            return new

        parts = self._split_parts(code)
        self.parsers[:] = []

        line_offset = 0
        start = 0
        p = None
        is_first = True
        for code_part in parts:
            lines = code_part.count('\n') + 1
            # Only (re)parse parts that start past the previous parser's end.
            if is_first or line_offset >= p.end_pos[0]:
                indent = len(re.match(r'[ \t]*', code_part).group(0))
                if is_first and self.current_node is not None:
                    nodes = [self.current_node]
                else:
                    nodes = []
                if self.current_node is not None:
                    self.current_node = \
                        self.current_node.parent_until_indent(indent)
                    nodes += self.current_node.old_children

                # check if code_part has already been parsed
                # print '#'*45,line_offset, p and p.end_pos, '\n', code_part
                p, node = self._get_parser(code_part, code[start:],
                                           line_offset, nodes, not is_first)

                if is_first and p.module.subscopes:
                    # special case, we cannot use a function subscope as a
                    # base scope, subscopes would save all the other contents
                    new = empty_parser()
                    if self.current_node is None:
                        self.current_node = ParserNode(new, '')
                    else:
                        self.current_node.save_contents(new)
                    self.parsers.append(new)
                    is_first = False

                if is_first:
                    if self.current_node is None:
                        self.current_node = ParserNode(p, code_part)
                    else:
                        self.current_node.save_contents(p)
                else:
                    if node is None:
                        self.current_node = \
                            self.current_node.add_parser(p, code_part)
                    else:
                        self.current_node = self.current_node.add_node(node)

                if self.current_node.parent and (isinstance(p.user_scope,
                                                            pr.SubModule)
                                                 or p.user_scope is None) \
                        and self.user_position \
                        and p.start_pos <= self.user_position < p.end_pos:
                    p.user_scope = self.current_node.parent.content_scope

                self.parsers.append(p)
                is_first = False
            else:
                # print '#'*45, line_offset, p.end_pos, 'theheck\n', code_part
                pass

            line_offset += lines
            start += len(code_part) + 1  # +1 for newline

        if self.parsers:
            self.current_node = self.current_node.parent_until_indent()
        else:
            self.parsers.append(empty_parser())

        self.module.end_pos = self.parsers[-1].end_pos

        # print(self.parsers[0].module.get_code())
        del code

    def _get_parser(self, code, parser_code, line_offset, nodes, no_docstr):
        """Return a ``(parser, node)`` pair for `code`.

        When a node in `nodes` has the same hash *and* source, its parser
        is re-used (the node is popped and returned); otherwise a fresh
        parser is created and ``node`` is None.
        """
        h = hash(code)
        hashes = [n.hash for n in nodes]
        node = None
        try:
            index = hashes.index(h)
            if nodes[index].code != code:
                # hash collision: same hash, different source
                raise ValueError()
        except ValueError:
            p = parsing.Parser(parser_code, self.module_path,
                               self.user_position, offset=(line_offset, 0),
                               is_fast_parser=True, top_module=self.module,
                               no_docstr=no_docstr)
            p.module.parent = self.module
        else:
            if nodes[index] != self.current_node:
                offset = int(nodes[0] == self.current_node)
                self.current_node.old_children.pop(index - offset)
            node = nodes.pop(index)
            p = node.parser
            m = p.module
            # shift the cached module to its new position in the file
            m.line_offset += line_offset + 1 - m.start_pos[0]
            if self.user_position is not None and \
                    m.start_pos[0] <= self.user_position[0] <= m.end_pos[0]:
                # It's important to take care of the whole user
                # positioning stuff, if no reparsing is being done.
                p.user_stmt = m.get_statement_for_position(
                    self.user_position, include_imports=True)
                if p.user_stmt:
                    p.user_scope = p.user_stmt.parent
                else:
                    p.user_scope = self._scan_user_scope(m) or m
        return p, node

    def reset_caches(self):
        """Drop memoized user scope/statement and reset cached node contents."""
        self._user_scope = None
        self._user_stmt = None
        self.module.reset_caches()
        if self.current_node is not None:
            self.current_node.reset_contents()
|
|
# file openpyxl/writer/straight_worksheet.py
# Copyright (c) 2010 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: Eric Gazoni
"""Write worksheets to xml representations in an optimized way"""
import datetime
import os
from ..cell import column_index_from_string, get_column_letter, Cell
from ..worksheet import Worksheet
from ..shared.xmltools import XMLGenerator, get_document_content, \
start_tag, end_tag, tag
from ..shared.date_time import SharedDate
from ..shared.ooxml import MAX_COLUMN, MAX_ROW
from tempfile import NamedTemporaryFile
from ..writer.excel import ExcelWriter
from ..writer.strings import write_string_table
from ..writer.styles import StyleWriter
from ..style import Style, NumberFormat
from ..shared.ooxml import ARC_SHARED_STRINGS, ARC_CONTENT_TYPES, \
ARC_ROOT_RELS, ARC_WORKBOOK_RELS, ARC_APP, ARC_CORE, ARC_THEME, \
ARC_STYLE, ARC_WORKBOOK, \
PACKAGE_WORKSHEETS, PACKAGE_DRAWINGS, PACKAGE_CHARTS
# Map each dump cell data type to the Cell type constant it is written with
# and the index of the style record to apply ('1' selects the datetime style
# appended by ExcelDumpWriter, '0' the default style).
STYLES = {'datetime' : {'type':Cell.TYPE_NUMERIC,
                        'style':'1'},
          'string':{'type':Cell.TYPE_STRING,
                    'style':'0'},
          'numeric':{'type':Cell.TYPE_NUMERIC,
                     'style':'0'},
          'formula':{'type':Cell.TYPE_FORMULA,
                     'style':'0'},
          'boolean':{'type':Cell.TYPE_BOOL,
                     'style':'0'},
          }

# Style used for datetime cells (style index '1' above).
DATETIME_STYLE = Style()
DATETIME_STYLE.number_format.format_code = NumberFormat.FORMAT_DATE_YYYYMMDD2
# Largest possible worksheet range, e.g. used before the real extent is known.
BOUNDING_BOX_PLACEHOLDER = 'A1:%s%d' % (get_column_letter(MAX_COLUMN), MAX_ROW)
class DumpWorksheet(Worksheet):
    """
    .. warning::
        You shouldn't initialize this yourself, use :class:`..workbook.Workbook` constructor instead,
        with `optimized_write = True`.
    """
    def __init__(self, parent_workbook):
        Worksheet.__init__(self, parent_workbook)
        # Largest column/row written so far (for the <dimension> header).
        self._max_col = 0
        self._max_row = 0
        self._parent = parent_workbook
        # Header and content are buffered in separate temp files because the
        # header's <dimension> is only known after all rows have been written.
        self._fileobj_header = NamedTemporaryFile(mode='r+', prefix='..', suffix='.header', delete=False)
        self._fileobj_content = NamedTemporaryFile(mode='r+', prefix='..', suffix='.content', delete=False)
        # Final worksheet XML: header then content, assembled in close().
        self._fileobj = NamedTemporaryFile(mode='w', prefix='..', delete=False)
        self.doc = XMLGenerator(self._fileobj_content, 'utf-8')
        self.header = XMLGenerator(self._fileobj_header, 'utf-8')
        self.title = 'Sheet'
        self._shared_date = SharedDate()
        self._string_builder = self._parent.strings_table_builder

    @property
    def filename(self):
        # Path of the assembled worksheet XML file (complete after close()).
        return self._fileobj.name

    def write_header(self):
        """Write the worksheet XML prologue, up to the opening <sheetData>."""
        doc = self.header
        start_tag(doc, 'worksheet',
                  {'xml:space': 'preserve',
                   'xmlns': 'http://schemas.openxmlformats.org/spreadsheetml/2006/main',
                   'xmlns:r': 'http://schemas.openxmlformats.org/officeDocument/2006/relationships'})
        start_tag(doc, 'sheetPr')
        tag(doc, 'outlinePr',
            {'summaryBelow': '1',
             'summaryRight': '1'})
        end_tag(doc, 'sheetPr')
        tag(doc, 'dimension', {'ref': 'A1:%s' % (self.get_dimensions())})
        start_tag(doc, 'sheetViews')
        start_tag(doc, 'sheetView', {'workbookViewId': '0'})
        tag(doc, 'selection', {'activeCell': 'A1',
                               'sqref': 'A1'})
        end_tag(doc, 'sheetView')
        end_tag(doc, 'sheetViews')
        tag(doc, 'sheetFormatPr', {'defaultRowHeight': '15'})
        start_tag(doc, 'sheetData')

    def close(self):
        """Concatenate header + content into the final file and close it."""
        self._close_content()
        self._close_header()
        self._write_fileobj(self._fileobj_header)
        self._write_fileobj(self._fileobj_content)
        self._fileobj.close()

    def _write_fileobj(self, fobj):
        # Copy `fobj` into the final file in 4 KiB chunks, then delete it.
        fobj.flush()
        fobj.seek(0)
        while True:
            chunk = fobj.read(4096)
            if not chunk:
                break
            self._fileobj.write(chunk)
        fobj.close()
        os.remove(fobj.name)
        self._fileobj.flush()

    def _close_header(self):
        # The header needs no closing tags; kept for symmetry with content.
        doc = self.header
        #doc.endDocument()

    def _close_content(self):
        # Close the elements opened by write_header().
        doc = self.doc
        end_tag(doc, 'sheetData')
        end_tag(doc, 'worksheet')
        #doc.endDocument()

    def get_dimensions(self):
        """Return the bottom-right cell reference of the written area."""
        if not self._max_col or not self._max_row:
            return 'A1'
        else:
            return '%s%d' % (get_column_letter(self._max_col), (self._max_row))

    def append(self, row):
        """
        :param row: iterable containing values to append
        :type row: iterable
        """
        doc = self.doc
        self._max_row += 1
        span = len(row)
        self._max_col = max(self._max_col, span)
        row_idx = self._max_row
        attrs = {'r': '%d' % row_idx,
                 'spans': '1:%d' % span}
        start_tag(doc, 'row', attrs)
        for col_idx, cell in enumerate(row):
            if cell is None:
                continue  # empty cells produce no <c> element at all
            coordinate = '%s%d' % (get_column_letter(col_idx+1), row_idx)
            attributes = {'r': coordinate}
            # bool is checked before int because bool subclasses int
            if isinstance(cell, bool):
                dtype = 'boolean'
            elif isinstance(cell, (int, float)):
                dtype = 'numeric'
            elif isinstance(cell, (datetime.datetime, datetime.date)):
                dtype = 'datetime'
                cell = self._shared_date.datetime_to_julian(cell)
                attributes['s'] = STYLES[dtype]['style']
            elif cell and cell[0] == '=':
                dtype = 'formula'
            else:
                dtype = 'string'
                # strings are stored via the shared-string table index
                cell = self._string_builder.add(cell)
            attributes['t'] = STYLES[dtype]['type']
            start_tag(doc, 'c', attributes)
            if dtype == 'formula':
                tag(doc, 'f', body = '%s' % cell[1:])
                tag(doc, 'v')
            else:
                tag(doc, 'v', body = '%s' % cell)
            end_tag(doc, 'c')
        end_tag(doc, 'row')
def save_dump(workbook, filename):
    """Serialize *workbook* to *filename* with the dump (optimized) writer."""
    ExcelDumpWriter(workbook).save(filename)
    return True
class ExcelDumpWriter(ExcelWriter):
    """Excel package writer specialized for DumpWorksheet instances."""

    def __init__(self, workbook):
        self.workbook = workbook
        self.style_writer = StyleDumpWriter(workbook)
        # Register the datetime style so STYLES['datetime'] index '1' resolves.
        self.style_writer._style_list.append(DATETIME_STYLE)

    def _write_string_table(self, archive):
        """Serialize the shared-string table into the archive and return it."""
        table = self.workbook.strings_table_builder.get_table()
        archive.writestr(ARC_SHARED_STRINGS, write_string_table(table))
        return table

    def _write_worksheets(self, archive, shared_string_table, style_writer):
        """Finalize each dump worksheet, pack it, then remove the temp file."""
        for index, sheet in enumerate(self.workbook.worksheets, 1):
            sheet.write_header()
            sheet.close()
            archive.write(sheet.filename,
                          PACKAGE_WORKSHEETS + '/sheet%d.xml' % index)
            os.remove(sheet.filename)
class StyleDumpWriter(StyleWriter):
    """Style writer that starts from an empty style list.

    Cells are streamed to disk in optimized-write mode, so no styles can
    be harvested from the workbook up front; they are appended explicitly
    by :class:`ExcelDumpWriter`.
    """

    def _get_style_list(self, workbook):
        return []
|
|
r"""
This paver file is intended to help with the release process as much as
possible. It relies on virtualenv to generate 'bootstrap' environments as
independent from the user system as possible (e.g. to make sure the sphinx doc
is built against the built numpy, not an installed one).
Building changelog + notes
==========================
Assumes you have git and the binaries/tarballs in installers/::
paver write_release
paver write_note
This automatically puts the checksum into README.rst, and writes the Changelog.
TODO
====
- the script is messy, lots of global variables
- make it more easily customizable (through command line args)
- missing targets: install & test, sdist test, debian packaging
- fix bdist_mpkg: we build the same source twice -> how to make sure we use
the same underlying python for egg install in venv and for bdist_mpkg
"""
import os
import sys
import shutil
import hashlib
import textwrap
# The paver package needs to be installed to run tasks
import paver
from paver.easy import Bunch, options, task, sh
#-----------------------------------
# Things to be changed for a release
#-----------------------------------
# Path to the release notes
RELEASE_NOTES = 'doc/source/release/1.21.0-notes.rst'
#-------------------------------------------------------
# Hardcoded build/install dirs, virtualenv options, etc.
#-------------------------------------------------------
# Where to put the release installers
# Configure paver: all release artifacts live under release/, installers
# (sdists, wheels) under release/installers.
options(installers=Bunch(releasedir="release",
                         installersdir=os.path.join("release", "installers")),)
#------------------------
# Get the release version
#------------------------
# Import FULLVERSION from the repo's setup.py: temporarily put this file's
# directory on sys.path and always remove it again afterwards.
sys.path.insert(0, os.path.dirname(__file__))
try:
    from setup import FULLVERSION
finally:
    sys.path.pop(0)
#--------------------------
# Source distribution stuff
#--------------------------
def tarball_name(ftype='gztar'):
    """Generate source distribution name.

    Parameters
    ----------
    ftype : {'zip', 'gztar'}
        Type of archive, default is 'gztar'.

    Returns
    -------
    str
        Archive file name, e.g. ``numpy-<FULLVERSION>.tar.gz``.

    Raises
    ------
    ValueError
        If `ftype` is not a supported archive type.
    """
    root = f'numpy-{FULLVERSION}'
    if ftype == 'gztar':
        return root + '.tar.gz'
    elif ftype == 'zip':
        return root + '.zip'
    # BUG FIX: the message previously interpolated the builtin `type`
    # instead of the offending `ftype` argument.
    raise ValueError(f"Unknown type {ftype}")
@task
def sdist(options):
    """Make source distributions.

    Parameters
    ----------
    options :
        Set by ``task`` decorator.
    """
    # First clean the repo and update submodules (for up-to-date doc html theme
    # and Sphinx extensions)
    sh('git clean -xdf')
    sh('git submodule init')
    sh('git submodule update')

    # To be sure to bypass paver when building sdist... paver + numpy.distutils
    # do not play well together.
    # Cython is run over all Cython files in setup.py, so generated C files
    # will be included.
    sh('python3 setup.py sdist --formats=gztar,zip')

    # Copy the superpack into installers dir
    idirs = options.installers.installersdir
    if not os.path.exists(idirs):
        os.makedirs(idirs)
    # Both archive flavors are copied so either can be signed/uploaded later.
    for ftype in ['gztar', 'zip']:
        source = os.path.join('dist', tarball_name(ftype))
        target = os.path.join(idirs, tarball_name(ftype))
        shutil.copy(source, target)
#-------------
# README stuff
#-------------
def _compute_hash(idirs, hashfunc):
    """Hash every release file under *idirs* with *hashfunc*.

    Parameters
    ----------
    idirs : directory path
        Directory containing files to be hashed.
    hashfunc : hash function
        Function to be used to hash the files.

    Returns
    -------
    list of str
        One ``"<hexdigest> <basename>"`` line per file, sorted by path.
    """
    checksums = []
    for fpath in sorted(paver.path.path(idirs).listdir()):
        with open(fpath, 'rb') as fin:
            digest = hashfunc(fin.read()).hexdigest()
        checksums.append('%s %s' % (digest, os.path.basename(fpath)))
    return checksums
def compute_md5(idirs):
    """Return md5 checksum lines for every file under *idirs*.

    Parameters
    ----------
    idirs : directory path
        Directory containing files to be hashed.
    """
    return _compute_hash(idirs, hashlib.md5)
def compute_sha256(idirs):
    """Return sha256 checksum lines for every file under *idirs*.

    Parameters
    ----------
    idirs : directory path
        Directory containing files to be hashed.

    Notes
    -----
    A stronger checksum than md5, so a gpg-signed README.rst containing the
    sums can be used to verify the binaries instead of signing each binary.
    """
    return _compute_hash(idirs, hashlib.sha256)
def write_release_task(options, filename='README'):
    """Append hashes of release files to release notes.

    This appends file hashes to the release notes and creates
    four README files of the result in various formats:

    - README.rst
    - README.rst.gpg
    - README.md
    - README.md.gpg

    The md file are created using `pandoc` so that the links are
    properly updated. The gpg files are kept separate, so that
    the unsigned files may be edited before signing if needed.

    Parameters
    ----------
    options :
        Set by ``task`` decorator.
    filename : string
        Filename of the modified notes. The file is written
        in the release directory.
    """
    idirs = options.installers.installersdir
    notes = paver.path.path(RELEASE_NOTES)
    rst_readme = paver.path.path(filename + '.rst')
    md_readme = paver.path.path(filename + '.md')

    # append hashes
    with open(rst_readme, 'w') as freadme:
        with open(notes) as fnotes:
            freadme.write(fnotes.read())

        freadme.writelines(textwrap.dedent(
            """
            Checksums
            =========

            MD5
            ---
            ::

            """))
        freadme.writelines([f'    {c}\n' for c in compute_md5(idirs)])

        freadme.writelines(textwrap.dedent(
            """
            SHA256
            ------
            ::

            """))
        freadme.writelines([f'    {c}\n' for c in compute_sha256(idirs)])

    # generate md file using pandoc before signing
    sh(f"pandoc -s -o {md_readme} {rst_readme}")

    # Sign files
    if hasattr(options, 'gpg_key'):
        # BUG FIX: gpg spells this option `--default-key` (with a hyphen);
        # the previous `--default_key` made every keyed invocation fail.
        cmd = f'gpg --clearsign --armor --default-key {options.gpg_key}'
    else:
        cmd = 'gpg --clearsign --armor'
    sh(cmd + f' --output {rst_readme}.gpg {rst_readme}')
    sh(cmd + f' --output {md_readme}.gpg {md_readme}')
@task
def write_release(options):
    """Write the README files.

    Two README files are generated from the release notes, one in ``rst``
    markup for the general release, the other in ``md`` markup for the github
    release notes.

    Parameters
    ----------
    options :
        Set by ``task`` decorator.
    """
    target = os.path.join(options.installers.releasedir, 'README')
    write_release_task(options, target)
|
|
#!/usr/bin/env python2.6
# -*- coding: utf-8 -*-
"""
Open and modify Microsoft Word 2007 docx files (called 'OpenXML' and
'Office OpenXML' by Microsoft)
Part of Python's docx module - http://github.com/mikemaccana/python-docx
See LICENSE for licensing information.
"""
import logging
from lxml import etree
try:
from PIL import Image
except ImportError:
import Image
import zipfile
import shutil
import re
import time
import os
from os.path import join
# Module-level logger; configured by the consuming application.
log = logging.getLogger(__name__)

# Record template directory's location which is just 'template' for a docx
# developer or 'site-packages/docx-template' if you have installed docx
template_dir = join(os.path.dirname(__file__), 'docx-template')  # installed
if not os.path.isdir(template_dir):
    template_dir = join(os.path.dirname(__file__), 'template')  # dev

# All Word prefixes / namespace matches used in document.xml & core.xml.
# LXML doesn't actually use prefixes (just the real namespace) , but these
# make it easier to copy Word output more easily.
nsprefixes = {
    'mo': 'http://schemas.microsoft.com/office/mac/office/2008/main',
    'o': 'urn:schemas-microsoft-com:office:office',
    've': 'http://schemas.openxmlformats.org/markup-compatibility/2006',
    # Text Content
    'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main',
    'w10': 'urn:schemas-microsoft-com:office:word',
    'wne': 'http://schemas.microsoft.com/office/word/2006/wordml',
    # Drawing
    'a': 'http://schemas.openxmlformats.org/drawingml/2006/main',
    'm': 'http://schemas.openxmlformats.org/officeDocument/2006/math',
    'mv': 'urn:schemas-microsoft-com:mac:vml',
    'pic': 'http://schemas.openxmlformats.org/drawingml/2006/picture',
    'v': 'urn:schemas-microsoft-com:vml',
    'wp': 'http://schemas.openxmlformats.org/drawingml/2006/wordprocessingDrawing',
    # Properties (core and extended)
    'cp': 'http://schemas.openxmlformats.org/package/2006/metadata/core-properties',
    'dc': 'http://purl.org/dc/elements/1.1/',
    'ep': 'http://schemas.openxmlformats.org/officeDocument/2006/extended-properties',
    'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
    # Content Types
    'ct': 'http://schemas.openxmlformats.org/package/2006/content-types',
    # Package Relationships
    'r': 'http://schemas.openxmlformats.org/officeDocument/2006/relationships',
    'pr': 'http://schemas.openxmlformats.org/package/2006/relationships',
    # Dublin Core document properties
    'dcmitype': 'http://purl.org/dc/dcmitype/',
    'dcterms': 'http://purl.org/dc/terms/'}
def opendocx(file):
    '''Open a docx file, return the document XML tree.

    `file` may be a path or a file-like object accepted by
    ``zipfile.ZipFile``.  The archive is always closed before returning,
    so the underlying file handle is not leaked (previously the ZipFile
    was left open).
    '''
    # A docx is a zip package; the main body lives in word/document.xml.
    mydoc = zipfile.ZipFile(file)
    try:
        xmlcontent = mydoc.read('word/document.xml')
    finally:
        mydoc.close()
    return etree.fromstring(xmlcontent)
def newdocument():
    '''Return a minimal document tree: a <document> holding an empty <body>.'''
    root = makeelement('document')
    body = makeelement('body')
    root.append(body)
    return root
def makeelement(tagname, tagtext=None, nsprefix='w', attributes=None, attrnsprefix=None):
    '''Create and return an lxml element whose tag (and optionally its
    attributes) are namespace-qualified via the `nsprefixes` table.'''
    # A list of prefixes produces an nsmap on the element.
    nsmap = None
    if isinstance(nsprefix, list):
        nsmap = dict((prefix, nsprefixes[prefix]) for prefix in nsprefix)
        # FIXME: rest of code below expects a single prefix
        nsprefix = nsprefix[0]

    # For when namespace = None an empty Clark prefix is used.
    namespace = '{' + nsprefixes[nsprefix] + '}' if nsprefix else ''
    newelement = etree.Element(namespace + tagname, nsmap=nsmap)

    # Add attributes with namespaces
    if attributes:
        if attrnsprefix:
            attributenamespace = '{' + nsprefixes[attrnsprefix] + '}'
        elif nsprefix == 'w':
            # Quick hack: it seems every element that has a 'w' nsprefix for
            # its tag uses the same prefix for its attributes
            attributenamespace = namespace
        else:
            # No attribute namespace requested: use none at all.
            attributenamespace = ''
        for attrname in attributes:
            newelement.set(attributenamespace + attrname, attributes[attrname])

    if tagtext:
        newelement.text = tagtext
    return newelement
def pagebreak(type='page', orient='portrait'):
    '''Insert a break, default 'page'.
    See http://openxmldeveloper.org/forums/thread/4075.aspx
    Return our page break element.

    @param string type:   'page' or 'section'
    @param string orient: 'portrait' or 'landscape'; only used for
                          section breaks.
    '''
    # Need to enumerate different types of page breaks.
    validtypes = ['page', 'section']
    if type not in validtypes:
        tmpl = 'Page break style "%s" not implemented. Valid styles: %s.'
        raise ValueError(tmpl % (type, validtypes))
    pagebreak = makeelement('p')
    if type == 'page':
        run = makeelement('r')
        br = makeelement('br', attributes={'type': type})
        run.append(br)
        pagebreak.append(run)
    elif type == 'section':
        pPr = makeelement('pPr')
        sectPr = makeelement('sectPr')
        if orient == 'portrait':
            pgSz = makeelement('pgSz', attributes={'w': '12240', 'h': '15840'})
        elif orient == 'landscape':
            pgSz = makeelement('pgSz', attributes={'h': '12240', 'w': '15840',
                                                   'orient': 'landscape'})
        else:
            # BUG FIX: an unknown orient previously fell through and raised
            # a confusing NameError on pgSz; fail fast with a clear message.
            raise ValueError('Invalid orientation "%s"; expected "portrait" '
                             'or "landscape".' % orient)
        sectPr.append(pgSz)
        pPr.append(sectPr)
        pagebreak.append(pPr)
    return pagebreak
def paragraph(paratext, style='BodyText', breakbefore=False, jc='left'):
    '''Make a new paragraph element, containing a run, and some text.
    Return the paragraph element.

    @param string jc: Paragraph alignment, possible values:
                      left, center, right, both (justified), ...
                      see http://www.schemacentral.com/sc/ooxml/t-w_ST_Jc.html
                      for a full list

    If paratext is a list, spawn multiple run/text elements.
    Support text styles (paratext must then be a list of lists in the form
    <text> / <style>. Style is a string containing a combination of 'bui'
    (bold/underline/italic) chars

    example
    paratext =\
        [ ('some bold text', 'b')
        , ('some normal text', '')
        , ('some italic underlined text', 'iu')
        ]
    '''
    # Make our elements
    paragraph = makeelement('p')

    # Normalize paratext into a list of [<t element>, style-chars] pairs.
    if isinstance(paratext, list):
        text = []
        for pt in paratext:
            if isinstance(pt, (list, tuple)):
                # (text, style) pair
                text.append([makeelement('t', tagtext=pt[0]), pt[1]])
            else:
                # plain string: no styling
                text.append([makeelement('t', tagtext=pt), ''])
    else:
        text = [[makeelement('t', tagtext=paratext), ''], ]

    pPr = makeelement('pPr')
    pStyle = makeelement('pStyle', attributes={'val': style})
    pJc = makeelement('jc', attributes={'val': jc})
    pPr.append(pStyle)
    pPr.append(pJc)

    # Add the text the run, and the run to the paragraph
    paragraph.append(pPr)
    for t in text:
        run = makeelement('r')
        rPr = makeelement('rPr')
        # Apply styles
        if t[1].find('b') > -1:
            b = makeelement('b')
            rPr.append(b)
        if t[1].find('u') > -1:
            u = makeelement('u', attributes={'val': 'single'})
            rPr.append(u)
        if t[1].find('i') > -1:
            i = makeelement('i')
            rPr.append(i)
        run.append(rPr)
        # Insert lastRenderedPageBreak for assistive technologies like
        # document narrators to know when a page break occurred.
        if breakbefore:
            lastRenderedPageBreak = makeelement('lastRenderedPageBreak')
            run.append(lastRenderedPageBreak)
        run.append(t[0])
        paragraph.append(run)
    # Return the combined paragraph
    return paragraph
def contenttypes():
    '''Return the [Content_Types].xml tree for a new docx package.'''
    types = etree.fromstring(
        '<Types xmlns="http://schemas.openxmlformats.org/package/2006/conten'
        't-types"></Types>')

    # Explicit content types, one Override element per package part.
    parts = {
        '/word/theme/theme1.xml': 'application/vnd.openxmlformats-officedocu'
                                  'ment.theme+xml',
        '/word/fontTable.xml': 'application/vnd.openxmlformats-officedocu'
                               'ment.wordprocessingml.fontTable+xml',
        '/docProps/core.xml': 'application/vnd.openxmlformats-package.co'
                              're-properties+xml',
        '/docProps/app.xml': 'application/vnd.openxmlformats-officedocu'
                             'ment.extended-properties+xml',
        '/word/document.xml': 'application/vnd.openxmlformats-officedocu'
                              'ment.wordprocessingml.document.main+xml',
        '/word/settings.xml': 'application/vnd.openxmlformats-officedocu'
                              'ment.wordprocessingml.settings+xml',
        '/word/numbering.xml': 'application/vnd.openxmlformats-officedocu'
                               'ment.wordprocessingml.numbering+xml',
        '/word/styles.xml': 'application/vnd.openxmlformats-officedocu'
                            'ment.wordprocessingml.styles+xml',
        '/word/webSettings.xml': 'application/vnd.openxmlformats-officedocu'
                                 'ment.wordprocessingml.webSettings+xml'}
    for part in parts:
        types.append(makeelement('Override', nsprefix=None,
                                 attributes={'PartName': part,
                                             'ContentType': parts[part]}))

    # Default content types keyed by file extension.
    filetypes = {'gif': 'image/gif',
                 'jpeg': 'image/jpeg',
                 'jpg': 'image/jpeg',
                 'png': 'image/png',
                 'rels': 'application/vnd.openxmlformats-package.relationships+xml',
                 'xml': 'application/xml'}
    for extension in filetypes:
        types.append(makeelement('Default', nsprefix=None,
                                 attributes={'Extension': extension,
                                             'ContentType': filetypes[extension]}))
    return types
def heading(headingtext, headinglevel, lang='en'):
    '''Make a new heading, return the heading element'''
    # Word style names are localized ('Heading1' vs 'Titolo1').
    lmap = {'en': 'Heading', 'it': 'Titolo'}
    stylename = lmap[lang] + str(headinglevel)
    # Build paragraph properties carrying the heading style...
    pr = makeelement('pPr')
    pr.append(makeelement('pStyle', attributes={'val': stylename}))
    # ...and a run holding the heading text.
    run = makeelement('r')
    run.append(makeelement('t', tagtext=headingtext))
    # Assemble and return the combined paragraph.
    paragraph = makeelement('p')
    paragraph.append(pr)
    paragraph.append(run)
    return paragraph
def table(contents, heading=True, colw=None, cwunit='dxa', tblw=0, twunit='auto', borders={}, celstyle=None):
    """
    Return a table element based on specified parameters

    @param list contents: A list of lists describing contents. Every item in
                          the list can be a string or a valid XML element
                          itself. It can also be a list. In that case all the
                          listed elements will be merged into the cell.
    @param bool heading:  Tells whether first line should be treated as
                          heading or not
    @param list colw:     list of integer column widths specified in wunitS.
    @param str cwunit:    Unit used for column width:
                            'pct'  : fiftieths of a percent
                            'dxa'  : twentieths of a point
                            'nil'  : no width
                            'auto' : automagically determined
    @param int tblw:      Table width
    @param int twunit:    Unit used for table width. Same possible values as
                          cwunit.
    @param dict borders:  Dictionary defining table border. Supported keys
                          are: 'top', 'left', 'bottom', 'right',
                          'insideH', 'insideV', 'all'.
                          When specified, the 'all' key has precedence over
                          others. Each key must define a dict of border
                          attributes:
                            color : The color of the border, in hex or
                                    'auto'
                            space : The space, measured in points
                            sz    : The size of the border, in eighths of
                                    a point
                            val   : The style of the border, see
                                    http://www.schemacentral.com/sc/ooxml/t-w_ST_Border.htm
    @param list celstyle: Specify the style for each column, list of dicts.
                          supported keys:
                          'align' : specify the alignment, see paragraph
                                    documentation.
    @return lxml.etree:   Generated XML etree element
    """
    # NOTE: `borders={}` is a mutable default argument; it is only read,
    # never mutated, so sharing the dict between calls is harmless here.
    table = makeelement('tbl')
    columns = len(contents[0])
    # Table properties
    tableprops = makeelement('tblPr')
    tablestyle = makeelement('tblStyle', attributes={'val': ''})
    tableprops.append(tablestyle)
    tablewidth = makeelement('tblW', attributes={'w': str(tblw), 'type': str(twunit)})
    tableprops.append(tablewidth)
    if len(borders.keys()):
        tableborders = makeelement('tblBorders')
        for b in ['top', 'left', 'bottom', 'right', 'insideH', 'insideV']:
            if b in borders.keys() or 'all' in borders.keys():
                # The 'all' entry overrides any per-side entry.
                k = 'all' if 'all' in borders.keys() else b
                attrs = {}
                for a in borders[k].keys():
                    attrs[a] = unicode(borders[k][a])  # Python 2 `unicode`
                borderelem = makeelement(b, attributes=attrs)
                tableborders.append(borderelem)
        tableprops.append(tableborders)
    tablelook = makeelement('tblLook', attributes={'val': '0400'})
    tableprops.append(tablelook)
    table.append(tableprops)
    # Table Grid
    tablegrid = makeelement('tblGrid')
    for i in range(columns):
        tablegrid.append(makeelement('gridCol', attributes={'w': str(colw[i]) if colw else '2390'}))
    table.append(tablegrid)
    # Heading Row
    row = makeelement('tr')
    rowprops = makeelement('trPr')
    cnfStyle = makeelement('cnfStyle', attributes={'val': '000000100000'})
    rowprops.append(cnfStyle)
    row.append(rowprops)
    if heading:
        i = 0
        for heading in contents[0]:
            cell = makeelement('tc')
            # Cell properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            # shaded background marks the heading row
            cellstyle = makeelement('shd', attributes={'val': 'clear',
                                                       'color': 'auto',
                                                       'fill': 'FFFFFF',
                                                       'themeFill': 'text2',
                                                       'themeFillTint': '99'})
            cellprops.append(cellwidth)
            cellprops.append(cellstyle)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(heading, (list, tuple)):
                heading = [heading]
            for h in heading:
                if isinstance(h, etree._Element):
                    cell.append(h)
                else:
                    cell.append(paragraph(h, jc='center'))
            row.append(cell)
            i += 1
        table.append(row)
    # Contents Rows
    for contentrow in contents[1 if heading else 0:]:
        row = makeelement('tr')
        i = 0
        for content in contentrow:
            cell = makeelement('tc')
            # Properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellprops.append(cellwidth)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(content, (list, tuple)):
                content = [content]
            for c in content:
                if isinstance(c, etree._Element):
                    cell.append(c)
                else:
                    if celstyle and 'align' in celstyle[i].keys():
                        align = celstyle[i]['align']
                    else:
                        align = 'left'
                    cell.append(paragraph(c, jc=align))
            row.append(cell)
            i += 1
        table.append(row)
    return table
def picture(relationshiplist, picname, picdescription, pixelwidth=None, pixelheight=None, nochangeaspect=True, nochangearrowheads=True):
    '''Take a relationshiplist, picture file name, and return a paragraph containing the image
    and an updated relationshiplist'''
    # http://openxmldeveloper.org/articles/462.aspx
    # Create an image. Size may be specified, otherwise it will based on the
    # pixel size of image. Return a paragraph containing the picture'''
    # Copy the file into the media dir
    media_dir = join(template_dir, 'word', 'media')
    if not os.path.isdir(media_dir):
        os.mkdir(media_dir)
    shutil.copyfile(picname, join(media_dir, picname))
    # Check if the user has specified a size
    if not pixelwidth or not pixelheight:
        # If not, get info from the picture itself
        pixelwidth, pixelheight = Image.open(picname).size[0:2]
    # OpenXML measures on-screen objects in English Metric Units
    # 1cm = 36000 EMUs
    # NOTE(review): 12700 EMU is 1 typographic point, not 1 pixel at 96dpi
    # (which would be 9525) -- confirm the intended conversion.
    emuperpixel = 12700
    width = str(pixelwidth * emuperpixel)
    height = str(pixelheight * emuperpixel)
    # Set relationship ID to the first available
    picid = '2'
    picrelid = 'rId'+str(len(relationshiplist)+1)
    relationshiplist.append([
        'http://schemas.openxmlformats.org/officeDocument/2006/relationships/image',
        'media/'+picname])
    # There are 3 main elements inside a picture
    # 1. The Blipfill - specifies how the image fills the picture area (stretch, tile, etc.)
    blipfill = makeelement('blipFill', nsprefix='pic')
    blipfill.append(makeelement('blip', nsprefix='a', attrnsprefix='r',
                                attributes={'embed': picrelid}))
    stretch = makeelement('stretch', nsprefix='a')
    stretch.append(makeelement('fillRect', nsprefix='a'))
    blipfill.append(makeelement('srcRect', nsprefix='a'))
    blipfill.append(stretch)
    # 2. The non visual picture properties
    nvpicpr = makeelement('nvPicPr', nsprefix='pic')
    cnvpr = makeelement('cNvPr', nsprefix='pic',
                        attributes={'id': '0', 'name': 'Picture 1', 'descr': picname})
    nvpicpr.append(cnvpr)
    cnvpicpr = makeelement('cNvPicPr', nsprefix='pic')
    cnvpicpr.append(makeelement('picLocks', nsprefix='a',
                                attributes={'noChangeAspect': str(int(nochangeaspect)),
                                            'noChangeArrowheads': str(int(nochangearrowheads))}))
    nvpicpr.append(cnvpicpr)
    # 3. The Shape properties
    sppr = makeelement('spPr', nsprefix='pic', attributes={'bwMode': 'auto'})
    xfrm = makeelement('xfrm', nsprefix='a')
    xfrm.append(makeelement('off', nsprefix='a', attributes={'x': '0', 'y': '0'}))
    xfrm.append(makeelement('ext', nsprefix='a', attributes={'cx': width, 'cy': height}))
    prstgeom = makeelement('prstGeom', nsprefix='a', attributes={'prst': 'rect'})
    prstgeom.append(makeelement('avLst', nsprefix='a'))
    sppr.append(xfrm)
    sppr.append(prstgeom)
    # Add our 3 parts to the picture element
    pic = makeelement('pic', nsprefix='pic')
    pic.append(nvpicpr)
    pic.append(blipfill)
    pic.append(sppr)
    # Now make the supporting elements
    # The following sequence is just: make element, then add its children
    graphicdata = makeelement('graphicData', nsprefix='a',
                              attributes={'uri': 'http://schemas.openxmlforma'
                                          'ts.org/drawingml/2006/picture'})
    graphicdata.append(pic)
    graphic = makeelement('graphic', nsprefix='a')
    graphic.append(graphicdata)
    framelocks = makeelement('graphicFrameLocks', nsprefix='a',
                             attributes={'noChangeAspect': '1'})
    framepr = makeelement('cNvGraphicFramePr', nsprefix='wp')
    framepr.append(framelocks)
    docpr = makeelement('docPr', nsprefix='wp',
                        attributes={'id': picid, 'name': 'Picture 1',
                                    'descr': picdescription})
    effectextent = makeelement('effectExtent', nsprefix='wp',
                               attributes={'l': '25400', 't': '0', 'r': '0',
                                           'b': '0'})
    extent = makeelement('extent', nsprefix='wp',
                         attributes={'cx': width, 'cy': height})
    inline = makeelement('inline', attributes={'distT': "0", 'distB': "0",
                                               'distL': "0", 'distR': "0"},
                         nsprefix='wp')
    inline.append(extent)
    inline.append(effectextent)
    inline.append(docpr)
    inline.append(framepr)
    inline.append(graphic)
    drawing = makeelement('drawing')
    drawing.append(inline)
    run = makeelement('r')
    run.append(drawing)
    # NOTE(review): this local shadows the module-level paragraph() helper.
    paragraph = makeelement('p')
    paragraph.append(run)
    return relationshiplist, paragraph
def search(document, search):
    '''Search a document for a regex, return success / fail result'''
    pattern = re.compile(search)
    text_tag = '{%s}t' % nsprefixes['w']  # w:t (text) elements
    return any(
        pattern.search(node.text) is not None
        for node in document.iter()
        if node.tag == text_tag and node.text
    )
def replace(document, search, replace):
    '''Replace all occurences of string with a different string, return updated document'''
    pattern = re.compile(search)
    text_tag = '{%s}t' % nsprefixes['w']  # w:t (text) elements
    for node in document.iter():
        if node.tag == text_tag and node.text and pattern.search(node.text):
            node.text = re.sub(search, replace, node.text)
    # The document is modified in place; returned for caller convenience.
    return document
def clean(document):
    """ Perform misc cleaning operations on documents.
    Removes w:t and w:r elements that have neither text nor children.
    Returns cleaned document.
    """
    for suffix in ('t', 'r'):
        tag = '{%s}%s' % (nsprefixes['w'], suffix)
        # Collect first, then remove, so we never mutate while iterating.
        doomed = [node for node in document.iter()
                  if node.tag == tag and not node.text and not len(node)]
        for node in doomed:
            node.getparent().remove(node)
    return document
def findTypeParent(element, tag):
    """ Finds first parent of element of the given type
    @param object element: etree element
    @param string tag: the tag of the parent to search for
    @return object element: the found parent or None when not found
    """
    # Walk up the ancestor chain. getparent() returns None once we pass the
    # root; the original looped `while True` and dereferenced that None
    # (AttributeError), making its `return None` unreachable.
    p = element.getparent()
    while p is not None:
        if p.tag == tag:
            return p
        p = p.getparent()
    # Not found
    return None
def AdvSearch(document, search, bs=3):
    '''Return set of all regex matches
    Advanced version of python-docx.search(): since the text to search
    could be spawned across multiple w:t text blocks, a sliding window of
    the last <= bs text elements is kept, and every contiguous run inside
    the window (shortest runs first) is joined and searched.
    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param int bs: maximum window size (number of text elements)
    @return set All occurences of search string
    '''
    pattern = re.compile(search)
    text_tag = '{%s}t' % nsprefixes['w']
    window = []   # last <= bs text elements seen so far
    hits = []

    def _first_hit():
        # Join every contiguous run of window elements, shortest first,
        # and return the first regex match found (or None).
        for run_len in range(1, len(window) + 1):
            for start in range(len(window)):
                if start + run_len <= len(window):
                    joined = ''.join(
                        el.text for el in window[start:start + run_len])
                    found = pattern.search(joined)
                    if found:
                        return found
        return None

    for node in document.iter():
        if node.tag == text_tag and node.text:
            window.append(node)
            if len(window) > bs:
                # Window full: drop the oldest element
                window.pop(0)
            hit = _first_hit()
            if hit is not None:
                hits.append(hit.group())
    return set(hits)
def advReplace(document, search, replace, bs=3):
    """
    Replace all occurences of string with a different string, return updated
    document
    This is a modified version of python-docx.replace() that takes into
    account blocks of <bs> elements at a time. The replace element can also
    be a string or an xml etree element.
    What it does:
    It searches the entire document body for text blocks.
    Since the text to search could be spawned across multiple text blocks,
    a sliding window of up to bs text elements is scanned; the smallest
    matching group of blocks is adopted. If the matching group has more
    than one block, blocks other than the first are cleared and all the
    replacement text is put on the first block.
    Examples:
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello,' / 'Hi!'
    output blocks : [ 'Hi!', '', ' world!' ]
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hello, world' / 'Hi!'
    output blocks : [ 'Hi!!', '', '' ]
    original text blocks : [ 'Hel', 'lo,', ' world!' ]
    search / replace: 'Hel' / 'Hal'
    output blocks : [ 'Hal', 'lo,', ' world!' ]
    @param instance document: The original document
    @param str search: The text to search for (regexp)
    @param mixed replace: The replacement text or lxml.etree element to
    append, or a list of etree elements
    @param int bs: See above
    @return instance The document with replacement applied
    """
    # Enables debug output
    DEBUG = False
    newdocument = document
    # Compile the search regexp
    searchre = re.compile(search)
    # Will match against searchels. Searchels is a list that contains last
    # n text elements found in the document. 1 < n < bs
    searchels = []
    for element in newdocument.iter():
        if element.tag == '{%s}t' % nsprefixes['w']:  # t (text) elements
            if element.text:
                # Add this element to searchels
                searchels.append(element)
                if len(searchels) > bs:
                    # If searchels is too long, remove first elements
                    searchels.pop(0)
                # Search all combinations of searchels, starting from
                # smaller up to bigger ones
                # l = search length
                # s = search start
                # e = element IDs to merge
                found = False
                for l in range(1, len(searchels)+1):
                    if found:
                        break
                    for s in range(len(searchels)):
                        if found:
                            break
                        if s+l <= len(searchels):
                            e = range(s, s+l)
                            txtsearch = ''
                            for k in e:
                                txtsearch += searchels[k].text
                            # Search for the text in the whole txtsearch
                            match = searchre.search(txtsearch)
                            if match:
                                found = True
                                # I've found something :)
                                if DEBUG:
                                    log.debug("Found element!")
                                    log.debug("Search regexp: %s", searchre.pattern)
                                    log.debug("Requested replacement: %s", replace)
                                    log.debug("Matched text: %s", txtsearch)
                                    log.debug("Matched text (splitted): %s", map(lambda i: i.text, searchels))
                                    log.debug("Matched at position: %s", match.start())
                                    log.debug("matched in elements: %s", e)
                                    if isinstance(replace, etree._Element):
                                        log.debug("Will replace with XML CODE")
                                    # BUG FIX: original called
                                    # replace(list, tuple) instead of
                                    # testing replace's type.
                                    elif isinstance(replace, (list, tuple)):
                                        log.debug("Will replace with LIST OF ELEMENTS")
                                    else:
                                        # BUG FIX: original passed an extra
                                        # arg with no %s placeholder.
                                        log.debug("Will replace with: %s",
                                                  re.sub(search, replace, txtsearch))
                                curlen = 0
                                replaced = False
                                for i in e:
                                    curlen += len(searchels[i].text)
                                    if curlen > match.start() and not replaced:
                                        # The match occurred in THIS element.
                                        # Put in the whole replaced text
                                        if isinstance(replace, etree._Element):
                                            # Convert to a list and process it later
                                            replace = [replace]
                                        if isinstance(replace, (list, tuple)):
                                            # I'm replacing with a list of etree elements
                                            # clear the text in the tag and append the element after the
                                            # parent paragraph
                                            # (because t elements cannot have childs)
                                            p = findTypeParent(searchels[i], '{%s}p' % nsprefixes['w'])
                                            searchels[i].text = re.sub(search, '', txtsearch)
                                            insindex = p.getparent().index(p) + 1
                                            for r in replace:
                                                p.getparent().insert(insindex, r)
                                                insindex += 1
                                        else:
                                            # Replacing with pure text
                                            searchels[i].text = re.sub(search, replace, txtsearch)
                                        replaced = True
                                        log.debug("Replacing in element #: %s", i)
                                    else:
                                        # Clears the other text elements
                                        searchels[i].text = ''
    return newdocument
def getdocumenttext(document):
    '''Return the raw text of a document, as a list of paragraphs.'''
    para_tag = '{' + nsprefixes['w'] + '}p'
    text_tag = '{' + nsprefixes['w'] + '}t'
    tab_tag = '{' + nsprefixes['w'] + '}tab'
    paratextlist = []
    # A sentence may be spread over multiple w:t elements, so gather all
    # text (and tab) descendants of each paragraph before joining.
    for para in (el for el in document.iter() if el.tag == para_tag):
        pieces = []
        for node in para.iter():
            if node.tag == text_tag and node.text:
                pieces.append(node.text)
            elif node.tag == tab_tag:
                pieces.append('\t')
        joined = u''.join(pieces)
        # Skip paragraphs that produced no text at all.
        if joined:
            paratextlist.append(joined)
    return paratextlist
def coreproperties(title, subject, creator, keywords, lastmodifiedby=None):
    '''Create core properties (common document properties referred to in the 'Dublin Core' specification).
    See appproperties() for other stuff.

    keywords is an iterable of strings, joined with commas into one
    cp:keywords element. lastmodifiedby defaults to creator.
    '''
    coreprops = makeelement('coreProperties', nsprefix='cp')
    coreprops.append(makeelement('title', tagtext=title, nsprefix='dc'))
    coreprops.append(makeelement('subject', tagtext=subject, nsprefix='dc'))
    coreprops.append(makeelement('creator', tagtext=creator, nsprefix='dc'))
    coreprops.append(makeelement('keywords', tagtext=','.join(keywords),
                                 nsprefix='cp'))
    if not lastmodifiedby:
        lastmodifiedby = creator
    coreprops.append(makeelement('lastModifiedBy', tagtext=lastmodifiedby,
                                 nsprefix='cp'))
    coreprops.append(makeelement('revision', tagtext='1', nsprefix='cp'))
    coreprops.append(makeelement('category', tagtext='Examples', nsprefix='cp'))
    coreprops.append(makeelement('description', tagtext='Examples',
                                 nsprefix='dc'))
    # NOTE(review): strftime uses local time but the format appends a
    # literal 'Z' (UTC designator) -- confirm this is intended.
    currenttime = time.strftime('%Y-%m-%dT%H:%M:%SZ')
    # Document creation and modify times
    # Prob here: we have an attribute whose name uses one namespace, and that
    # attribute's value uses another namespace.
    # We're creating the element from a string as a workaround...
    # (The stray `pass` that followed this loop has been removed.)
    for doctime in ['created', 'modified']:
        coreprops.append(etree.fromstring(
            '''<dcterms:'''+doctime+''' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:dcterms="http://purl.org/dc/terms/" xsi:type="dcterms:W3CDTF">'''+currenttime+'''</dcterms:'''+doctime+'''>'''))
    return coreprops
def appproperties():
    """
    Create app-specific properties. See docproperties() for more common
    document properties.

    Returns an extended-properties <Properties> element populated with
    static placeholder statistics.
    """
    # Built from a string so both the default extended-properties namespace
    # and the vt namespace are declared. The original code first built the
    # element with makeelement() and immediately overwrote it -- that dead
    # assignment has been removed.
    appprops = etree.fromstring(
        '<?xml version="1.0" encoding="UTF-8" standalone="yes"?><Properties x'
        'mlns="http://schemas.openxmlformats.org/officeDocument/2006/extended'
        '-properties" xmlns:vt="http://schemas.openxmlformats.org/officeDocum'
        'ent/2006/docPropsVTypes"></Properties>')
    # NOTE(review): these values are hard-coded placeholders, not computed
    # from the actual document content.
    props = \
        {'Template': 'Normal.dotm',
         'TotalTime': '6',
         'Pages': '1',
         'Words': '83',
         'Characters': '475',
         'Application': 'Microsoft Word 12.0.0',
         'DocSecurity': '0',
         'Lines': '12',
         'Paragraphs': '8',
         'ScaleCrop': 'false',
         'LinksUpToDate': 'false',
         'CharactersWithSpaces': '583',
         'SharedDoc': 'false',
         'HyperlinksChanged': 'false',
         'AppVersion': '12.0000'}
    for name, value in props.items():
        appprops.append(makeelement(name, tagtext=value, nsprefix=None))
    return appprops
def websettings():
    '''Generate websettings'''
    settings_root = makeelement('webSettings')
    for child_tag in ('allowPNG', 'doNotSaveAsSingleFile'):
        settings_root.append(makeelement(child_tag))
    return settings_root
def relationshiplist():
    '''Return the default list of [type, target] document relationships.'''
    base = ('http://schemas.openxmlformats.org/officeDocument/2006/'
            'relationships/')
    pairs = (('numbering', 'numbering.xml'),
             ('styles', 'styles.xml'),
             ('settings', 'settings.xml'),
             ('webSettings', 'webSettings.xml'),
             ('fontTable', 'fontTable.xml'),
             ('theme', 'theme/theme1.xml'))
    return [[base + reltype, target] for reltype, target in pairs]
def wordrelationships(relationshiplist):
    '''Generate a Word relationships file'''
    # Default list of relationships
    # FIXME: using string hack instead of making element
    #relationships = makeelement('Relationships', nsprefix='pr')
    relationships = etree.fromstring(
        '<Relationships xmlns="http://schemas.openxmlformats.org/package/2006'
        '/relationships"></Relationships>')
    # Relationship IDs (rId) start at 1.
    for num, rel in enumerate(relationshiplist, start=1):
        relationships.append(
            makeelement('Relationship', nsprefix=None,
                        attributes={'Id': 'rId' + str(num),
                                    'Type': rel[0],
                                    'Target': rel[1]}))
    return relationships
def savedocx(document, coreprops, appprops, contenttypes, websettings, wordrelationships, output):
    '''Save a modified document.

    Serializes each XML tree into its archive member of a new .docx (zip)
    file at *output*, then copies every support file from the template
    directory into the archive.
    '''
    assert os.path.isdir(template_dir)
    docxfile = zipfile.ZipFile(output, mode='w',
                               compression=zipfile.ZIP_DEFLATED)
    # Move to the template data path; both the zip handle and the working
    # directory are now restored in a finally block, so an error while
    # writing cannot leak the handle or leave the process in the wrong
    # directory (the original skipped both cleanups on failure).
    prev_dir = os.path.abspath('.')  # save previous working dir
    os.chdir(template_dir)
    try:
        # Serialize our trees into the zip file
        treesandfiles = {document: 'word/document.xml',
                         coreprops: 'docProps/core.xml',
                         appprops: 'docProps/app.xml',
                         contenttypes: '[Content_Types].xml',
                         websettings: 'word/webSettings.xml',
                         wordrelationships: 'word/_rels/document.xml.rels'}
        for tree in treesandfiles:
            log.info('Saving: %s', treesandfiles[tree])
            treestring = etree.tostring(tree, pretty_print=True)
            docxfile.writestr(treesandfiles[tree], treestring)
        # Add & compress support files
        files_to_ignore = ['.DS_Store']  # nuisance from some os's
        for dirpath, dirnames, filenames in os.walk('.'):
            for filename in filenames:
                if filename in files_to_ignore:
                    continue
                templatefile = join(dirpath, filename)
                archivename = templatefile[2:]  # strip leading './'
                log.info('Saving: %s', archivename)
                docxfile.write(templatefile, archivename)
        log.info('Saved new file to: %r', output)
    finally:
        docxfile.close()
        os.chdir(prev_dir)  # restore previous working dir
    return
|
|
# -*- coding:utf-8 -*-
#
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# Copyright 2015 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import os
import shutil
import tempfile
import unittest
from bandit.core import utils as b_utils
def _touch(path):
    '''Create an empty file at ``path``.'''
    # Context manager guarantees the handle is closed even on error
    # (idiomatic replacement for manual open()/close()).
    with open(path, 'w'):
        pass
class UtilTests(unittest.TestCase):
    '''This set of tests exercises bandit.core.util functions
    '''

    def setUp(self):
        # Build the fake module tree used by the qualname tests below.
        super(UtilTests, self).setUp()
        self._setup_get_module_qualname_from_path()

    def tearDown(self):
        self._tear_down_get_module_qualname_from_path()

    def _setup_get_module_qualname_from_path(self):
        '''Setup a fake module directory tree for testing
        get_module_qualname_from_path().
        Create temporary directory and then create fake .py files
        within directory structure. We setup test cases for
        a typical module, a path missing a middle __init__.py,
        no __init__.py anywhere in path, symlinking .py files.
        '''
        self.tempdir = tempfile.mkdtemp()
        # Relative form of the same tree, for the *_rel_* test cases.
        self.reltempdir = os.path.relpath(self.tempdir)

        # good/a/b/c/test_typical.py -- every level has an __init__.py
        os.makedirs(os.path.join(
            self.tempdir, 'good', 'a', 'b', 'c'), 0o755)
        _touch(os.path.join(self.tempdir, 'good', '__init__.py'))
        _touch(os.path.join(self.tempdir, 'good', 'a', '__init__.py'))
        _touch(os.path.join(
            self.tempdir, 'good', 'a', 'b', '__init__.py'))
        _touch(os.path.join(
            self.tempdir, 'good', 'a', 'b', 'c', '__init__.py'))
        _touch(os.path.join(
            self.tempdir, 'good', 'a', 'b', 'c', 'test_typical.py'))

        # missingmid/a/b/c/test_missingmid.py -- 'a' has no __init__.py
        os.makedirs(os.path.join(
            self.tempdir, 'missingmid', 'a', 'b', 'c'), 0o755)
        _touch(os.path.join(self.tempdir, 'missingmid', '__init__.py'))
        # no missingmid/a/__init__.py
        _touch(os.path.join(
            self.tempdir, 'missingmid', 'a', 'b', '__init__.py'))
        _touch(os.path.join(
            self.tempdir, 'missingmid', 'a', 'b', 'c', '__init__.py'))
        _touch(os.path.join(
            self.tempdir, 'missingmid', 'a', 'b', 'c', 'test_missingmid.py'))

        # missingend/a/b/c/test_missingend.py -- last dir has no __init__.py
        os.makedirs(os.path.join(
            self.tempdir, 'missingend', 'a', 'b', 'c'), 0o755)
        _touch(os.path.join(
            self.tempdir, 'missingend', '__init__.py'))
        _touch(os.path.join(
            self.tempdir, 'missingend', 'a', 'b', '__init__.py'))
        # no missingend/a/b/c/__init__.py
        _touch(os.path.join(
            self.tempdir, 'missingend', 'a', 'b', 'c', 'test_missingend.py'))

        # syms/a/bsym/c/test_typical.py -- bsym is a symlink into good/
        os.makedirs(os.path.join(self.tempdir, 'syms', 'a'), 0o755)
        _touch(os.path.join(self.tempdir, 'syms', '__init__.py'))
        _touch(os.path.join(self.tempdir, 'syms', 'a', '__init__.py'))
        os.symlink(os.path.join(self.tempdir, 'good', 'a', 'b'),
                   os.path.join(self.tempdir, 'syms', 'a', 'bsym'))

    def _tear_down_get_module_qualname_from_path(self):
        '''Remove temp directory tree from test setup'''
        shutil.rmtree(self.tempdir)

    def test_get_module_qualname_from_path_abs_typical(self):
        '''Test get_module_qualname_from_path with typical absolute paths'''
        name = b_utils.get_module_qualname_from_path(os.path.join(
            self.tempdir, 'good', 'a', 'b', 'c', 'test_typical.py'))
        self.assertEqual('good.a.b.c.test_typical', name)

    def test_get_module_qualname_from_path_abs_missingmid(self):
        '''Test get_module_qualname_from_path with missing module
        __init__.py'''
        # Qualname is truncated at the first ancestor lacking __init__.py.
        name = b_utils.get_module_qualname_from_path(os.path.join(
            self.tempdir, 'missingmid', 'a', 'b', 'c',
            'test_missingmid.py'))
        self.assertEqual('b.c.test_missingmid', name)

    def test_get_module_qualname_from_path_abs_missingend(self):
        '''Test get_module_qualname_from_path with no __init__.py
        in last dir'''
        name = b_utils.get_module_qualname_from_path(os.path.join(
            self.tempdir, 'missingend', 'a', 'b', 'c',
            'test_missingend.py'))
        self.assertEqual('test_missingend', name)

    def test_get_module_qualname_from_path_abs_syms(self):
        '''Test get_module_qualname_from_path with symlink in path'''
        name = b_utils.get_module_qualname_from_path(os.path.join(
            self.tempdir, 'syms', 'a', 'bsym', 'c', 'test_typical.py'))
        self.assertEqual('syms.a.bsym.c.test_typical', name)

    def test_get_module_qualname_from_path_rel_typical(self):
        '''Test get_module_qualname_from_path with typical relative paths'''
        name = b_utils.get_module_qualname_from_path(os.path.join(
            self.reltempdir, 'good', 'a', 'b', 'c', 'test_typical.py'))
        self.assertEqual('good.a.b.c.test_typical', name)

    def test_get_module_qualname_from_path_rel_missingmid(self):
        '''Test get_module_qualname_from_path with module __init__.py
        missing and relative paths'''
        name = b_utils.get_module_qualname_from_path(os.path.join(
            self.reltempdir, 'missingmid', 'a', 'b', 'c',
            'test_missingmid.py'))
        self.assertEqual('b.c.test_missingmid', name)

    def test_get_module_qualname_from_path_rel_missingend(self):
        '''Test get_module_qualname_from_path with __init__.py missing from
        last dir and using relative paths'''
        name = b_utils.get_module_qualname_from_path(os.path.join(
            self.reltempdir, 'missingend', 'a', 'b', 'c',
            'test_missingend.py'))
        self.assertEqual('test_missingend', name)

    def test_get_module_qualname_from_path_rel_syms(self):
        '''Test get_module_qualname_from_path with symbolic relative paths'''
        name = b_utils.get_module_qualname_from_path(os.path.join(
            self.reltempdir, 'syms', 'a', 'bsym', 'c', 'test_typical.py'))
        self.assertEqual('syms.a.bsym.c.test_typical', name)

    def test_get_module_qualname_from_path_sys(self):
        '''Test get_module_qualname_from_path with system module paths'''
        name = b_utils.get_module_qualname_from_path(os.__file__)
        self.assertEqual('os', name)

        # This will fail because of magic for os.path. Not sure how to fix.
        # name = b_utils.get_module_qualname_from_path(os.path.__file__)
        # self.assertEqual(name, 'os.path')

    def test_get_module_qualname_from_path_invalid_path(self):
        '''Test get_module_qualname_from_path with invalid path '''
        name = b_utils.get_module_qualname_from_path('/a/b/c/d/e.py')
        self.assertEqual('e', name)

    def test_get_module_qualname_from_path_dir(self):
        '''Test get_module_qualname_from_path with dir path '''
        with self.assertRaises(b_utils.InvalidModulePath):
            b_utils.get_module_qualname_from_path('/tmp/')

    def test_namespace_path_join(self):
        # Joining a dotted namespace with a trailing name.
        p = b_utils.namespace_path_join('base1.base2', 'name')
        self.assertEqual('base1.base2.name', p)

    def test_namespace_path_split(self):
        # Splitting off the last component of a dotted namespace.
        (head, tail) = b_utils.namespace_path_split('base1.base2.name')
        self.assertEqual('base1.base2', head)
        self.assertEqual('name', tail)

    def test_get_call_name1(self):
        '''Gets a qualified call name'''
        tree = ast.parse('a.b.c.d(x,y)').body[0].value
        name = b_utils.get_call_name(tree, {})
        self.assertEqual('a.b.c.d', name)

    def test_get_call_name2(self):
        '''Gets qualified call name and resolves aliases'''
        tree = ast.parse('a.b.c.d(x,y)').body[0].value
        name = b_utils.get_call_name(tree, {'a': 'alias.x.y'})
        self.assertEqual('alias.x.y.b.c.d', name)
        name = b_utils.get_call_name(tree, {'a.b': 'alias.x.y'})
        self.assertEqual('alias.x.y.c.d', name)
        name = b_utils.get_call_name(tree, {'a.b.c.d': 'alias.x.y'})
        self.assertEqual('alias.x.y', name)

    def test_get_call_name3(self):
        '''Getting name for a complex call'''
        # Subscripted callables are not resolvable to a qualname.
        tree = ast.parse('a.list[0](x,y)').body[0].value
        name = b_utils._get_attr_qual_name(tree, {})
        self.assertEqual('', name)
        # TODO(ljfisher) At best we might be able to get:
        # self.assertEqual(name, 'a.list[0]')

    def test_linerange(self):
        '''linerange() returns the line numbers spanned by an AST node.'''
        # NOTE(review): the file handle is never closed; a `with` block
        # would be tidier, though harmless in a short-lived test.
        # Relies on pytest being run from the repository root so the
        # examples/ path resolves -- TODO confirm.
        self.test_file = open("./examples/jinja2_templating.py")
        self.tree = ast.parse(self.test_file.read())
        # Check linerange returns corrent number of lines
        line = self.tree.body[8]
        lrange = b_utils.linerange(line)
        # line 9 should be three lines long
        self.assertEqual(3, len(lrange))
        # the range should be the correct line numbers
        self.assertEqual([11, 12, 13], list(lrange))
|
|
# -*- coding: utf-8 -*-
"""Tests for multiset methods in module pymsetmath
Test specification
------------------
- test internal methods
- test i/o arguments
- test math
- test functionality
:Author: Hy Carrinski
"""
from decimal import Decimal
import math
import random
import unittest
from pymsetmath.multiset import is_nonneg_int, Multiset
from pymsetmath import examples
class TestMultisetMath(unittest.TestCase):
    """Test Multiset calculations."""
    # NOTE(review): uses xrange, so this suite targets Python 2.

    def setUp(self):
        # Fresh Multiset per test; its internal cache is cleared in tearDown.
        self.mset = Multiset()

    def tearDown(self):
        self.mset.clear()
        self.mset = None

    def test_factorial_random_inputs(self):
        """Test factorial random inputs."""
        # Cross-check against the stdlib for 5 random values below 300.
        for val in random.sample(xrange(300), 5):
            result = self.mset.factorial(val)
            expected = math.factorial(val)
            self.assertEqual(result, expected)

    def test_factorial_bad_inputs(self):
        """Test factorial bad inputs."""
        # Negative and None inputs must raise ValueError.
        inputs = (-1, None)
        for value in inputs:
            self.assertRaises(ValueError, self.mset.factorial, value)

    def test_factorial_small_inputs(self):
        """Test factorial small inputs."""
        pairs = ((0, 1), (1, 1), (2, 2), (3, 6))
        for (value, expected) in pairs:
            self.assertEqual(self.mset.factorial(value), expected)

    def test_clear_method(self):
        """Test clear method."""
        # factorial(10) grows the internal cache; clear() shrinks it back
        # to a single entry.
        self.mset.factorial(10)
        self.assertTrue(len(self.mset._data) > 10)
        self.mset.clear()
        self.assertTrue(len(self.mset._data) == 1)

    def test_is_nonneg_int_on_several_inputs(self):
        """Test is_nonneg_int on several inputs."""
        # Note: integral floats such as 5.0 are accepted.
        pairs = ((None, False), (-1, False),
                 (0, True), (1, True), (5.0, True))
        for (value, expected) in pairs:
            self.assertEqual(is_nonneg_int(value), expected)

    def test_uniq_msets_on_bad_input(self):
        """Test uniq_msets on on bad input."""
        # Wrap the generator in list() so it is actually consumed.
        f = lambda total, length: list(self.mset.uniq_msets(total, length))
        self.assertRaises(TypeError, f, 10, None)
        self.assertRaises(ValueError, f, -3, 2)

    def test_uniq_msets_on_several_inputs(self):
        """Test uniq_msets on on several inputs."""
        # Degenerate lengths: 0 yields one empty tuple, 1 yields (total,).
        pairs = {(10, 0): [()], (10, 1): [(10,)]}
        for (value, expected) in pairs.items():
            result = list(self.mset.uniq_msets(*value))
            self.assertEqual(result, expected)

    def test_uniq_msets_contains_unique_elements(self):
        """Test uniq_msets contains unique elements."""
        expected = set([(3, 2), (4, 1), (5, 0)])
        result = set(self.mset.uniq_msets(5, 2))
        self.assertEqual(result, expected)

    def test_uniq_msets_contains_correct_number_of_elements(self):
        """Test uniq_msets contains correct number of elements."""
        # Compares list length to set length, i.e. checks no duplicates.
        result = list(self.mset.uniq_msets(5, 2))
        self.assertEqual(len(result), len(set(result)))

    def test_num_ways_n_tuple_key(self):
        """Test num_ways n tuple key."""
        expected = (4, 5, 5)
        num_ways = self.mset.num_ways
        result = tuple(len(list(num_ways(4, 4, x))) for x in xrange(1,4))
        self.assertEqual(result, expected)

    def test_number_of_arrangements_bad_input(self):
        """Test number_of_arrangements bad input."""
        num_arrange = self.mset.number_of_arrangements
        self.assertRaises(TypeError, num_arrange, 5)
        self.assertRaises(TypeError, num_arrange, None)
        self.assertRaises(ValueError, num_arrange, ())

    def test_number_of_arrangements_good_input(self):
        """Test number_of_arrangements good input."""
        pairs = (((3,), 1), ((2, 3), 2), ((1, 2, 3), 6))
        num_arrange = self.mset.number_of_arrangements
        for (value, expected) in pairs:
            self.assertEqual(num_arrange(value), expected)

    def test_iterate_through_number_of_arrangements_list_input(self):
        """Test iterate through number_of_arrangements list input."""
        groups = [(0, 5), (1, 4), (2, 3)]
        result = dict((grp, self.mset.number_of_arrangements(grp))
                      for grp in groups)
        expected = {(0, 5): 2, (1, 4): 2, (2, 3): 2}
        self.assertEqual(result, expected)

    def test_iterate_through_number_of_arrangements_by_uniq_msets(self):
        """Test iterate through number_of_arrangements by uniq_msets."""
        result = dict((grp, self.mset.number_of_arrangements(grp))
                      for grp in self.mset.uniq_msets(5, 2))
        expected = {(5, 0): 2, (4, 1): 2, (3, 2): 2}
        self.assertEqual(result, expected)

    def test_multinomial_coeff_bad_inputs(self):
        """Test multinomial_coeff bad inputs."""
        m_coeff = self.mset.multinomial_coeff
        self.assertRaises(TypeError, m_coeff, None)
        self.assertRaises(ValueError, m_coeff, ())

    def test_multinomial_coeff_good_inputs(self):
        """Test multinomial_coeff good inputs."""
        pairs = (((0,), 1), ((3,), 1), ((2, 3), 10), ((1, 2, 3), 60))
        m_coeff = self.mset.multinomial_coeff
        for (value, expected) in pairs:
            self.assertEqual(m_coeff(value), expected)

    def test_number_arrangements_of_uniq_msets_is_mset_number(self):
        """Test number_arrangements of uniq_msets is mset number."""
        # Invariant: summing arrangements over all unique multisets must
        # reproduce the multiset number for (n, m).
        for n in (5, 15, 30):
            for m in (3, 6):
                l1 = self.mset.multiset_number(n, m)
                l2 = sum(self.mset.number_of_arrangements(ms)
                         for ms in self.mset.uniq_msets(n, m))
                self.assertEqual(l1, l2)

    def test_num_uniq_msets_is_equal_to_calculated_number(self):
        """Test num_uniq_msets is equal to calculated number."""
        # Invariant: the closed-form count matches an explicit enumeration.
        for n in (5, 15, 30):
            for m in (3, 6):
                l1 = self.mset.num_uniq_msets(n, m)
                l2 = sum(1 for ms in self.mset.uniq_msets(n, m))
                self.assertEqual(l1, l2)
class TestProbabilities(unittest.TestCase):
    """Test probability calculation examples."""

    def test_ex_compute_prob_top_100_from_10_return_15(self):
        """Test ex compute prob top 100 from 10 return 15."""
        # Pick the probability entry whose count is 15.
        for stats in examples.compute_probabilities(100, 10):
            if stats['count'] == 15:
                result = stats['p']
                break
        expected = 0.5929
        self.assertAlmostEqual(result, expected, 3)

    def test_ex_compute_prob_and_use_threshold(self):
        """Test ex compute prob and use threshold."""
        # With threshold 4 the last yielded entry has count 4.
        for stats in examples.compute_probabilities(5, 2, 4):
            result = stats
        self.assertEqual(result['count'], 4)
        self.assertAlmostEqual(result['p'], 0.375, 3)

    def test_ex_compute_prob_with_boundary_threshold(self):
        """Test ex compute prob with boundary threshold."""
        # Threshold 1 yields nothing, so result stays None.
        result = None
        for stats in examples.compute_probabilities(5, 2, 1):
            result = stats
        self.assertTrue(result is None)

    def test_ex_compute_all_probabilities_for_5_and_2(self):
        """Test ex compute all probabilities for 5 and 2."""
        result = list(examples.compute_all_probabilities(5, 2))
        expected = [{'n': 5, 'm': 2, 'count': 3, 'p': Decimal('1')},
                    {'n': 5, 'm': 2, 'count': 4, 'p': Decimal('0.375')},
                    {'n': 5, 'm': 2, 'count': 5, 'p': Decimal('0.0625')}]
        # Normalize the computed probabilities to Decimal for exact compare.
        for stats in result:
            stats['p'] = Decimal('%g' % stats['p'])
        self.assertEqual(result, expected)

    def test_ex_compute_all_probabilities_for_3_real_cases(self):
        """Test ex compute all probabilities for 3 real cases."""
        pairs = [({'n': 40, 'm': 4, 'count': 20}, float('2.2897244280e-03')),
                 ({'n': 40, 'm': 8, 'count': 10}, float('1.7789512134e-01')),
                 ({'n': 80, 'm': 4, 'count': 35}, float('7.8544408865e-04'))]
        for (param, expected) in pairs:
            n, m = param['n'], param['m']
            # Scan until the entry with the requested count is reached;
            # `stats` deliberately leaks out of the loop for the assert.
            for stats in examples.compute_all_probabilities(n, m):
                if stats['count'] == param['count']:
                    break
            self.assertAlmostEqual(stats['p'], expected, 10)

    def test_ex_print_probabilities_with_bad_inputs(self):
        """Test ex print probabilities with bad inputs."""
        # Each call has one negative argument in a different position.
        ex_print = examples.print_cumulative_prob
        self.assertRaises(ValueError, ex_print, 1, 0, -1)
        self.assertRaises(ValueError, ex_print, 0, -1, 1)
        self.assertRaises(ValueError, ex_print, -1, 1, 0)
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: add the ``owner`` ForeignKey to LogicalGroup.

    Auto-generated by ``schemamigration``; the ``models`` dict below is a
    frozen snapshot of the ORM at generation time and must not be edited
    by hand.
    """

    def forwards(self, orm):
        """Apply: add ``LogicalGroup.owner`` pointing at DataOwner."""
        # Adding field 'LogicalGroup.owner'
        # default=-999 is a sentinel so existing rows satisfy NOT NULL;
        # keep_default=False drops it from the schema after the backfill.
        db.add_column(u'ddsc_core_logicalgroup', 'owner',
                      self.gf('django.db.models.fields.related.ForeignKey')(default=-999, to=orm['lizard_security.DataOwner']),
                      keep_default=False)

    def backwards(self, orm):
        """Revert: drop the ``owner_id`` column again."""
        # Deleting field 'LogicalGroup.owner'
        db.delete_column(u'ddsc_core_logicalgroup', 'owner_id')

    # Frozen ORM snapshot used by South to build fake model classes.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'ddsc_core.compartment': {
            'Meta': {'ordering': "[u'description']", 'object_name': 'Compartment'},
            'begin_date': ('django.db.models.fields.DateField', [], {}),
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
            'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
            'end_date': ('django.db.models.fields.DateField', [], {}),
            'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'numeric_code': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'})
        },
        u'ddsc_core.folder': {
            'Meta': {'object_name': 'Folder'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'path': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        u'ddsc_core.ipaddress': {
            'Meta': {'object_name': 'IPAddress'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'label': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        u'ddsc_core.location': {
            'Meta': {'object_name': 'Location'},
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
            'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'geometry_precision': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
            'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'point_geometry': ('django.contrib.gis.db.models.fields.PointField', [], {'dim': '3', 'null': 'True', 'blank': 'True'}),
            'real_geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {'dim': '3', 'null': 'True', 'blank': 'True'})
        },
        u'ddsc_core.locationgroup': {
            'Meta': {'object_name': 'LocationGroup'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'locations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'location_groups'", 'blank': 'True', 'to': u"orm['ddsc_core.Location']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '80'})
        },
        u'ddsc_core.logicalgroup': {
            'Meta': {'object_name': 'LogicalGroup'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_security.DataOwner']"})
        },
        u'ddsc_core.logicalgroupedge': {
            'Meta': {'unique_together': "((u'child', u'parent'),)", 'object_name': 'LogicalGroupEdge'},
            'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'childs'", 'to': u"orm['ddsc_core.LogicalGroup']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'parents'", 'to': u"orm['ddsc_core.LogicalGroup']"})
        },
        u'ddsc_core.measuringdevice': {
            'Meta': {'ordering': "[u'description']", 'object_name': 'MeasuringDevice'},
            'begin_date': ('django.db.models.fields.DateField', [], {}),
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
            'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
            'end_date': ('django.db.models.fields.DateField', [], {}),
            'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'ddsc_core.measuringmethod': {
            'Meta': {'ordering': "[u'description']", 'object_name': 'MeasuringMethod'},
            'begin_date': ('django.db.models.fields.DateField', [], {}),
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
            'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
            'end_date': ('django.db.models.fields.DateField', [], {}),
            'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'titel': ('django.db.models.fields.CharField', [], {'max_length': '600', 'null': 'True'})
        },
        u'ddsc_core.parameter': {
            'Meta': {'ordering': "[u'description']", 'object_name': 'Parameter'},
            'begin_date': ('django.db.models.fields.DateField', [], {}),
            'cas_number': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
            'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
            'end_date': ('django.db.models.fields.DateField', [], {}),
            'group': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'sikb_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True'})
        },
        u'ddsc_core.processingmethod': {
            'Meta': {'ordering': "[u'description']", 'object_name': 'ProcessingMethod'},
            'begin_date': ('django.db.models.fields.DateField', [], {}),
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
            'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
            'end_date': ('django.db.models.fields.DateField', [], {}),
            'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'ddsc_core.referenceframe': {
            'Meta': {'ordering': "[u'description']", 'object_name': 'ReferenceFrame'},
            'begin_date': ('django.db.models.fields.DateField', [], {}),
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
            'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
            'end_date': ('django.db.models.fields.DateField', [], {}),
            'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'ddsc_core.timeseries': {
            'Meta': {'object_name': 'Timeseries'},
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
            'compartment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Compartment']", 'null': 'True', 'blank': 'True'}),
            'data_set': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'timeseries'", 'symmetrical': 'False', 'to': "orm['lizard_security.DataSet']"}),
            'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
            'first_value_timestamp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latest_value_number': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'latest_value_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'latest_value_timestamp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'location': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'timeseries'", 'null': 'True', 'to': u"orm['ddsc_core.Location']"}),
            'measuring_device': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.MeasuringDevice']", 'null': 'True', 'blank': 'True'}),
            'measuring_method': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.MeasuringMethod']", 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'parameter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Parameter']", 'null': 'True', 'blank': 'True'}),
            'processing_method': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.ProcessingMethod']", 'null': 'True', 'blank': 'True'}),
            'reference_frame': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.ReferenceFrame']", 'null': 'True', 'blank': 'True'}),
            'supplying_system': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'timeseries'", 'null': 'True', 'to': "orm['auth.User']"}),
            'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Unit']", 'null': 'True', 'blank': 'True'}),
            'value_type': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'})
        },
        u'ddsc_core.unit': {
            'Meta': {'ordering': "[u'description']", 'object_name': 'Unit'},
            'begin_date': ('django.db.models.fields.DateField', [], {}),
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
            'conversion_factor': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
            'dimension': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
            'end_date': ('django.db.models.fields.DateField', [], {}),
            'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'lizard_security.dataowner': {
            'Meta': {'ordering': "['name']", 'object_name': 'DataOwner'},
            'data_managers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'remarks': ('django.db.models.fields.TextField', [], {'blank': 'True'})
        },
        'lizard_security.dataset': {
            'Meta': {'ordering': "['owner', 'name']", 'unique_together': "(('owner', 'name'),)", 'object_name': 'DataSet'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_security.DataOwner']", 'null': 'True', 'blank': 'True'})
        }
    }
    complete_apps = ['ddsc_core']
|
|
from panda3d.core import *
from panda3d.direct import *
from toontown.toonbase.ToonBaseGlobal import *
from direct.interval.IntervalGlobal import *
from direct.task import Task
from direct.directnotify import DirectNotifyGlobal
from math import *
from direct.distributed.ClockDelta import *
from toontown.golf import GolfGlobals
from toontown.shtiker.GolfPage import GolfTrophy
class GolfRewardDialog:
    """End-of-round reward screen for the golf minigame.

    Builds a Panda3D interval Sequence that announces, in order: trophies
    won (remote players first, then the local toon), cups awarded, the
    tie-break winner (if any), the final rankings, and any hole/course
    records.  Every per-player list passed to __init__ (trophyList,
    rankingsList, holeBestList, courseBestList, cupList, aimTimesList) is
    parallel to avIdList: the same index refers to the same player.
    """
    notify = directNotify.newCategory('GolfRewardDialog')

    def __init__(self, avIdList, trophyList, rankingsList, holeBestList, courseBestList, cupList, localAvId, tieBreakWinner, aimTimesList, endMovieCallback = None):
        self.avIdList = avIdList
        self.trophyList = trophyList
        self.rankingsList = rankingsList
        self.holeBestList = holeBestList
        self.courseBestList = courseBestList
        self.cupList = cupList
        self.tieBreakWinner = tieBreakWinner
        self.movie = None
        # Local toon's final placing; filled in by calcRankings().
        self.myPlace = 0
        # Applause sfx; loaded lazily in createRewardMovie().
        self.victory = None
        self.endMovieCallback = endMovieCallback
        self.aimTimesList = aimTimesList
        self.setup(localAvId)
        return

    def calcTrophyTextListForOnePlayer(self, avId):
        """Return one announcement string per trophy avId just won."""
        retval = []
        av = base.cr.doId2do.get(avId)
        if av and avId in self.avIdList:
            playerIndex = self.avIdList.index(avId)
            name = av.getName()
            for trophyIndex in xrange(len(self.trophyList[playerIndex])):
                wonTrophy = self.trophyList[playerIndex][trophyIndex]
                if wonTrophy:
                    trophyName = TTLocalizer.GolfTrophyDescriptions[trophyIndex]
                    text = TTLocalizer.GolfAvReceivesTrophy % {'name': name,
                     'award': trophyName}
                    retval.append(text)
        return retval

    def calcCupTextListForAllPlayers(self, localAvId):
        """Return cup announcements, remote players first, local toon last."""
        retval = []
        for cupPlayerIndex in xrange(len(self.avIdList)):
            if self.avIdList[cupPlayerIndex] != localAvId:
                av = base.cr.doId2do.get(self.avIdList[cupPlayerIndex])
                name = ''
                if av:
                    name = av.getName()
                for cupIndex in xrange(len(self.cupList[cupPlayerIndex])):
                    if self.cupList[cupPlayerIndex][cupIndex]:
                        cupName = TTLocalizer.GolfCupDescriptions[cupIndex]
                        text = TTLocalizer.GolfAvReceivesCup % {'name': name,
                         'cup': cupName}
                        retval.append(text)
        for cupPlayerIndex in xrange(len(self.avIdList)):
            if self.avIdList[cupPlayerIndex] == localAvId:
                av = base.cr.doId2do.get(self.avIdList[cupPlayerIndex])
                # Guard: av can be None if the avatar has already left.
                name = ''
                if av:
                    name = av.getName()
                for cupIndex in xrange(len(self.cupList[cupPlayerIndex])):
                    if self.cupList[cupPlayerIndex][cupIndex]:
                        cupName = TTLocalizer.GolfCupDescriptions[cupIndex]
                        text = TTLocalizer.GolfAvReceivesCup % {'name': name,
                         'cup': cupName}
                        retval.append(text)
        return retval

    def calcRankings(self, localAvId):
        """Return one formatted line per player, ordered by rank.

        Side effect: records the local toon's placing in self.myPlace.
        When GolfGlobals.TIME_TIE_BREAKER is on, each line is suffixed
        with the player's aim time formatted as m'ss''ff.
        """
        retval = []
        self.notify.debug('aimTimesList=%s' % self.aimTimesList)
        for rank in xrange(len(self.rankingsList) + 1):
            for avIndex in xrange(len(self.avIdList)):
                if self.rankingsList[avIndex] == rank:
                    name = ' '
                    av = base.cr.doId2do.get(self.avIdList[avIndex])
                    if av:
                        name = av.getName()
                    text = '%d. ' % rank + ' ' + name
                    if GolfGlobals.TIME_TIE_BREAKER:
                        time = self.aimTimesList[avIndex]
                        minutes = int(time / 60)
                        time -= minutes * 60
                        seconds = int(time)
                        # Zero-pad seconds below ten so 65.5 reads 1'05''50.
                        padding = (seconds < 10 and ['0'] or [''])[0]
                        time -= seconds
                        # Keep exactly two fractional digits, zero-padded.
                        fraction = str(time)[2:4]
                        fraction = fraction + '0' * (2 - len(fraction))
                        timeStr = "%d'%s%d''%s" % (minutes,
                         padding,
                         seconds,
                         fraction)
                        text += ' - ' + timeStr
                    retval.append(text)
                    if self.avIdList[avIndex] == localAvId:
                        self.myPlace = rank
        return retval

    def calcHoleBestTextListForAllPlayers(self, localAvId):
        """Return hole-record announcements, remote players first."""
        retval = []
        if GolfGlobals.CalcOtherHoleBest:
            for hbPlayerIndex in xrange(len(self.avIdList)):
                if self.avIdList[hbPlayerIndex] != localAvId:
                    av = base.cr.doId2do.get(self.avIdList[hbPlayerIndex])
                    # Guard: av can be None if the toon disconnected.
                    name = ''
                    if av:
                        name = av.getName()
                    for hbIndex in xrange(len(self.holeBestList[hbPlayerIndex])):
                        if self.holeBestList[hbPlayerIndex][hbIndex]:
                            hbName = TTLocalizer.GolfHoleNames[hbIndex]
                            text = TTLocalizer.GolfAvReceivesHoleBest % {'name': name,
                             'hole': hbName}
                            retval.append(text)
        for hbPlayerIndex in xrange(len(self.avIdList)):
            if self.avIdList[hbPlayerIndex] == localAvId:
                av = base.cr.doId2do.get(self.avIdList[hbPlayerIndex])
                name = ''
                if av:
                    name = av.getName()
                for hbIndex in xrange(len(self.holeBestList[hbPlayerIndex])):
                    if self.holeBestList[hbPlayerIndex][hbIndex]:
                        hbName = TTLocalizer.GolfHoleNames[hbIndex]
                        text = TTLocalizer.GolfAvReceivesHoleBest % {'name': name,
                         'hole': hbName}
                        retval.append(text)
        return retval

    def calcCourseBestTextListForAllPlayers(self, localAvId):
        """Return course-record announcements, remote players first.

        Bug fix: the remote-player loop previously iterated
        self.holeBestList (a copy-paste from the hole-best method); it
        now reads self.courseBestList like the local-player loop below.
        """
        retval = []
        if GolfGlobals.CalcOtherCourseBest:
            for cbPlayerIndex in xrange(len(self.avIdList)):
                if self.avIdList[cbPlayerIndex] != localAvId:
                    av = base.cr.doId2do.get(self.avIdList[cbPlayerIndex])
                    # Guard: av can be None if the toon disconnected.
                    name = ''
                    if av:
                        name = av.getName()
                    for cbIndex in xrange(len(self.courseBestList[cbPlayerIndex])):
                        if self.courseBestList[cbPlayerIndex][cbIndex]:
                            cbName = TTLocalizer.GolfCourseNames[cbIndex]
                            text = TTLocalizer.GolfAvReceivesCourseBest % {'name': name,
                             'course': cbName}
                            retval.append(text)
        for cbPlayerIndex in xrange(len(self.avIdList)):
            if self.avIdList[cbPlayerIndex] == localAvId:
                av = base.cr.doId2do.get(self.avIdList[cbPlayerIndex])
                name = ''
                if av:
                    name = av.getName()
                for cbIndex in xrange(len(self.courseBestList[cbPlayerIndex])):
                    if self.courseBestList[cbPlayerIndex][cbIndex]:
                        cbName = TTLocalizer.GolfCourseNames[cbIndex]
                        text = TTLocalizer.GolfAvReceivesCourseBest % {'name': name,
                         'course': cbName}
                        retval.append(text)
        return retval

    def createRewardMovie(self, localAvId):
        """Assemble and return the full reward Sequence (autoPause=1)."""
        retval = Sequence(name='Reward sequence', autoPause=1)
        self.trophy = None

        def setTrophyLabelText(text, playerIndex, trophyIndex):
            # Show the trophy panel (text plus 3D trophy model); hide the rest.
            self.rankLabel.hide()
            self.rewardLabel.hide()
            self.trophy = GolfTrophy(level=self.trophyList[playerIndex][trophyIndex], parent=self.trophyLabel, pos=(1.3, 0, -0.25))
            self.trophy.setScale(0.65, 1, 0.65)
            self.trophy.show()
            self.trophyLabel['text'] = text

        def setRewardLabelText(text):
            # Show the generic reward text; hide the rank/trophy panels.
            self.rewardLabel.show()
            self.rankLabel.hide()
            self.trophyLabel.hide()
            if self.trophy:
                self.trophy.hide()
            self.rewardLabel['text'] = text

        def setRankLabelText(text):
            # Show the rankings text; hide the reward/trophy panels.
            self.rankLabel.show()
            self.rewardLabel.hide()
            self.trophyLabel.hide()
            if self.trophy:
                self.trophy.hide()
            self.rankLabel['text'] = text

        if len(self.avIdList) > 1:
            # Applause sound is keyed to the local toon's final placing.
            self.victory = base.loadSfx('phase_6/audio/sfx/KART_Applause_%d.ogg' % self.myPlace)
            self.victory.play()
        for avId in self.avIdList:
            if avId != localAvId:
                rewardTextList = self.calcTrophyTextListForOnePlayer(avId)
                trophyIndex = 0
                playerIndex = self.avIdList.index(avId)
                for rewardText in rewardTextList:
                    oneTrophyIval = Parallel(Func(setTrophyLabelText, rewardText, playerIndex, trophyIndex), LerpColorScaleInterval(self.trophyLabel, 4, Vec4(1, 1, 1, 0), startColorScale=Vec4(1, 1, 1, 1), blendType='easeIn'))
                    trophyIndex = trophyIndex + 1
                    # Bug fix: append inside the loop so every trophy interval
                    # is shown (the old code appended only the last interval
                    # and raised NameError when the first remote player had
                    # no trophies at all).
                    retval.append(oneTrophyIval)
        rewardTextList = self.calcTrophyTextListForOnePlayer(localAvId)
        trophyIndex = 0
        playerIndex = self.avIdList.index(localAvId)
        for rewardText in rewardTextList:
            # NOTE(review): trophyIndex is never advanced for the local toon
            # in the original code; preserved as-is -- confirm intent.
            oneRewardIval = Parallel(Func(setTrophyLabelText, rewardText, playerIndex, trophyIndex), LerpColorScaleInterval(self.trophyLabel, 4, Vec4(1, 1, 1, 0), startColorScale=Vec4(1, 1, 1, 1), blendType='easeIn'))
            retval.append(oneRewardIval)
        rewardCupList = self.calcCupTextListForAllPlayers(localAvId)
        for rewardText in rewardCupList:
            oneCupIval = Parallel(Func(setRewardLabelText, rewardText), LerpColorScaleInterval(self.rewardLabel, 4, Vec4(1, 1, 1, 0), startColorScale=Vec4(1, 1, 1, 1), blendType='noBlend'))
            retval.append(oneCupIval)
        if self.tieBreakWinner:
            name = ''
            av = base.cr.doId2do.get(self.tieBreakWinner)
            if av:
                name = av.getName()
            if GolfGlobals.TIME_TIE_BREAKER:
                rewardText = TTLocalizer.GolfTimeTieBreakWinner % {'name': name}
            else:
                rewardText = TTLocalizer.GolfTieBreakWinner % {'name': name}
            randomWinnerIval = Parallel(Func(setRewardLabelText, rewardText), LerpColorScaleInterval(self.rewardLabel, 7, Vec4(1, 1, 1, 0), startColorScale=Vec4(1, 1, 1, 1), blendType='noBlend'))
            retval.append(randomWinnerIval)
        rankings = self.calcRankings(localAvId)
        rankText = TTLocalizer.GolfRanking + '\n'
        for rank in xrange(len(rankings)):
            rankText = rankText + rankings[rank] + '\n'
        oneRankIval = Parallel(Func(setRankLabelText, rankText), LerpColorScaleInterval(self.rankLabel, 8, Vec4(1, 1, 1, 1), startColorScale=Vec4(1, 1, 1, 1), blendType='easeIn'))
        retval.append(oneRankIval)
        rewardHoleList = self.calcHoleBestTextListForAllPlayers(localAvId)
        for rewardText in rewardHoleList:
            oneHoleIval = Parallel(Func(setRewardLabelText, rewardText), LerpColorScaleInterval(self.rewardLabel, 8, Vec4(1, 1, 1, 0), startColorScale=Vec4(1, 1, 1, 1), blendType='easeIn'))
            retval.append(oneHoleIval)
        rewardCourseList = self.calcCourseBestTextListForAllPlayers(localAvId)
        for rewardText in rewardCourseList:
            oneCourseIval = Parallel(Func(setRewardLabelText, rewardText), LerpColorScaleInterval(self.rewardLabel, 4, Vec4(1, 1, 1, 0), startColorScale=Vec4(1, 1, 1, 1), blendType='easeIn'))
            retval.append(oneCourseIval)
        if self.endMovieCallback:
            retval.append(Func(self.endMovieCallback))
        return retval

    def setup(self, localAvId):
        """Create the backing DirectGui widgets and build the movie."""
        self.rewardBoard = DirectFrame(parent=aspect2d, relief=None, geom=DGG.getDefaultDialogGeom(), geom_color=ToontownGlobals.GlobalDialogColor, geom_scale=(1.75, 1, 0.6), pos=(0, 0, -0.6))
        self.rewardLabel = DirectLabel(parent=self.rewardBoard, relief=None, pos=(-0, 0, 0), text_align=TextNode.ACenter, text='', text_scale=0.05, text_wordwrap=30)
        self.rankLabel = DirectLabel(parent=self.rewardBoard, relief=None, pos=(-0, 0, 0.17), text_align=TextNode.ACenter, text='', text_scale=0.06)
        self.trophyLabel = DirectLabel(parent=self.rewardBoard, relief=None, pos=(-0.7, 0, 0.05), text_align=TextNode.ALeft, text='', text_scale=0.06, text_wordwrap=20)
        self.movie = self.createRewardMovie(localAvId)
        return

    def delete(self):
        """Stop the movie and release all GUI resources."""
        self.movie.pause()
        self.notify.debug('Movie is paused')
        self.rewardBoard.destroy()
        self.notify.debug('Reward board is destroyed')
        self.movie = None
        self.notify.debug('Movie is deleted')
        return

    def getMovie(self):
        """Return the reward Sequence built in setup()."""
        return self.movie
|
|
# -*- coding: utf-8 -*-
import pytest
from punch import version_part as vpart
# --- IntegerVersionPart: construction, inc/set/reset, and copying ---
def test_integer_version_part_init_with_integer():
    vp = vpart.IntegerVersionPart('major', 4)
    assert vp.value == 4
def test_integer_version_part_init_with_string():
    # String input is coerced to int.
    vp = vpart.IntegerVersionPart('major', '4')
    assert vp.value == 4
def test_integer_version_part_init_with_none():
    vp = vpart.IntegerVersionPart('major', None)
    assert vp.value == 0
def test_integer_version_part_init_without_value():
    vp = vpart.IntegerVersionPart('major')
    assert vp.value == 0
def test_integer_version_part_init_with_start_value():
    # reset() must return to the configured start_value, not to 0.
    vp = vpart.IntegerVersionPart('major', start_value=1)
    assert vp.value == 1
    vp.inc()
    vp.reset()
    assert vp.value == 1
def test_integer_version_part_increases():
    vp = vpart.IntegerVersionPart('major', 4)
    vp.inc()
    assert vp.value == 5
def test_integer_version_part_set():
    vp = vpart.IntegerVersionPart('major', 4)
    vp.set(9)
    assert vp.value == 9
def test_integer_version_part_reset():
    vp = vpart.IntegerVersionPart('major', 4)
    vp.reset()
    assert vp.value == 0
def test_integer_version_part_copy():
    # A copy must be independent of later mutations of the original.
    vp = vpart.IntegerVersionPart('major', 4)
    nvp = vp.copy()
    vp.inc()
    assert nvp.value == 4
def test_integer_version_part_with_start_value_copy():
    vp = vpart.IntegerVersionPart('major', 4, start_value=1)
    nvp = vp.copy()
    assert nvp.start_value == 1
# --- ValueListVersionPart: values must come from the allowed list ---
def test_valuelist_version_part_init_with_allowed_value():
    vp = vpart.ValueListVersionPart('major', 0, [0, 2, 4, 6, 8])
    assert vp.value == 0
def test_valuelist_version_part_init_with_not_allowed_value():
    with pytest.raises(ValueError):
        vpart.ValueListVersionPart('major', 1, [0, 2, 4, 6, 8])
def test_valuelist_version_part_init_with_none():
    # None falls back to the first allowed value.
    vp = vpart.ValueListVersionPart('major', None, [0, 2, 4, 6, 8])
    assert vp.value == 0
def test_valuelist_version_part_increase():
    vp = vpart.ValueListVersionPart('major', 0, [0, 2, 4, 6, 8])
    vp.inc()
    assert vp.value == 2
def test_valuelist_version_part_set():
    vp = vpart.ValueListVersionPart('major', 0, [0, 2, 4, 6, 8])
    vp.set(8)
    assert vp.value == 8
def test_valuelist_version_part_increase_from_last():
    # Incrementing past the last allowed value wraps around to the first.
    vp = vpart.ValueListVersionPart('major', 8, [0, 2, 4, 6, 8])
    vp.inc()
    assert vp.value == 0
def test_valuelist_version_part_increase_with_non_numerical_values():
    vp = vpart.ValueListVersionPart(
        'major', 0, [0, 'alpha', 'beta', 'rc1', 'rc2', 1]
    )
    vp.inc()
    assert vp.value == 'alpha'
def test_valuelist_version_part_set_with_non_numerical_values():
    vp = vpart.ValueListVersionPart(
        'major', 0, [0, 'alpha', 'beta', 'rc1', 'rc2', 1]
    )
    vp.set('rc1')
    assert vp.value == 'rc1'
def test_valuelist_version_part_reset():
    vp = vpart.ValueListVersionPart('major', 4, [0, 2, 4, 6, 8])
    vp.reset()
    assert vp.value == 0
def test_valuelist_version_part_copy():
    # The copy keeps its own value AND its own allowed-values list.
    vp = vpart.ValueListVersionPart('major', 4, [0, 2, 4, 6, 8])
    nvp = vp.copy()
    vp.inc()
    vp.values.append(9)
    assert nvp.value == 4
    assert nvp.values == [0, 2, 4, 6, 8]
# --- VersionPart.from_dict: factory dispatch on the 'type' key ---
def test_get_integer_version_part_from_full_dict():
    input_dict = {
        'name': 'major',
        'value': 1,
        'type': 'integer'
    }
    vp = vpart.VersionPart.from_dict(input_dict)
    assert vp.name == 'major'
    assert vp.value == 1
    assert isinstance(vp, vpart.IntegerVersionPart)
def test_get_integer_version_part_from_partial_dict():
    # 'type' is optional and defaults to integer.
    input_dict = {
        'name': 'major',
        'value': 1,
    }
    vp = vpart.VersionPart.from_dict(input_dict)
    assert vp.name == 'major'
    assert vp.value == 1
    assert isinstance(vp, vpart.IntegerVersionPart)
def test_get_value_list_version_part_from_full_dict():
    input_dict = {
        'name': 'major',
        'value': 'alpha',
        'type': 'value_list',
        'allowed_values': ['alpha', 'beta', 'stable']
    }
    vp = vpart.VersionPart.from_dict(input_dict)
    assert vp.name == 'major'
    assert vp.value == 'alpha'
    assert isinstance(vp, vpart.ValueListVersionPart)
    assert vp.values == ['alpha', 'beta', 'stable']
# --- DateVersionPart: value derived from the current date via strftime ---
# (strftime is mocked so the tests do not depend on the wall clock.)
def test_date_version_part_init_without_value(mocker):
    mock_strftime = mocker.patch('punch.version_part.strftime')
    mock_strftime.return_value = '2018'
    vp = vpart.DateVersionPart('major', value=None, fmt='%Y')
    mock_strftime.assert_called_with('%Y')
    assert vp.value == '2018'
def test_date_version_part_init_with_value(mocker):
    # An explicit value wins over the current date.
    mock_strftime = mocker.patch('punch.version_part.strftime')
    mock_strftime.return_value = '2018'
    vp = vpart.DateVersionPart('major', value='2017', fmt='%Y')
    mock_strftime.assert_not_called()
    assert vp.value == '2017'
def test_date_version_part_init_with_integer_value(mocker):
    # Integer values are stored as strings.
    mock_strftime = mocker.patch('punch.version_part.strftime')
    mock_strftime.return_value = '2018'
    vp = vpart.DateVersionPart('major', value=2017, fmt='%Y')
    mock_strftime.assert_not_called()
    assert vp.value == '2017'
def test_date_version_part_reset(mocker):
    mock_strftime = mocker.patch('punch.version_part.strftime')
    vp = vpart.DateVersionPart('major', value='2017', fmt='%Y')
    assert vp.value == '2017'
    mock_strftime.return_value = '2018'
    vp.reset()
    mock_strftime.assert_called_with('%Y')
    assert vp.value == '2018'
def test_date_version_part_increases_just_resets(mocker):
    # inc() on a date part is equivalent to reset(): re-read the clock.
    mock_strftime = mocker.patch('punch.version_part.strftime')
    vp = vpart.DateVersionPart('major', value='2017', fmt='%Y')
    assert vp.value == '2017'
    mock_strftime.return_value = '2018'
    vp.inc()
    mock_strftime.assert_called_with('%Y')
    assert vp.value == '2018'
def test_date_version_part_copy():
    vp = vpart.DateVersionPart('major', value='2017', fmt='%Y%m')
    nvp = vp.copy()
    assert nvp.fmt == '%Y%m'
def test_strftime_full_year(mocker):
mock_strftime = mocker.patch('punch.version_part._strftime')
vpart.strftime('YYYY')
mock_strftime.assert_called_with('%Y')
def test_strftime_short_year(mocker):
mock_strftime = mocker.patch('punch.version_part._strftime')
vpart.strftime('YY')
mock_strftime.assert_called_with('%y')
def test_strftime_short_year_is_not_padded(mocker):
mock_strftime = mocker.patch('punch.version_part._strftime')
mock_strftime.return_value = '03'
assert vpart.strftime('YY') == '3'
def test_strftime_short_month(mocker):
mock_strftime = mocker.patch('punch.version_part._strftime')
vpart.strftime('MM')
mock_strftime.assert_called_with('%m')
def test_strftime_short_month_is_not_padded(mocker):
mock_strftime = mocker.patch('punch.version_part._strftime')
mock_strftime.return_value = '04'
assert vpart.strftime('MM') == '4'
def test_strftime_month_is_not_stripped_on_the_right(mocker):
mock_strftime = mocker.patch('punch.version_part._strftime')
mock_strftime.return_value = '10'
assert vpart.strftime('MM') == '10'
def test_strftime_zero_padded_short_month(mocker):
mock_strftime = mocker.patch('punch.version_part._strftime')
vpart.strftime('0M')
mock_strftime.assert_called_with('%m')
def test_strftime_zero_padded_short_month_is_padded(mocker):
mock_strftime = mocker.patch('punch.version_part._strftime')
mock_strftime.return_value = '04'
assert vpart.strftime('0M') == '04'
def test_strftime_short_day(mocker):
mock_strftime = mocker.patch('punch.version_part._strftime')
vpart.strftime('DD')
mock_strftime.assert_called_with('%d')
def test_strftime_short_day_is_not_padded(mocker):
mock_strftime = mocker.patch('punch.version_part._strftime')
mock_strftime.return_value = '04'
assert vpart.strftime('DD') == '4'
def test_strftime_day_is_not_stripped_on_the_right(mocker):
mock_strftime = mocker.patch('punch.version_part._strftime')
mock_strftime.return_value = '10'
assert vpart.strftime('DD') == '10'
def test_strftime_zero_padded_short_day(mocker):
    """The '0D' token is translated to the '%d' strftime directive."""
    patched = mocker.patch('punch.version_part._strftime')
    vpart.strftime('0D')
    patched.assert_called_with('%d')
def test_strftime_zero_padded_short_day_is_padded(mocker):
    """The '0D' token keeps strftime's zero padding."""
    patched = mocker.patch('punch.version_part._strftime')
    patched.return_value = '04'
    assert vpart.strftime('0D') == '04'
|
|
#!/usr/bin/python
#
# Generate a list of built-in strings required by Duktape code, output
# duk_strings.h (defines) and duk_strings.c (string data). Raw string
# data is also written to duk_strings.bin.
#
# These strings may be required by execution and/or compilation, or
# built-in code. Strings are included here when it benefits footprint.
# These strings are currently interned although strings needed only by
# the compiler do not strictly need to be. Strings need to be ordered
# so that reserved words are in a certain range (with strict reserved
# words grouped together).
# XXX: integrate more tightly with genbuiltins.py
# XXX: add code to indicate strings which are needed at runtime
# (may be profile dependent); then detect which strings
# genbuiltins.py needs, and finally log unused strings
# Perhaps string lists need to be added programmatically and
# may be omitted based on profile
# XXX: avoid debug-related strings in release build (same applies to
# other configuration dependent strings, like traceback data)
# XXX: better compression
# XXX: reserved word stridx's could be made to match token numbers
# directly so that a duk_stridx2token[] would not be needed
# XXX: improve per string metadata, and sort strings within constraints
# XXX: some Duktape internal strings could just reuse existing strings
import os
import sys
import optparse
import dukutil
# Prefix for defines
define_prefix = 'DUK_STRIDX_'
#
# String lists
#
# Some strings may appear in multiple lists and even in multiple roles.
#
# XXX: currently the keywords are not recorded; use them later to organize
# strings more optimally
class BuiltinString:
    """Metadata record for one built-in string; fields are filled in by mkstr()."""
    name = None                         # the string value ('\x00' prefix marks internal strings)
    section_b = None                    # E5 "Section B" compatibility string (e.g. escape/unescape)
    browser_like = None                 # browser-like extension (e.g. print/alert)
    es6 = None                          # ES6 (draft) string
    commonjs = None                     # CommonJS-related string (was missing; mkstr() sets it)
    custom = None                       # Duktape-specific string
    internal = None                     # Duktape internal property name
    reserved_word = None                # reserved word (non-strict and/or strict mode)
    future_reserved_word = None         # future reserved word
    future_reserved_word_strict = None  # future reserved word in strict mode only
    special_literal = None              # special literal handled like a keyword (null/true/false)
    class_name = None                   # string is used as an internal class name

    # computed
    req_8bit = None                     # True if the string's index must fit in 8 bits

    def __init__(self):
        pass
def mkstr(x,
          section_b=False,
          browser_like=False,
          es6=False,
          commonjs=False,
          custom=False,
          internal=False,
          reserved_word=False,
          future_reserved_word=False,
          future_reserved_word_strict=False,
          special_literal=False,
          class_name=False):
    """Create a BuiltinString metadata object for string x with role flags."""

    # A 0xff prefix (never part of valid UTF-8) is used for internal
    # properties.  It is encoded as 0x00 in generated init data for technical
    # reasons: it keeps lookup table elements 7 bits instead of 8 bits.  The
    # initial byte of a Duktape internal string is always capitalized
    # (e.g. \x00Value) so that user code can use clean lowercase prefixes
    # like "\xFFptr".
    if internal:
        if len(x) < 1 or not ('A' <= x[0] <= 'Z'):
            raise Exception('invalid internal key: %s' % repr(x))
        x = '\x00' + x

    ret = BuiltinString()
    ret.name = x

    # Copy every role flag verbatim onto the metadata object.
    for attr, value in (('section_b', section_b),
                        ('browser_like', browser_like),
                        ('es6', es6),
                        ('commonjs', commonjs),
                        ('custom', custom),
                        ('internal', internal),
                        ('reserved_word', reserved_word),
                        ('future_reserved_word', future_reserved_word),
                        ('future_reserved_word_strict', future_reserved_word_strict),
                        ('special_literal', special_literal),
                        ('class_name', class_name)):
        setattr(ret, attr, value)

    # Class name strings require an index below 256 (checked in gen_string_list).
    ret.req_8bit = bool(class_name)

    return ret
# Standard built-in object related strings
standard_builtin_string_list = [
# internal class values
mkstr("Undefined", class_name=True), # sort of
mkstr("Null", class_name=True), # sort of
mkstr("Object", class_name=True),
mkstr("Function", class_name=True),
mkstr("Array", class_name=True),
mkstr("String", class_name=True),
mkstr("Boolean", class_name=True),
mkstr("Number", class_name=True),
mkstr("Date", class_name=True),
mkstr("RegExp", class_name=True),
mkstr("Error", class_name=True),
mkstr("Math", class_name=True),
mkstr("JSON", class_name=True),
mkstr("Arguments", class_name=True),
# built-in object names
mkstr("Object"),
mkstr("Function"),
mkstr("Array"),
mkstr("String"),
mkstr("Boolean"),
mkstr("Number"),
mkstr("Date"),
mkstr("RegExp"),
mkstr("Error"),
mkstr("EvalError"),
mkstr("RangeError"),
mkstr("ReferenceError"),
mkstr("SyntaxError"),
mkstr("TypeError"),
mkstr("URIError"),
mkstr("Math"),
mkstr("JSON"),
# Global object
mkstr("eval"),
mkstr("parseInt"),
mkstr("parseFloat"),
mkstr("isNaN"),
mkstr("isFinite"),
mkstr("decodeURI"),
mkstr("decodeURIComponent"),
mkstr("encodeURI"),
mkstr("encodeURIComponent"),
mkstr("escape", section_b=True),
mkstr("unescape", section_b=True),
mkstr("print", browser_like=True),
mkstr("alert", browser_like=True),
# Object constructor
mkstr("length"),
mkstr("prototype"),
mkstr("getPrototypeOf"),
mkstr("getOwnPropertyDescriptor"),
mkstr("getOwnPropertyNames"),
mkstr("create"),
mkstr("defineProperty"),
mkstr("defineProperties"),
mkstr("seal"),
mkstr("freeze"),
mkstr("preventExtensions"),
mkstr("isSealed"),
mkstr("isFrozen"),
mkstr("isExtensible"),
mkstr("keys"),
# Property descriptors
mkstr("value"),
mkstr("writable"),
mkstr("configurable"),
mkstr("enumerable"),
mkstr("get"),
mkstr("set"),
# Object prototype
mkstr("constructor"),
mkstr("toString"),
mkstr("toLocaleString"),
mkstr("valueOf"),
mkstr("hasOwnProperty"),
mkstr("isPrototypeOf"),
mkstr("propertyIsEnumerable"),
# Object instances
# no special properties
# Function constructor
mkstr("length"),
mkstr("prototype"),
# Function prototype
mkstr("constructor"),
mkstr("toString"),
mkstr("apply"),
mkstr("call"),
mkstr("bind"),
# Function instances
mkstr("length"),
mkstr("prototype"),
mkstr("caller"), # for bind() generated instances
mkstr("arguments"), # for bind() generated instances
# Array constructor
mkstr("length"),
mkstr("prototype"),
mkstr("isArray"),
# Array prototype
mkstr("constructor"),
mkstr("toString"),
mkstr("toLocaleString"),
mkstr("concat"),
mkstr("join"),
mkstr("pop"),
mkstr("push"),
mkstr("reverse"),
mkstr("shift"),
mkstr("slice"),
mkstr("sort"),
mkstr("splice"),
mkstr("unshift"),
mkstr("indexOf"),
mkstr("lastIndexOf"),
mkstr("every"),
mkstr("some"),
mkstr("forEach"),
mkstr("map"),
mkstr("filter"),
mkstr("reduce"),
mkstr("reduceRight"),
# Array instances
mkstr("length"),
# String constructor
mkstr("length"),
mkstr("prototype"),
mkstr("fromCharCode"),
# String prototype
mkstr("constructor"),
mkstr("toString"),
mkstr("valueOf"),
mkstr("charAt"),
mkstr("charCodeAt"),
mkstr("concat"),
mkstr("indexOf"),
mkstr("lastIndexOf"),
mkstr("localeCompare"),
mkstr("match"),
mkstr("replace"),
mkstr("search"),
mkstr("slice"),
mkstr("split"),
mkstr("substring"),
mkstr("toLowerCase"),
mkstr("toLocaleLowerCase"),
mkstr("toUpperCase"),
mkstr("toLocaleUpperCase"),
mkstr("trim"),
mkstr("substr", section_b=True),
# String instances
mkstr("length"),
# Boolean constructor
mkstr("length"),
mkstr("prototype"),
# Boolean prototype
mkstr("constructor"),
mkstr("toString"),
mkstr("valueOf"),
# Boolean instances
# no special properties
# Number constructor
mkstr("length"),
mkstr("prototype"),
mkstr("MAX_VALUE"),
mkstr("MIN_VALUE"),
mkstr("NaN"),
mkstr("NEGATIVE_INFINITY"),
mkstr("POSITIVE_INFINITY"),
# Number prototype
mkstr("constructor"),
mkstr("toString"),
mkstr("toLocaleString"),
mkstr("valueOf"),
mkstr("toFixed"),
mkstr("toExponential"),
mkstr("toPrecision"),
# Number instances
# no special properties
# Date constructor
mkstr("length"),
mkstr("prototype"),
mkstr("parse"),
mkstr("UTC"),
mkstr("now"),
# Date prototype
mkstr("constructor"),
mkstr("toString"),
mkstr("toDateString"),
mkstr("toTimeString"),
mkstr("toLocaleString"),
mkstr("toLocaleDateString"),
mkstr("toLocaleTimeString"),
mkstr("valueOf"),
mkstr("getTime"),
mkstr("getFullYear"),
mkstr("getUTCFullYear"),
mkstr("getMonth"),
mkstr("getUTCMonth"),
mkstr("getDate"),
mkstr("getUTCDate"),
mkstr("getDay"),
mkstr("getUTCDay"),
mkstr("getHours"),
mkstr("getUTCHours"),
mkstr("getMinutes"),
mkstr("getUTCMinutes"),
mkstr("getSeconds"),
mkstr("getUTCSeconds"),
mkstr("getMilliseconds"),
mkstr("getUTCMilliseconds"),
mkstr("getTimezoneOffset"),
mkstr("setTime"),
mkstr("setMilliseconds"),
mkstr("setUTCMilliseconds"),
mkstr("setSeconds"),
mkstr("setUTCSeconds"),
mkstr("setMinutes"),
mkstr("setUTCMinutes"),
mkstr("setHours"),
mkstr("setUTCHours"),
mkstr("setDate"),
mkstr("setUTCDate"),
mkstr("setMonth"),
mkstr("setUTCMonth"),
mkstr("setFullYear"),
mkstr("setUTCFullYear"),
mkstr("toUTCString"),
mkstr("toISOString"),
mkstr("toJSON"),
mkstr("getYear", section_b=True),
mkstr("setYear", section_b=True),
mkstr("toGMTString", section_b=True),
# Date instances
# no special properties
# RegExp constructor
mkstr("length"),
mkstr("prototype"),
# RegExp prototype
mkstr("constructor"),
mkstr("exec"),
mkstr("test"),
mkstr("toString"),
# RegExp instances
mkstr("source"),
mkstr("global"),
mkstr("ignoreCase"),
mkstr("multiline"),
mkstr("lastIndex"),
mkstr("(?:)"),
# RegExp exec() results
mkstr("index"),
mkstr("input"),
# Error constructor
mkstr("length"),
mkstr("prototype"),
# Error prototype
mkstr("constructor"),
mkstr("name"),
mkstr("message"),
mkstr("toString"),
# Error instances
# no special properties
# Error prototype / error fields (apply to all native errors in the spec)
mkstr("name"),
mkstr("message"),
# Math object
mkstr("E"),
mkstr("LN10"),
mkstr("LN2"),
mkstr("LOG2E"),
mkstr("LOG10E"),
mkstr("PI"),
mkstr("SQRT1_2"),
mkstr("SQRT2"),
mkstr("abs"),
mkstr("acos"),
mkstr("asin"),
mkstr("atan"),
mkstr("atan2"),
mkstr("ceil"),
mkstr("cos"),
mkstr("exp"),
mkstr("floor"),
mkstr("log"),
mkstr("max"),
mkstr("min"),
mkstr("pow"),
mkstr("random"),
mkstr("round"),
mkstr("sin"),
mkstr("sqrt"),
mkstr("tan"),
# JSON object
mkstr("parse"),
mkstr("stringify"),
]
# Other standard related strings
standard_other_string_list = [
# typeof - these produce unfortunate naming conflicts like "Object" vs "object"
mkstr("undefined"),
mkstr("boolean"),
mkstr("number"),
mkstr("string"),
mkstr("object"), # also returned for typeof null
mkstr("function"),
# type related
mkstr("undefined"),
mkstr("null"),
mkstr("true"),
mkstr("false"),
# special values
mkstr("length"),
mkstr("NaN"),
mkstr("Infinity"),
mkstr("+Infinity"),
mkstr("-Infinity"),
mkstr("0"),
mkstr("+0"),
mkstr("-0"),
mkstr("", class_name=True), # used as a class name for unused/invalid class
mkstr(","), # for array joining
mkstr(" "), # for print()
mkstr("\n\t"), # for tracebacks
mkstr("[...]"), # for tracebacks
mkstr("Invalid Date"), # for invalid Date instances
# arguments object (E5 Section 10.6)
mkstr("arguments"),
mkstr("callee"),
mkstr("caller"),
# "set" and "get" are strings we need in object literals but they are not
# ReservedWords.
mkstr("get"),
mkstr("set"),
]
# ES6 (draft) specific strings
es6_string_list = [
mkstr("Proxy", es6=True),
#mkstr("revocable", es6=True),
# Proxy trap names
mkstr("has", es6=True),
mkstr("set", es6=True),
mkstr("get", es6=True),
mkstr("deleteProperty", es6=True),
mkstr("enumerate", es6=True),
mkstr("ownKeys", es6=True),
mkstr("setPrototypeOf", es6=True),
mkstr("__proto__", es6=True),
]
# CommonJS related strings
commonjs_string_list = [
mkstr("require", commonjs=True),
mkstr("id", commonjs=True),
]
# Duktape specific strings
duk_string_list = [
# non-standard global properties
mkstr("Duktape", custom=True),
# non-standard class values
mkstr("global", custom=True, class_name=True), # implementation specific but shared by e.g. smjs and V8
mkstr("ObjEnv", custom=True, class_name=True),
mkstr("DecEnv", custom=True, class_name=True),
mkstr("Buffer", custom=True, class_name=True),
mkstr("Pointer", custom=True, class_name=True),
mkstr("Thread", custom=True, class_name=True),
mkstr("Logger", custom=True, class_name=True),
# non-standard built-in object names
mkstr("ThrowTypeError", custom=True), # implementation specific, matches V8
# non-standard error object (or Error.prototype) properties
mkstr("stack", custom=True),
mkstr("pc", custom=True),
mkstr("fileName", custom=True),
mkstr("lineNumber", custom=True),
#mkstr("code", custom=True),
mkstr("Tracedata", internal=True, custom=True),
# non-standard function instance properties
mkstr("name", custom=True), # function declaration/expression name (or empty)
mkstr("fileName", custom=True), # filename associated with function (shown in tracebacks)
# typeof - these produce unfortunate naming conflicts like "Object" vs "object"
mkstr("buffer", custom=True),
mkstr("pointer", custom=True),
# internal property for primitive value (Boolean, Number, String)
mkstr("Value", internal=True, custom=True),
# internal properties for enumerator objects
mkstr("Target", internal=True, custom=True),
mkstr("Next", internal=True, custom=True),
# internal properties for RegExp instances
mkstr("Bytecode", internal=True, custom=True),
# internal properties for function objects
mkstr("Formals", internal=True, custom=True),
mkstr("Varmap", internal=True, custom=True),
mkstr("Lexenv", internal=True, custom=True),
mkstr("Varenv", internal=True, custom=True),
mkstr("Source", internal=True, custom=True),
mkstr("Pc2line", internal=True, custom=True),
# internal properties for thread objects
# internal properties for bound function objects
mkstr("Target", internal=True, custom=True), # [[TargetFunction]]
mkstr("This", internal=True, custom=True), # [[BoundThis]]
mkstr("Args", internal=True, custom=True), # [[BoundArguments]]
# internal properties for argument objects
mkstr("Map", internal=True, custom=True),
mkstr("Callee", internal=True, custom=True),
# internal properties for general objects
#mkstr("Metatable", internal=True, custom=True),
mkstr("Finalizer", internal=True, custom=True),
# internal properties for Proxy objects
mkstr("Target", internal=True, custom=True), # [[ProxyTarget]]
mkstr("Handler", internal=True, custom=True), # [[ProxyHandler]]
# internal properties for declarative environment records
mkstr("Callee", internal=True, custom=True), # to access varmap
mkstr("Thread", internal=True, custom=True), # to identify valstack
mkstr("Regbase", internal=True, custom=True), # to determine absolute valstack index
# internal properties for object environment records
mkstr("Target", internal=True, custom=True), # target object
mkstr("This", internal=True, custom=True), # implicit this binding value
# fake filename for compiled functions
mkstr("compile", custom=True), # used as a filename for functions created with Function constructor
mkstr("input", custom=True), # used as a filename for eval temp function
# Duktape object
mkstr("errCreate", custom=True),
mkstr("errThrow", custom=True),
mkstr("modSearch", custom=True),
mkstr("modLoaded", custom=True),
mkstr("env", custom=True),
mkstr("version", custom=True),
mkstr("info", custom=True),
mkstr("act", custom=True),
mkstr("gc", custom=True),
mkstr("fin", custom=True),
mkstr("enc", custom=True),
mkstr("dec", custom=True),
mkstr("hex", custom=True), # enc/dec alg
mkstr("base64", custom=True), # enc/dec alg
mkstr("jx", custom=True), # enc/dec alg
mkstr("jc", custom=True), # enc/dec alg
mkstr("compact", custom=True),
# Buffer constructor
# Buffer prototype
# Pointer constructor
# Pointer prototype
# Thread constructor
mkstr("yield", custom=True),
mkstr("resume", custom=True),
mkstr("current", custom=True),
# Thread prototype
# Logger constructor
# Logger prototype and logger instances
mkstr("fmt", custom=True),
mkstr("raw", custom=True),
mkstr("trace", custom=True),
mkstr("debug", custom=True),
mkstr("info", custom=True),
mkstr("warn", custom=True),
mkstr("error", custom=True),
mkstr("fatal", custom=True),
mkstr("n", custom=True),
mkstr("l", custom=True),
# Auxiliary logger strings
mkstr("clog", custom=True), # C logger
# for controlling log formatting of objects
mkstr("toLogString", custom=True),
# special literals for custom json encodings
mkstr('{"_undef":true}', custom=True),
mkstr('{"_nan":true}', custom=True),
mkstr('{"_inf":true}', custom=True),
mkstr('{"_ninf":true}', custom=True),
mkstr('{"_func":true}', custom=True),
mkstr('{_func:true}', custom=True),
]
# Standard reserved words (non-strict mode + strict mode)
# Note: order must match DUK_TOK_XXX reserved defines in duk_types.h
standard_reserved_words_list = [
# E5 Section 7.6.1
# Keyword
mkstr("break", reserved_word=True),
mkstr("case", reserved_word=True),
mkstr("catch", reserved_word=True),
mkstr("continue", reserved_word=True),
mkstr("debugger", reserved_word=True),
mkstr("default", reserved_word=True),
mkstr("delete", reserved_word=True),
mkstr("do", reserved_word=True),
mkstr("else", reserved_word=True),
mkstr("finally", reserved_word=True),
mkstr("for", reserved_word=True),
mkstr("function", reserved_word=True),
mkstr("if", reserved_word=True),
mkstr("in", reserved_word=True),
mkstr("instanceof", reserved_word=True),
mkstr("new", reserved_word=True),
mkstr("return", reserved_word=True),
mkstr("switch", reserved_word=True),
mkstr("this", reserved_word=True),
mkstr("throw", reserved_word=True),
mkstr("try", reserved_word=True),
mkstr("typeof", reserved_word=True),
mkstr("var", reserved_word=True),
mkstr("void", reserved_word=True),
mkstr("while", reserved_word=True),
mkstr("with", reserved_word=True),
# Future reserved word
mkstr("class", reserved_word=True, future_reserved_word=True),
mkstr("const", reserved_word=True, future_reserved_word=True),
mkstr("enum", reserved_word=True, future_reserved_word=True),
mkstr("export", reserved_word=True, future_reserved_word=True),
mkstr("extends", reserved_word=True, future_reserved_word=True),
mkstr("import", reserved_word=True, future_reserved_word=True),
mkstr("super", reserved_word=True, future_reserved_word=True),
# E5 Section 7.8.1 and 7.8.2: special literals which the lexer
# basically treats like keywords
mkstr("null", special_literal=True),
mkstr("true", special_literal=True),
mkstr("false", special_literal=True),
# "set" and "get" are *NOT* reserved words and there is even code
# in the wild with statements like 'var set = 1;'. They are thus
# treated as ordinary identifiers and recognized by the compiler
# as tokens in a special way.
#mkstr("get"),
#mkstr("set"),
]
# Standard reserved words (strict mode only)
# Note: order must match DUK_TOK_XXX reserved defines in duk_types.h
standard_reserved_words_strict_string_list = [
# Future reserved word (additionally in strict mode)
mkstr("implements", reserved_word=True, future_reserved_word_strict=True),
mkstr("interface", reserved_word=True, future_reserved_word_strict=True),
mkstr("let", reserved_word=True, future_reserved_word_strict=True),
mkstr("package", reserved_word=True, future_reserved_word_strict=True),
mkstr("private", reserved_word=True, future_reserved_word_strict=True),
mkstr("protected", reserved_word=True, future_reserved_word_strict=True),
mkstr("public", reserved_word=True, future_reserved_word_strict=True),
mkstr("static", reserved_word=True, future_reserved_word_strict=True),
mkstr("yield", reserved_word=True, future_reserved_word_strict=True),
]
#
# Forced define names for specific strings for which automatic name generation
# does a bad job.
#
special_define_names = {
# typeof has name conflicts like "object" and "Object", broken with
# these unfortunately hacky defines
'undefined': 'LC_UNDEFINED',
'Undefined': 'UC_UNDEFINED',
'null': 'LC_NULL',
'Null': 'UC_NULL',
'object': 'LC_OBJECT',
'Object': 'UC_OBJECT',
'boolean': 'LC_BOOLEAN',
'Boolean': 'UC_BOOLEAN',
'number': 'LC_NUMBER',
'Number': 'UC_NUMBER',
'function': 'LC_FUNCTION',
'Function': 'UC_FUNCTION',
'string': 'LC_STRING',
'String': 'UC_STRING',
'arguments': 'LC_ARGUMENTS',
'Arguments': 'UC_ARGUMENTS',
'buffer': 'LC_BUFFER',
'Buffer': 'UC_BUFFER',
'pointer': 'LC_POINTER',
'Pointer': 'UC_POINTER',
#'thread': 'LC_THREAD',
'Thread': 'UC_THREAD',
#'logger': 'LC_LOGGER',
'Logger': 'UC_LOGGER',
'n': 'LC_N',
'l': 'LC_L',
'error': 'LC_ERROR',
'Error': 'UC_ERROR',
# log levels
'trace': 'LC_TRACE',
#'Trace': 'UC_TRACE',
'debug': 'LC_DEBUG',
#'Debug': 'UC_DEBUG',
'info': 'LC_INFO',
#'Info': 'UC_INFO',
'warn': 'LC_WARN',
#'Warn': 'UC_WARN',
#'error': 'LC_ERROR', # already above
#'Error': 'UC_ERROR',
'fatal': 'LC_FATAL',
#'Fatal': 'UC_FATAL',
'+Infinity': 'PLUS_INFINITY',
'-Infinity': 'MINUS_INFINITY',
'0': 'ZERO',
'+0': 'PLUS_ZERO',
'-0': 'MINUS_ZERO',
'NaN': 'NAN',
'isNaN': 'IS_NAN',
'MIN_VALUE': 'MIN_VALUE',
'MAX_VALUE': 'MAX_VALUE',
'NEGATIVE_INFINITY': 'NEGATIVE_INFINITY',
'POSITIVE_INFINITY': 'POSITIVE_INFINITY',
'(?:)': 'ESCAPED_EMPTY_REGEXP',
'Invalid Date': 'INVALID_DATE',
'decodeURIComponent': 'DECODE_URI_COMPONENT',
'encodeURIComponent': 'ENCODE_URI_COMPONENT',
'getUTCDate': 'GET_UTC_DATE',
'getUTCDay': 'GET_UTC_DAY',
'getUTCFullYear': 'GET_UTC_FULL_YEAR',
'getUTCHours': 'GET_UTC_HOURS',
'getUTCMilliseconds': 'GET_UTC_MILLISECONDS',
'getUTCMinutes': 'GET_UTC_MINUTES',
'getUTCMonth': 'GET_UTC_MONTH',
'getUTCSeconds': 'GET_UTC_SECONDS',
'setUTCDate': 'SET_UTC_DATE',
'setUTCDay': 'SET_UTC_DAY',
'setUTCFullYear': 'SET_UTC_FULL_YEAR',
'setUTCHours': 'SET_UTC_HOURS',
'setUTCMilliseconds': 'SET_UTC_MILLISECONDS',
'setUTCMinutes': 'SET_UTC_MINUTES',
'setUTCMonth': 'SET_UTC_MONTH',
'setUTCSeconds': 'SET_UTC_SECONDS',
'LOG10E': 'LOG10E',
'LOG2E': 'LOG2E',
'toISOString': 'TO_ISO_STRING',
'toUTCString': 'TO_UTC_STRING',
'toGMTString': 'TO_GMT_STRING',
'URIError': 'URI_ERROR',
'Duktape': 'DUKTAPE',
'': 'EMPTY_STRING',
',': 'COMMA',
' ': 'SPACE',
'\n\t': 'NEWLINE_TAB',
'[...]': 'BRACKETED_ELLIPSIS',
'{"_undef":true}': 'JSON_EXT_UNDEFINED',
'{"_nan":true}': 'JSON_EXT_NAN',
'{"_inf":true}': 'JSON_EXT_POSINF',
'{"_ninf":true}': 'JSON_EXT_NEGINF',
'{"_func":true}': 'JSON_EXT_FUNCTION1',
'{_func:true}': 'JSON_EXT_FUNCTION2',
}
#
# String table generation
#
# Get a define name for a string
def get_define_name(x):
    """Return the C define name (DUK_STRIDX_xxx) for a BuiltinString.

    A hand-picked name from special_define_names is used when automatic
    CamelCase -> UPPER_SNAKE conversion would do a bad job; otherwise the
    name is converted automatically.  Internal strings (leading '\\x00')
    get an extra 'INT_' marker.
    """
    x = x.name
    # Fixed: dict.has_key() is Python-2-only; 'in' is equivalent and works
    # on both Python 2 and Python 3.
    if x in special_define_names:
        return define_prefix + special_define_names[x]

    is_internal = False
    if len(x) >= 1 and x[0] == '\x00':
        is_internal = True
        x = x[1:]

    res = ''
    if is_internal:
        res += 'INT_'

    prev_upper = False
    for idx, c in enumerate(x):
        if c.isupper():
            # Insert '_' at a lower->upper boundary (CamelCase word break).
            if (idx > 0 and not prev_upper):
                res += '_'
        res += c.upper()
        prev_upper = c.isupper()

    return define_prefix + res
def gen_strings_data_bitpacked(strlist):
    # Bit-pack the (string, define) list into byte-oriented init data.
    # Returns (data, maxlen): the packed byte string and the length of the
    # longest string.  NOTE: this file is Python 2 (print statement below).
    be = dukutil.BitEncoder()

    # Strings are encoded as follows: a string begins in lowercase
    # mode and recognizes the following 5-bit symbols:
    #
    #  0-25    'a' ... 'z'
    #  26      '_'
    #  27      0x00 (actually decoded to 0xff, internal marker)
    #  28      reserved
    #  29      switch to uppercase for one character
    #          (next 5-bit symbol must be in range 0-25)
    #  30      switch to uppercase
    #  31      read a 7-bit character verbatim
    #
    # Uppercase mode is the same except codes 29 and 30 switch to
    # lowercase.

    UNDERSCORE = 26
    ZERO = 27
    SWITCH1 = 29
    SWITCH = 30
    SEVENBIT = 31

    # Statistics over the whole list (reported at the end).
    maxlen = 0
    n_optimal = 0
    n_switch1 = 0
    n_switch = 0
    n_sevenbit = 0

    for s, d in strlist:
        # 5-bit length prefix; C decoder relies on this.
        be.bits(len(s), 5)
        if len(s) > maxlen:
            maxlen = len(s)

        # 5-bit character, mode specific
        mode = 'lowercase'
        for idx, c in enumerate(s):
            # This encoder is not that optimal, but good enough for now.
            islower = (ord(c) >= ord('a') and ord(c) <= ord('z'))
            isupper = (ord(c) >= ord('A') and ord(c) <= ord('Z'))
            islast = (idx == len(s) - 1)
            # One-character lookahead decides SWITCH (sticky) vs SWITCH1
            # (one character only).
            isnextlower = False
            isnextupper = False
            if not islast:
                c2 = s[idx+1]
                isnextlower = (ord(c2) >= ord('a') and ord(c2) <= ord('z'))
                isnextupper = (ord(c2) >= ord('A') and ord(c2) <= ord('Z'))

            if c == '_':
                be.bits(UNDERSCORE, 5)
                n_optimal += 1
            elif c == '\x00':
                be.bits(ZERO, 5)
                n_optimal += 1
            elif islower and mode == 'lowercase':
                be.bits(ord(c) - ord('a'), 5)
                n_optimal += 1
            elif isupper and mode == 'uppercase':
                be.bits(ord(c) - ord('A'), 5)
                n_optimal += 1
            elif islower and mode == 'uppercase':
                if isnextlower:
                    be.bits(SWITCH, 5)
                    be.bits(ord(c) - ord('a'), 5)
                    mode = 'lowercase'
                    n_switch += 1
                else:
                    be.bits(SWITCH1, 5)
                    be.bits(ord(c) - ord('a'), 5)
                    n_switch1 += 1
            elif isupper and mode == 'lowercase':
                if isnextupper:
                    be.bits(SWITCH, 5)
                    be.bits(ord(c) - ord('A'), 5)
                    mode = 'uppercase'
                    n_switch += 1
                else:
                    be.bits(SWITCH1, 5)
                    be.bits(ord(c) - ord('A'), 5)
                    n_switch1 += 1
            else:
                # Fallback: emit the raw 7-bit character (ASCII only).
                assert(ord(c) >= 0 and ord(c) <= 127)
                be.bits(SEVENBIT, 5)
                be.bits(ord(c), 7)
                n_sevenbit += 1
                #print 'sevenbit for: %r' % c

    # end marker not necessary, C code knows length from define

    res = be.getByteString()

    print ('%d strings, %d bytes of string init data, %d maximum string length, ' + \
           'encoding: optimal=%d,switch1=%d,switch=%d,sevenbit=%d') % \
          (len(strlist), len(res), maxlen, \
           n_optimal, n_switch1, n_switch, n_sevenbit)

    return res, maxlen
def gen_string_list():
    """Build the final ordered string list.

    Strings are ordered in the result as follows:
      1. Strings not in either of the following two categories
      2. Reserved words in strict mode only
      3. Reserved words in both non-strict and strict mode

    Reserved words must follow an exact order because they are
    translated to/from token numbers by addition/subtraction.
    The remaining strings (in category 1) must be ordered so
    that those strings requiring an 8-bit index are at the
    beginning.

    Returns (strlist, idx_start_reserved, idx_start_strict_reserved).

    XXX: quite hacky, rework.
    """
    strlist = []
    num_nonstrict_reserved = None
    num_strict_reserved = None
    num_all_reserved = None
    idx_start_reserved = None
    idx_start_strict_reserved = None

    def _add(x, append):
        # Add (name, define) to strlist; dedupe and sanity-check collisions.
        n_str = x.name
        n_def = get_define_name(x)
        for o_str, o_def in strlist:
            if o_str == n_str and o_def == n_def:
                # same string, same define => no action
                return
            if o_str == n_str and o_def != n_def:
                # same string, different define => should not happen
                raise Exception('same string, different define for %s' % n_str)
            if o_str != n_str and o_def == n_def:
                # different string, same define => need custom defines
                raise Exception('different string, same define for %s' % n_str)
        # all ok, add
        if append:
            strlist.append((n_str, n_def))
        else:
            strlist.insert(0, (n_str, n_def))

    # Add reserved words in order of occurrence first.  The order matters
    # because the string indices must be convertible to token numbers by
    # addition/subtraction.
    for i in standard_reserved_words_list:
        _add(i, True)
    num_nonstrict_reserved = len(strlist)

    for i in standard_reserved_words_strict_string_list:
        _add(i, True)
    num_all_reserved = len(strlist)
    num_strict_reserved = num_all_reserved - num_nonstrict_reserved

    # Figure out, for the remaining strings, which strings need to be
    # in the 8-bit range.  Note that a certain string may appear multiple
    # times in different roles (e.g. as a class name and a built-in object
    # name) so check every occurrence.
    req_8bit = {}
    str_lists = [ standard_builtin_string_list,
                  standard_other_string_list,
                  es6_string_list,
                  commonjs_string_list,
                  duk_string_list ]
    for lst in str_lists:
        for i in lst:
            if i.req_8bit:
                req_8bit[i.name] = True

    # Prepend strings not requiring 8-bit indices first; then prepend
    # strings requiring 8-bit indices (as early as possible).
    # Fixed: dict.has_key() (Python-2-only) replaced with 'in'.
    for lst in str_lists:
        for i in lst:
            if i.name in req_8bit:
                continue
            _add(i, False)
    for lst in str_lists:
        for i in lst:
            _add(i, False)

    # Check that 8-bit string constraints are satisfied
    for i,v in enumerate(strlist):
        name, defname = v[0], v[1]
        if name in req_8bit:
            if i >= 256:
                raise Exception('8-bit string index not satisfied: ' + repr(v))

    #for i,v in enumerate(strlist):
    #    print(i,v)

    idx_start_reserved = len(strlist) - num_all_reserved
    idx_start_strict_reserved = len(strlist) - num_strict_reserved
    return strlist, idx_start_reserved, idx_start_strict_reserved
class GenStrings:
    """Drives built-in string generation and emits C header/source output."""
    strlist = None                    # list of (name, define) pairs
    strdata = None                    # bit packed initializer data
    idx_start_reserved = None         # start of reserved keywords
    idx_start_strict_reserved = None  # start of strict reserved keywords
    maxlen = None                     # length of longest string
    string_to_index = None            # map of name -> index
    define_to_index = None            # map of define name -> index

    def __init__(self):
        pass

    def processStrings(self):
        """Build the ordered string list, packed init data, and lookup maps."""
        self.strlist, self.idx_start_reserved, self.idx_start_strict_reserved = gen_string_list()
        self.strdata, self.maxlen = gen_strings_data_bitpacked(self.strlist)

        # initialize lookup maps
        self.string_to_index = {}
        self.define_to_index = {}
        idx = 0
        for s, d in self.strlist:
            self.string_to_index[s] = idx
            self.define_to_index[d] = idx
            idx += 1

    def stringToIndex(self, x):
        """Return the index of string x (raises KeyError if absent)."""
        return self.string_to_index[x]

    def defineToIndex(self, x):
        """Return the index of define name x (raises KeyError if absent)."""
        return self.define_to_index[x]

    def hasString(self, x):
        """True if string x is in the table.

        Fixed: dict.has_key() is Python-2-only; 'in' works on 2 and 3.
        """
        return x in self.string_to_index

    def hasDefine(self, x):
        """True if define name x is in the table (has_key() -> 'in', as above)."""
        return x in self.define_to_index

    def emitStringsData(self, genc):
        """Emit the bit-packed string init data array into the C source."""
        genc.emitArray(self.strdata, 'duk_strings_data', visibility='DUK_INTERNAL', typename='duk_uint8_t', intvalues=True, const=True, size=len(self.strdata))
        genc.emitLine('')
        genc.emitLine('/* to convert a heap stridx to a token number, subtract')
        genc.emitLine(' * DUK_STRIDX_START_RESERVED and add DUK_TOK_START_RESERVED.')
        genc.emitLine(' */')

    def emitStringsHeader(self, genc):
        """Emit defines (stridx values, helper macros, counts) into the C header."""
        genc.emitLine('#if !defined(DUK_SINGLE_FILE)')
        genc.emitLine('DUK_INTERNAL_DECL const duk_uint8_t duk_strings_data[%d];' % len(self.strdata))
        genc.emitLine('#endif  /* !DUK_SINGLE_FILE */')
        genc.emitLine('')
        genc.emitDefine('DUK_STRDATA_DATA_LENGTH', len(self.strdata))
        genc.emitDefine('DUK_STRDATA_MAX_STRLEN', self.maxlen)
        genc.emitLine('')
        idx = 0
        for s, d in self.strlist:
            genc.emitDefine(d, idx, repr(s))
            idx += 1
        genc.emitLine('')
        # Convenience accessors: DUK_STRIDX_xxx -> heap/thread string lookups.
        idx = 0
        for s, d in self.strlist:
            defname = d.replace('_STRIDX','_HEAP_STRING')
            genc.emitDefine(defname + '(heap)', 'DUK_HEAP_GET_STRING((heap),%s)' % d)
            defname = d.replace('_STRIDX', '_HTHREAD_STRING')
            genc.emitDefine(defname + '(thr)', 'DUK_HTHREAD_GET_STRING((thr),%s)' % d)
            idx += 1
        genc.emitLine('')
        genc.emitDefine('DUK_HEAP_NUM_STRINGS', idx)
        genc.emitLine('')
        genc.emitDefine('DUK_STRIDX_START_RESERVED', self.idx_start_reserved)
        genc.emitDefine('DUK_STRIDX_START_STRICT_RESERVED', self.idx_start_strict_reserved)
        genc.emitDefine('DUK_STRIDX_END_RESERVED', len(self.strlist), comment='exclusive endpoint')

    def getStringList(self):
        """Return (strs, strs_base64) for external consumers."""
        strs = []
        strs_base64 = []
        for s, d in self.strlist:
            # The 'strs' list has strings as-is, with U+0000 marking the
            # internal prefix (it's not correct as runtime we use \xFF).
            #
            # The 'strs_base64' is byte exact to allow an application to
            # use it for e.g. external strings optimization.  The strings
            # are encoded to UTF-8, internal prefix is replaced with \xFF,
            # and the result is base-64 encoded to maintain byte exactness.
            # NOTE: .encode('base64') is a Python-2-only codec.
            t = s.encode('utf-8')
            if len(t) > 0 and t[0] == '\x00':
                t = '\xff' + t[1:]
            t = t.encode('base64')
            if len(t) > 0 and t[-1] == '\n':
                t = t[0:-1]
            strs.append(s)
            strs_base64.append(t)
        return strs, strs_base64
|
|
#!/usr/bin/env python
"""Widgets to load and run opscore.actor.ScriptRunner scripts.
ScriptModuleWdg loads a script from a specified module.
ScriptFileWdg loads a script from a python source file
(i.e. a module, but one that need not be on the python path)
History:
2010-02-17 ROwen Adapted from RO.Wdg.ScriptWdg.
2010-03-10 ROwen Commented out a debug print statement.
2010-06-28 ROwen Removed two duplicate imports (thanks to pychecker).
2015-11-05 ROwen Ditched obsolete "except (SystemExit, KeyboardInterrupt): raise" code
"""
__all__ = ['BasicScriptWdg', 'ScriptModuleWdg', 'ScriptFileWdg']
import os.path
import Tkinter
import RO.Constants
import RO.AddCallback
import RO.Wdg
import opscore.actor
import StatusBar
# compute _StateSevDict which contains
# state:severity for non-normal severities
_StateSevDict = {
    opscore.actor.ScriptRunner.Paused: RO.Constants.sevWarning,
    opscore.actor.ScriptRunner.Cancelled: RO.Constants.sevWarning,
    opscore.actor.ScriptRunner.Failed: RO.Constants.sevError,
}
class _Blank(object):
    """A minimal empty new-style object."""
    def __init__(self):
        super(_Blank, self).__init__()
class _FakeButton:
    """Do-nothing stand-in used where an optional button widget was not supplied.

    Every operation the widget code performs on a real button (construction,
    item assignment, pack, ctxSetConfigFunc) silently accepts any arguments
    and returns None.
    """
    def noop(self, *args, **kargs):
        """Accept anything; do nothing."""
        return

    def __init__(self, *args, **kargs):
        return

    def __setitem__(self, *args, **kargs):
        return

    def pack(self, *args, **kargs):
        return

    def ctxSetConfigFunc(self, *args, **kargs):
        return
class BasicScriptWdg(RO.AddCallback.BaseMixin):
    """Handles button enable/disable and such for a ScriptRunner.

    You are responsible for creating and displaying the status bar(s)
    and start, pause and cancel buttons.

    Inputs:
    - master        master widget; the script functions may pack or grid stuff into this
    - name          script name; used to report status
    - dispatcher    keyword dispatcher; required to use the doCmd and startCmd methods
    - runFunc       run function (run when the start button pressed)
    - statusBar     script status bar, if any
    - startButton   button to start the script

    The following inputs are optional:
    - initFunc      a function run once when the script is first loaded
    - endFunc       a function run when the script ends for any reason; None if undefined
    - cmdStatusBar  command status bar, if any; may be the same as statusBar
    - pauseButton   button to pause/resume the script
    - cancelButton  button to cancel the script
    - stateFunc     function to call when the script runner changes state.
                    The function receives one argument: the script runner.

    Notes:
    - The text of the Pause button is automatically set (to Pause or Resume, as appropriate).
    - You must set the text of the start and cancel buttons.
    - Supports the RO.AddCallback interface for state function callbacks,
      including addCallback and removeCallback
    """
    def __init__(self,
        master,
        name,
        dispatcher,
        statusBar,
        startButton,
        scriptClass = None,
        runFunc = None,
        initFunc = None,
        endFunc = None,
        cmdStatusBar = None,
        pauseButton = None,
        cancelButton = None,
        stateFunc = None,
    ):
        RO.AddCallback.BaseMixin.__init__(self)

        self.name = name
        self.dispatcher = dispatcher
        # set by _makeScriptRunner below; None until then
        self.scriptRunner = None

        # substitute do-nothing buttons for any the caller omitted,
        # so the rest of the code need not special-case them
        if not pauseButton:
            pauseButton = _FakeButton()
        if not cancelButton:
            cancelButton = _FakeButton()

        self.scriptStatusBar = statusBar
        # command status bar defaults to the script status bar
        self.cmdStatusBar = cmdStatusBar or statusBar

        self.startButton = startButton
        self.pauseButton = pauseButton
        self.cancelButton = cancelButton

        self.startButton["command"] = self._doStart
        self.pauseButton["command"] = self._doPause
        self.cancelButton["command"] = self._doCancel

        self._makeScriptRunner(
            master = master,
            scriptClass = scriptClass,
            initFunc = initFunc,
            runFunc = runFunc,
            endFunc = endFunc,
        )

        if stateFunc:
            self.addCallback(stateFunc)

    def _makeScriptRunner(self, master, scriptClass=None, initFunc=None, runFunc=None, endFunc=None):
        """Create a new script runner.

        See ScriptRunner for the meaning of the arguments.
        Also updates button state to match the fresh runner.
        """
        self.scriptRunner = opscore.actor.ScriptRunner(
            name = self.name,
            dispatcher = self.dispatcher,
            master = master,
            scriptClass = scriptClass,
            initFunc = initFunc,
            runFunc = runFunc,
            endFunc = endFunc,
            stateFunc = self._stateFunc,
            statusBar = self.scriptStatusBar,
            cmdStatusBar = self.cmdStatusBar,
        )

        self._setButtonState()

    def _doCancel(self):
        """Cancel the script.
        """
        self.scriptRunner.cancel()

    def _doPause(self):
        """Pause or resume script (depending on Pause button's text).

        Note: the pause button's text is updated by _stateFunc.
        """
        if self.pauseButton["text"] == "Resume":
            self.scriptRunner.resume()
        else:
            self.scriptRunner.pause()

    def _doStart(self):
        """Start script.
        """
        self.scriptRunner.start()

    def _setButtonState(self):
        """Set the state of the various buttons.

        While the script is executing only Pause and Cancel are enabled;
        otherwise only Start is.  Pause-button text tracks the paused state.
        """
        # print "_setButtonState(); state=%r; isExecuting=%r" % (self.scriptRunner.state, self.scriptRunner.isExecuting,)
        if self.scriptRunner.isExecuting:
            self.startButton["state"] = "disabled"
            self.pauseButton["state"] = "normal"
            self.cancelButton["state"] = "normal"
        else:
            self.startButton["state"] = "normal"
            self.pauseButton["state"] = "disabled"
            self.cancelButton["state"] = "disabled"

        if self.scriptRunner.isPaused:
            self._setPauseText("Resume")
        else:
            self._setPauseText("Pause")

    def _setPauseText(self, text):
        """Set the text and help text of the pause button.
        """
        self.pauseButton["text"] = text
        self.pauseButton.helpText = "%s the script" % text

    def _stateFunc(self, *args):
        """Script state function callback.

        Reports the new state (with severity from _StateSevDict) on the
        script status bar, refreshes button state, plays the done/failed
        sound when the script finishes, and fires registered callbacks.
        """
        state, reason = self.scriptRunner.fullState
        if reason:
            msgStr = "%s: %s" % (state, reason)
        else:
            msgStr = state

        severity = _StateSevDict.get(state, RO.Constants.sevNormal)

        self.scriptStatusBar.setMsg(msgStr, severity)
        self._setButtonState()

        if self.scriptRunner.isDone:
            if self.scriptRunner.didFail:
                self.scriptStatusBar.playCmdFailed()
            else:
                self.scriptStatusBar.playCmdDone()

        self._doCallbacks()

    def _doCallbacks(self):
        """Execute the callback functions, passing the script runner as the argument.
        """
        self._basicDoCallbacks(self.scriptRunner)
class _BaseUserScriptWdg(Tkinter.Frame, BasicScriptWdg):
    """Base class widget that runs a function via a ScriptRunner.

    Subclasses must override _getScriptFuncs.

    Inputs:
    - master        master Tk widget; when that widget is destroyed
                    the script function is cancelled.
    - name          script name; used to report status
    - dispatcher    keyword dispatcher; required to use the doCmd and startCmd methods

    All remaining keyword arguments are sent to Tkinter.Frame.__init__
    """
    def __init__(self,
        master,
        name,
        dispatcher = None,
    **kargs):
        Tkinter.Frame.__init__(self, master, **kargs)

        # ask the subclass for the script's class/functions and help URL
        srArgs = self._getScriptFuncs(isFirst=True)
        helpURL = srArgs.pop("HelpURL", None)

        # lay out, top to bottom: script frame, script status bar,
        # command status bar, button row
        row = 0

        self.scriptFrame = Tkinter.Frame(self)
        self.scriptFrame.grid(row=row, column=0, sticky="news")
        # remembered so reload() can re-grid a fresh frame in the same slot
        self.scriptFrameRow = row
        self.rowconfigure(row, weight=1)
        self.columnconfigure(0, weight=1)
        row += 1

        scriptStatusBar = StatusBar.StatusBar(
            master = self,
            helpURL = helpURL,
            helpText = "script status and messages",
        )
        scriptStatusBar.grid(row=row, column=0, sticky="ew")
        row += 1

        cmdStatusBar = StatusBar.StatusBar(
            master = self,
            summaryLen = 30,
            playCmdSounds = False,
            helpURL = helpURL,
        )
        cmdStatusBar.grid(row=row, column=0, sticky="ew")
        row += 1

        buttonFrame = Tkinter.Frame(self)
        startButton = RO.Wdg.Button(
            master = buttonFrame,
            text = "Start",
            helpText = "Start the script",
            helpURL = helpURL,
        )
        startButton.pack(side="left")
        # pause button text is set later by BasicScriptWdg._setPauseText
        pauseButton = RO.Wdg.Button(
            master = buttonFrame,
            helpURL = helpURL,
        )
        pauseButton.pack(side="left")
        cancelButton = RO.Wdg.Button(
            master = buttonFrame,
            text = "Cancel",
            helpText = "Halt the script",
            helpURL = helpURL,
        )
        cancelButton.pack(side="left")
        buttonFrame.grid(row=row, column=0, sticky="w")
        row += 1

        # set up contextual menu functions for all widgets
        # (except script frame, which is handled in reload)
        startButton.ctxSetConfigFunc(self._setCtxMenu)
        pauseButton.ctxSetConfigFunc(self._setCtxMenu)
        cancelButton.ctxSetConfigFunc(self._setCtxMenu)
        scriptStatusBar.ctxSetConfigFunc(self._setCtxMenu)
        cmdStatusBar.ctxSetConfigFunc(self._setCtxMenu)

        BasicScriptWdg.__init__(self,
            master = self.scriptFrame,
            name = name,
            dispatcher = dispatcher,
            statusBar = scriptStatusBar,
            cmdStatusBar = cmdStatusBar,
            startButton = startButton,
            pauseButton = pauseButton,
            cancelButton = cancelButton,
        **srArgs)

    def reload(self):
        """Create or recreate the script frame and script runner.

        Re-fetches the script functions (so source edits take effect),
        then replaces the script frame and runner in place.  On failure
        the error is reported on the status bar and re-raised.
        """
        # print "reload"
        self.scriptStatusBar.setMsg("Reloading", RO.Constants.sevNormal)
        try:
            srArgs = self._getScriptFuncs(isFirst = False)
            srArgs.pop("HelpURL", None) # don't send HelpURL arg to _makeScriptRunner

            # destroy the script frame,
            # which also cancels the script and its state callback
            self.scriptFrame.grid_forget()
            self.scriptFrame.destroy()
            self.scriptRunner = None

            self.scriptFrame = Tkinter.Frame(self)
            self.scriptFrame.grid(row=self.scriptFrameRow, column=0, sticky="news")
            self._makeScriptRunner(self.scriptFrame, **srArgs)
            self.scriptStatusBar.setMsg("Reloaded", RO.Constants.sevNormal)
        except Exception:
            self.scriptStatusBar.setMsg("Reload failed; see error log", RO.Constants.sevError)
            raise

    def _getScriptFuncs(self, isFirst):
        """Return a dictionary containing either scriptClass
        or one or more of initFunc, runFunc, endFunc;
        it may also contain HelpURL.

        Details:
        - the script class is instantiated or initFunc called:
          - once when this widget is built
          - again each time the script is reloaded
        - scriptObj.run or runFunc is called whenever the Start button is pushed.
        - scriptObj.end or endFunc is called when runFunc ends for any reason
          (finishes, fails or is cancelled); used for cleanup

        where scriptObj represents the instantiated script class.

        Specify None for init or end if undefined (run is required).

        All functions receive one argument: sr, a ScriptRunner object.
        The functions can pass information using sr.globals,
        an initially empty object (to which you can add
        instance variables and set or read them).

        Inputs:
        - isFirst   True if the first execution

        Warning: only the run function may call sr methods that wait.
        The other functions may only run non-waiting code.

        Must be defined by all subclasses.
        """
        raise RuntimeError("Class %s must define _getScriptFuncs" % \
            (self.__class__.__name__,))

    def _setCtxMenu(self, menu):
        """Set the contextual menu for the status bar,
        backgound frame and control buttons.

        Returning True makes it automatically show help.
        """
        menu.add_command(label = "Reload", command = self.reload)
        return True
class ScriptModuleWdg(_BaseUserScriptWdg):
    def __init__(self,
        master,
        module,
        dispatcher,
    ):
        """Widget that runs a script from a module.

        The module must contain either:
        - a script class named ScriptClass
          with a run method and an optional end method
        or
        - a function named "run" and optional functions:
          - "init", if present, will be run once as the module is read
          - "end", if present, will be run whenever "run" ends
            (whether it succeeded, failed or was cancelled)

        run, init and end all receive one argument: sr, an opscore.actor.ScriptRunner object.

        ScriptClass.__init__ or init may populate sr.master with widgets.
        sr.master is an empty frame above the status bar intended for this purpose.
        (The run and end functions probably should NOT populate sr.master
        with widgets because they are not initially executed and they
        may be executed multiple times)
        """
        self.module = module

        _BaseUserScriptWdg.__init__(
            self,
            master = master,
            name = module.__name__,
            dispatcher = dispatcher,
        )

    def _getScriptFuncs(self, isFirst):
        """Return a dictionary containing either scriptClass
        or one or more of initFunc, runFunc, endFunc;
        it may also contain HelpURL.

        Raises RuntimeError if the module defines neither ScriptClass
        nor a run function.
        """
        if not isFirst:
            # Python 2 builtin: re-import the module so source edits take effect
            reload(self.module)

        scriptClass = getattr(self.module, "ScriptClass", None)
        if scriptClass:
            return {"scriptClass": scriptClass}

        retDict = {}

        # BUG FIX: HelpURL was previously handled inside the function loop
        # below, storing it under the key "HelpURLFunc".  But
        # _BaseUserScriptWdg.__init__ pops "HelpURL", so "HelpURLFunc"
        # leaked through **srArgs into BasicScriptWdg.__init__ as an
        # unexpected keyword argument (TypeError) whenever a module
        # defined HelpURL.  Store it under "HelpURL", matching
        # ScriptFileWdg._getScriptFuncs.
        helpURL = getattr(self.module, "HelpURL", None)
        if helpURL:
            retDict["HelpURL"] = helpURL

        for attrName in ("run", "init", "end"):
            attr = getattr(self.module, attrName, None)
            if attr:
                retDict["%sFunc" % attrName] = attr
            elif attrName == "run":
                raise RuntimeError("%r has no %s function" % (self.module, attrName))

        return retDict
class ScriptFileWdg(_BaseUserScriptWdg):
    def __init__(self,
        master,
        filename,
        dispatcher,
        helpURL = None,
    ):
        """Widget that runs a script python source code file
        (a python module, but one that need not be on the python path).

        The file must contain either:
        - a script class named ScriptClass
          with a run method and an optional end method
        or
        - a function named "run" and optional functions:
          - "init", if present, will be run once as the module is read
          - "end", if present, will be run whenever "run" ends
            (whether it succeeded, failed or was cancelled)

        run, init and end all receive one argument: sr, an opscore.actor.ScriptRunner object.

        ScriptClass.__init__ or init may populate sr.master with widgets.
        sr.master is an empty frame above the status bar intended for this purpose.
        (The run and end functions probably should NOT populate sr.master
        with widgets because they are not initially executed and they
        may be executed multiple times)

        The file name must end in .py (any case)
        """
        # print "ScriptFileWdg(%r, %r, %r)" % (master, filename, dispatcher)
        self.filename = filename
        self.fullPath = os.path.abspath(self.filename)
        baseName = os.path.basename(self.filename)
        scriptName, fileExt = os.path.splitext(baseName)
        if fileExt.lower() != ".py":
            raise RuntimeError("file name %r does not end in '.py'" % (self.filename,))

        # NOTE(review): helpURL is forwarded to _BaseUserScriptWdg.__init__,
        # whose **kargs go straight to Tkinter.Frame.__init__; Tk does not
        # accept a helpURL option, so passing a non-None helpURL here looks
        # like it would raise -- confirm against _BaseUserScriptWdg.
        _BaseUserScriptWdg.__init__(
            self,
            master = master,
            name = scriptName,
            dispatcher = dispatcher,
            helpURL = helpURL,
        )

    def copyPath(self):
        """Copy path to the clipboard.
        """
        # print "copyPath"
        self.clipboard_clear()
        self.clipboard_append(self.fullPath)

    def _setCtxMenu(self, menu):
        """Set the contextual menu for the status bar,
        backgound frame and control buttons.

        Shows the full path (disabled), plus Copy Path and Reload entries.
        """
        # print "_setCtxMenu(%r)" % menu
        menu.add_command(label = self.fullPath, state = "disabled")
        menu.add_command(label = "Copy Path", command = self.copyPath)
        menu.add_command(label = "Reload", command = self.reload)
        return True

    def _getScriptFuncs(self, isFirst=None):
        """Return a dictionary containing either scriptClass
        or one or more of initFunc, runFunc, endFunc;
        it may also contain HelpURL.

        The file is re-executed on every call (Python 2 execfile),
        so reload always picks up source edits; isFirst is ignored.
        """
        # print "_getScriptFuncs(%s)" % isFirst
        scriptLocals = {"__file__": self.fullPath}
        # Python 2 builtin: execute the script file in scriptLocals
        execfile(self.filename, scriptLocals)

        retDict = {}

        helpURL = scriptLocals.get("HelpURL")
        if helpURL:
            retDict["HelpURL"] = helpURL

        # if the file defines ScriptClass, that takes precedence over
        # run/init/end functions
        scriptClass = scriptLocals.get("ScriptClass")
        if scriptClass:
            retDict["scriptClass"] = scriptClass
            return retDict

        for attrName in ("run", "init", "end"):
            attr = scriptLocals.get(attrName)
            if attr:
                retDict["%sFunc" % attrName] = attr
            elif attrName == "run":
                raise RuntimeError("%r has no %s function" % (self.filename, attrName))

        return retDict
if __name__ == "__main__":
    # Manual test harness: shows one ScriptModuleWdg (script loaded from an
    # imported module) and one ScriptFileWdg (same script loaded from its
    # source file) in two top-level windows, then runs the event loop.
    import TUI.Models.TUIModel
    import TestScriptWdg

    tuiModel = TUI.Models.TUIModel.Model(True)
    dispatcher = tuiModel.dispatcher

    tuiModel.tkRoot.title('Script 1 (tuiModel.tkRoot)')
    testTL1 = tuiModel.tkRoot
    sr1 = ScriptModuleWdg(
        master = testTL1,
        module = TestScriptWdg,
        dispatcher = dispatcher,
    )
    sr1.pack()
    testTL1.title(sr1.scriptRunner.name)
    testTL1.resizable(False, False)

    # second window: the same script, loaded from its .py file
    testTL2 = Tkinter.Toplevel()
    currDir = os.path.dirname(__file__)
    sr2 = ScriptFileWdg(
        master = testTL2,
        filename = os.path.join(currDir, 'TestScriptWdg.py'),
        dispatcher = dispatcher,
    )
    sr2.pack()
    testTL2.title(sr2.scriptRunner.name)
    tuiModel.tkRoot.resizable(False, False)

    tuiModel.reactor.run()
|
|
from datetime import datetime
from .base import MarathonResource, MarathonObject, assert_valid_path
from .constraint import MarathonConstraint
from .container import MarathonContainer
from .deployment import MarathonDeployment
from .task import MarathonTask
class MarathonApp(MarathonResource):
    """Marathon Application resource.

    See: https://mesosphere.github.io/marathon/docs/rest-api.html#post-/v2/apps

    :param list[str] accepted_resource_roles: a list of resource roles (the resource offer
                                              must contain at least one of these for the app
                                              to be launched on that host)
    :param list[str] args: args form of the command to run
    :param int backoff_factor: multiplier for subsequent backoff
    :param int backoff_seconds: base time, in seconds, for exponential backoff
    :param str cmd: cmd form of the command to run
    :param constraints: placement constraints
    :type constraints: list[:class:`marathon.models.constraint.MarathonConstraint`] or list[tuple]
    :param container: container info
    :type container: :class:`marathon.models.container.MarathonContainer` or dict
    :param float cpus: cpus required per instance
    :param list[str] dependencies: services (app IDs) on which this app depends
    :param int disk: disk required per instance
    :param deployments: (read-only) currently running deployments that affect this app
    :type deployments: list[:class:`marathon.models.deployment.MarathonDeployment`]
    :param dict env: env vars
    :param str executor: executor
    :param health_checks: health checks
    :type health_checks: list[:class:`marathon.models.MarathonHealthCheck`] or list[dict]
    :param str id: app id
    :param int instances: instances
    :param last_task_failure: last task failure
    :type last_task_failure: :class:`marathon.models.app.MarathonTaskFailure` or dict
    :param float mem: memory (in MB) required per instance
    :type port_definitions: list[:class:`marathon.models.app.PortDefinitions`] or list[dict]
    :param list[int] ports: ports
    :param bool require_ports: require the specified `ports` to be available in the resource offer
    :param list[str] store_urls: store URLs
    :param float task_rate_limit: (Removed in Marathon 0.7.0) maximum number of tasks launched per second
    :param tasks: (read-only) tasks
    :type tasks: list[:class:`marathon.models.task.MarathonTask`]
    :param int tasks_running: (read-only) the number of running tasks
    :param int tasks_staged: (read-only) the number of staged tasks
    :param int tasks_healthy: (read-only) the number of healthy tasks
    :param int tasks_unhealthy: (read-only) the number of unhealthy tasks
    :param upgrade_strategy: strategy by which app instances are replaced during a deployment
    :type upgrade_strategy: :class:`marathon.models.app.MarathonUpgradeStrategy` or dict
    :param list[str] uris: uris
    :param str user: user
    :param str version: version id
    :param version_info: time of last scaling, last config change
    :type version_info: :class:`marathon.models.app.MarathonAppVersionInfo` or dict
    :param task_stats: task statistics
    :type task_stats: :class:`marathon.models.app.MarathonTaskStats` or dict
    :param dict labels: app labels
    :type readiness_checks: list[:class:`marathon.models.app.ReadinessChecks`] or list[dict]
    :type residency: :class:`marathon.models.app.Residency` or dict
    """

    UPDATE_OK_ATTRIBUTES = [
        'args', 'backoff_factor', 'backoff_seconds', 'cmd', 'constraints', 'container', 'cpus', 'dependencies', 'disk',
        'env', 'executor', 'health_checks', 'instances', 'labels', 'max_launch_delay_seconds', 'mem', 'ports', 'require_ports',
        'store_urls', 'task_rate_limit', 'upgrade_strategy', 'uris', 'user', 'version'
    ]
    """List of attributes which may be updated/changed after app creation"""

    CREATE_ONLY_ATTRIBUTES = ['id', 'accepted_resource_roles']
    """List of attributes that should only be passed on creation"""

    READ_ONLY_ATTRIBUTES = [
        'deployments', 'tasks', 'tasks_running', 'tasks_staged', 'tasks_healthy', 'tasks_unhealthy']
    """List of read-only attributes"""

    def __init__(
            self, accepted_resource_roles=None, args=None, backoff_factor=None, backoff_seconds=None, cmd=None,
            constraints=None, container=None, cpus=None, dependencies=None, deployments=None, disk=None, env=None,
            executor=None, health_checks=None, id=None, instances=None, labels=None, last_task_failure=None,
            max_launch_delay_seconds=None, mem=None, ports=None, require_ports=None, store_urls=None,
            task_rate_limit=None, tasks=None, tasks_running=None, tasks_staged=None, tasks_healthy=None,
            tasks_unhealthy=None, upgrade_strategy=None, uris=None, user=None, version=None, version_info=None,
            ip_address=None, fetch=None, task_stats=None, readiness_checks=None, port_definitions=None, residency=None):
        self.accepted_resource_roles = accepted_resource_roles
        # args is deliberately NOT defaulted to []:
        # Marathon 0.7.0-RC1 throws a validation error if this is [] and cmd is passed:
        # "error": "AppDefinition must either contain a 'cmd' or a 'container'."
        self.args = args
        self.backoff_factor = backoff_factor
        self.backoff_seconds = backoff_seconds
        self.cmd = cmd
        # accept pre-built model objects or their JSON (tuple/dict) forms
        self.constraints = [
            c if isinstance(c, MarathonConstraint) else MarathonConstraint(*c)
            for c in (constraints or [])
        ]
        self.container = container if (isinstance(container, MarathonContainer) or container is None) \
            else MarathonContainer.from_json(container)
        self.cpus = cpus
        self.dependencies = dependencies or []
        self.deployments = [
            d if isinstance(
                d, MarathonDeployment) else MarathonDeployment().from_json(d)
            for d in (deployments or [])
        ]
        self.disk = disk
        self.env = env
        self.executor = executor
        # (a redundant plain-list assignment to health_checks, which was
        # immediately overwritten by this coercing one, has been removed)
        self.health_checks = [
            hc if isinstance(
                hc, MarathonHealthCheck) else MarathonHealthCheck().from_json(hc)
            for hc in (health_checks or [])
        ]
        self.id = assert_valid_path(id)
        self.instances = instances
        self.labels = labels or {}
        self.last_task_failure = last_task_failure if (isinstance(last_task_failure, MarathonTaskFailure) or last_task_failure is None) \
            else MarathonTaskFailure.from_json(last_task_failure)
        self.max_launch_delay_seconds = max_launch_delay_seconds
        self.mem = mem
        self.ports = ports or []
        self.port_definitions = port_definitions or []
        self.readiness_checks = readiness_checks or []
        self.residency = residency
        self.require_ports = require_ports
        self.store_urls = store_urls or []
        self.task_rate_limit = task_rate_limit
        self.tasks = [
            t if isinstance(t, MarathonTask) else MarathonTask().from_json(t)
            for t in (tasks or [])
        ]
        self.tasks_running = tasks_running
        self.tasks_staged = tasks_staged
        self.tasks_healthy = tasks_healthy
        self.tasks_unhealthy = tasks_unhealthy
        self.upgrade_strategy = upgrade_strategy if (isinstance(upgrade_strategy, MarathonUpgradeStrategy) or upgrade_strategy is None) \
            else MarathonUpgradeStrategy.from_json(upgrade_strategy)
        self.uris = uris or []
        self.user = user
        self.version = version
        self.version_info = version_info if (isinstance(version_info, MarathonAppVersionInfo) or version_info is None) \
            else MarathonAppVersionInfo.from_json(version_info)
        # BUG FIX: this previously assigned `version_info`, so the
        # task_stats argument was silently discarded.
        self.task_stats = task_stats if (isinstance(task_stats, MarathonTaskStats) or task_stats is None) \
            else MarathonTaskStats.from_json(task_stats)
        # NOTE(review): ip_address and fetch are accepted but never stored;
        # left as-is because adding attributes may change serialization --
        # confirm against MarathonResource before storing them.
class MarathonHealthCheck(MarathonObject):
    """Marathon health check.

    See https://mesosphere.github.io/marathon/docs/health-checks.html

    :param str command: health check command (if protocol == 'COMMAND')
    :param int grace_period_seconds: how long to ignore health check failures on initial task launch (before first healthy status)
    :param int interval_seconds: how long to wait between health checks
    :param int max_consecutive_failures: max number of consecutive failures before the task should be killed
    :param str path: health check target path (if protocol == 'HTTP')
    :param int port_index: target port as indexed in app's `ports` array
    :param str protocol: health check protocol ('HTTP', 'TCP', or 'COMMAND')
    :param int timeout_seconds: how long before a waiting health check is considered failed
    :param bool ignore_http1xx: Ignore HTTP informational status codes 100 to 199.
    :param dict kwargs: additional arguments for forward compatibility
    """

    def __init__(
            self, command=None, grace_period_seconds=None, interval_seconds=None, max_consecutive_failures=None,
            path=None, port_index=None, protocol=None, timeout_seconds=None, ignore_http1xx=None, **kwargs):
        self.command = command
        self.grace_period_seconds = grace_period_seconds
        self.interval_seconds = interval_seconds
        self.max_consecutive_failures = max_consecutive_failures
        self.path = path
        self.port_index = port_index
        self.protocol = protocol
        self.timeout_seconds = timeout_seconds
        self.ignore_http1xx = ignore_http1xx
        # Attach any health-check fields this client version does not know
        # about yet, so newer server payloads round-trip unchanged.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
class MarathonTaskFailure(MarathonObject):
    """Record of a Marathon task failure.

    :param str app_id: application id
    :param str host: mesos slave running the task
    :param str message: error message
    :param str task_id: task id
    :param str slave_id: id of the slave the task ran on
    :param str state: task state
    :param timestamp: when this task failed
    :type timestamp: datetime or str
    :param str version: app version with which this task was started
    """

    DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'

    def __init__(self, app_id=None, host=None, message=None, task_id=None,
                 slave_id=None, state=None, timestamp=None, version=None):
        self.app_id = app_id
        self.host = host
        self.message = message
        self.task_id = task_id
        self.slave_id = slave_id
        self.state = state
        # accept either a ready-made datetime (or None) or an ISO-8601 string
        if timestamp is None or isinstance(timestamp, datetime):
            self.timestamp = timestamp
        else:
            self.timestamp = datetime.strptime(timestamp, self.DATETIME_FORMAT)
        self.version = version
class MarathonUpgradeStrategy(MarathonObject):
    """Marathon app upgrade strategy.

    (The previous docstring was a copy-paste of the health-check one.)

    :param float maximum_over_capacity: maximum % of extra instances allowed during a deploy
    :param float minimum_health_capacity: minimum % of instances kept healthy on deploy
    """

    def __init__(self, maximum_over_capacity=None,
                 minimum_health_capacity=None):
        self.maximum_over_capacity = maximum_over_capacity
        self.minimum_health_capacity = minimum_health_capacity
class MarathonAppVersionInfo(MarathonObject):
    """Timestamps of an app's last scaling and last config change.

    See release notes for Marathon v0.11.0
    https://github.com/mesosphere/marathon/releases/tag/v0.11.0

    :param last_scaling_at: when the app was last scaled
    :type last_scaling_at: datetime or str
    :param last_config_change_at: when the app config last changed
    :type last_config_change_at: datetime or str
    """

    DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'

    def __init__(self, last_scaling_at=None, last_config_change_at=None):
        self.last_scaling_at = self._to_datetime(last_scaling_at)
        self.last_config_change_at = self._to_datetime(last_config_change_at)

    def _to_datetime(self, timestamp):
        """Parse an ISO-8601 string into a datetime; pass None/datetime through."""
        if timestamp is None or isinstance(timestamp, datetime):
            return timestamp
        return datetime.strptime(timestamp, self.DATETIME_FORMAT)
class MarathonTaskStats(MarathonObject):
    """Marathon task statistics.

    See https://mesosphere.github.io/marathon/docs/rest-api.html#taskstats-object-v0-11

    :param started_after_last_scaling: contains statistics about all tasks that were started after the last scaling or restart operation.
    :type started_after_last_scaling: :class:`marathon.models.app.MarathonTaskStatsType` or dict
    :param with_latest_config: contains statistics about all tasks that run with the same config as the latest app version.
    :type with_latest_config: :class:`marathon.models.app.MarathonTaskStatsType` or dict
    :param with_outdated_config: contains statistics about all tasks that were started before the last config change which was not simply a restart or scaling operation.
    :type with_outdated_config: :class:`marathon.models.app.MarathonTaskStatsType` or dict
    :param total_summary: contains statistics about all tasks.
    :type total_summary: :class:`marathon.models.app.MarathonTaskStatsType` or dict
    """

    def __init__(self, started_after_last_scaling=None,
                 with_latest_config=None, with_outdated_config=None, total_summary=None):
        def coerce(value):
            # Accept an already-built MarathonTaskStatsType (or None) as-is;
            # otherwise treat the value as its dict (JSON) form.
            if value is None or isinstance(value, MarathonTaskStatsType):
                return value
            return MarathonTaskStatsType.from_json(value)

        self.started_after_last_scaling = coerce(started_after_last_scaling)
        self.with_latest_config = coerce(with_latest_config)
        self.with_outdated_config = coerce(with_outdated_config)
        self.total_summary = coerce(total_summary)
class MarathonTaskStatsType(MarathonObject):
    """One statistics group of an app's task stats.

    :param stats: stats about app tasks
    :type stats: :class:`marathon.models.app.MarathonTaskStatsStats` or dict
    """

    def __init__(self, stats=None):
        # accept a prebuilt object (or None) as-is, else parse the dict form
        if stats is None or isinstance(stats, MarathonTaskStatsStats):
            self.stats = stats
        else:
            self.stats = MarathonTaskStatsStats.from_json(stats)
class MarathonTaskStatsStats(MarathonObject):
    """Counts and life-time stats for one group of app tasks.

    :param counts: app task count breakdown
    :type counts: :class:`marathon.models.app.MarathonTaskStatsCounts` or dict
    :param life_time: app task life time stats
    :type life_time: :class:`marathon.models.app.MarathonTaskStatsLifeTime` or dict
    """

    def __init__(self, counts=None, life_time=None):
        # accept prebuilt objects (or None) as-is, else parse the dict forms
        if counts is None or isinstance(counts, MarathonTaskStatsCounts):
            self.counts = counts
        else:
            self.counts = MarathonTaskStatsCounts.from_json(counts)
        if life_time is None or isinstance(life_time, MarathonTaskStatsLifeTime):
            self.life_time = life_time
        else:
            self.life_time = MarathonTaskStatsLifeTime.from_json(life_time)
class MarathonTaskStatsCounts(MarathonObject):
    """Task count breakdown for one group of app tasks.

    Equivalent to tasksStaged, tasksRunning, tasksHealthy, tasksUnhealthy.

    :param int staged: Staged task count
    :param int running: Running task count
    :param int healthy: Healthy task count
    :param int unhealthy: unhealthy task count
    """

    def __init__(self, staged=None,
                 running=None, healthy=None, unhealthy=None):
        self.staged, self.running, self.healthy, self.unhealthy = (
            staged, running, healthy, unhealthy)
class MarathonTaskStatsLifeTime(MarathonObject):
    """Life-time statistics for one group of app tasks.

    Measured from `"startedAt"` (timestamp of the Mesos TASK_RUNNING status
    update) of each running task until now.

    :param float average_seconds: Average seconds
    :param float median_seconds: Median seconds
    """

    def __init__(self, average_seconds=None, median_seconds=None):
        self.average_seconds, self.median_seconds = average_seconds, median_seconds
class ReadinessCheck(MarathonObject):
    """Marathon readiness check: https://mesosphere.github.io/marathon/docs/readiness-checks.html

    :param string name (Optional. Default: "readinessCheck"): The name used to identify this readiness check.
    :param string protocol (Optional. Default: "HTTP"): Protocol of the requests to be performed. Either HTTP or HTTPS.
    :param string path (Optional. Default: "/"): Path to the endpoint the task exposes to provide readiness status. Example: /path/to/readiness.
    :param string port_name (Optional. Default: "http-api"): Name of the port to query as described in the portDefinitions. Example: http-api.
    :param int interval_seconds (Optional. Default: 30 seconds): Number of seconds to wait between readiness checks.
    :param int timeout_seconds (Optional. Default: 10 seconds): Number of seconds after which a readiness check times out, regardless of the response. This value must be smaller than interval_seconds.
    :param list http_status_codes_for_ready (Optional. Default: [200]): The HTTP/HTTPS status code to treat as ready.
    :param bool preserve_last_response (Optional. Default: false): If true, the last readiness check response will be preserved and exposed in the API as part of a deployment.
    """

    def __init__(self, name=None, protocol=None, path=None, port_name=None, interval_seconds=None,
                 http_status_codes_for_ready=None, preserve_last_response=None, timeout_seconds=None):
        self.name = name
        self.protocol = protocol
        self.path = path
        self.port_name = port_name
        self.interval_seconds = interval_seconds
        # BUG FIX: timeout_seconds was documented above but missing from the
        # signature, so it could never be set.  It is appended as the LAST
        # parameter so existing positional callers keep working.
        self.timeout_seconds = timeout_seconds
        self.http_status_codes_for_ready = http_status_codes_for_ready
        self.preserve_last_response = preserve_last_response
class PortDefinition(MarathonObject):
    """Marathon port definitions: https://mesosphere.github.io/marathon/docs/ports.html

    :param int port: The port
    :param string protocol: tcp or udp
    :param string name: (optional) the name of the port
    :param dict labels: undocumented
    """

    def __init__(self, port=None, protocol=None, name=None, labels=None):
        self.port, self.protocol = port, protocol
        self.name, self.labels = name, labels
class Residency(MarathonObject):
    """Declares how "resident" an app is: https://mesosphere.github.io/marathon/docs/persistent-volumes.html

    :param int relaunch_escalation_timeout_seconds: How long marathon will try to relaunch where the volumes is, defaults to 3600
    :param string task_lost_behavior: What to do after a TASK_LOST. See the official Marathon docs for options
    """

    def __init__(self, relaunch_escalation_timeout_seconds=None, task_lost_behavior=None):
        self.relaunch_escalation_timeout_seconds = (
            relaunch_escalation_timeout_seconds)
        self.task_lost_behavior = task_lost_behavior
|
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Autoencoders."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import discretization
from tensor2tensor.layers import latent_layers
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
import tensorflow as tf
def reverse_gradient(x, lr=1.0):
  """Identity in the forward pass; scales the gradient by -lr on backprop.

  Forward value: -lr*x + (1+lr)*x == x.  Because the second term is wrapped
  in stop_gradient, only the -lr*x term contributes to the gradient.
  """
  pass_through = tf.stop_gradient((1.0 + lr) * x)
  return pass_through - lr * x
@registry.register_model
class AutoencoderBasic(t2t_model.T2TModel):
  """A basic autoencoder, try with image_mnist_rev or image_cifar10_rev."""
  def __init__(self, *args, **kwargs):
    super(AutoencoderBasic, self).__init__(*args, **kwargs)
    # Bottleneck of the most recent forward pass; lets encode()/decode()
    # communicate with body() without changing its signature.
    self._cur_bottleneck_tensor = None
    # True when inputs have width 1; set in body() / sample callers.
    self.is1d = None
  @property
  def num_channels(self):
    """Number of channels of the problem's data; 1 when not declared."""
    # TODO(lukaszkaiser): is this a universal enough way to get channels?
    try:
      num_channels = self.hparams.problem.num_channels
    except AttributeError:
      num_channels = 1
    return num_channels
  def image_summary(self, name, image_logits, max_outputs=1):
    """Helper for image summaries that are safe on TPU."""
    # Only 5-D logits (an image shape plus a vocab axis) are summarized.
    if len(image_logits.get_shape()) != 5:
      tf.logging.info("Not generating image summary, maybe not an image.")
      return
    return tf.summary.image(
        name,
        common_layers.tpu_safe_image_summary(tf.argmax(image_logits, -1)),
        max_outputs=max_outputs)
  def embed(self, x):
    """Input embedding with a non-zero bias for uniform inputs."""
    with tf.variable_scope("embed", reuse=tf.AUTO_REUSE):
      x_shape = common_layers.shape_list(x)
      # Merge channels and depth before embedding.
      x = tf.reshape(x, x_shape[:-2] + [x_shape[-2] * x_shape[-1]])
      x = tf.layers.dense(
          x,
          self.hparams.hidden_size,
          name="embed",
          activation=common_layers.belu,
          bias_initializer=tf.random_normal_initializer(stddev=0.01))
      x = common_layers.layer_norm(x, name="ln_embed")
      return common_attention.add_timing_signal_nd(x)
  def bottleneck(self, x):
    """Project x to bottleneck_bits dims in [-1, 1]; returns (b, extra_loss).

    The extra loss is always 0 here; during training, uniform noise scaled
    by hparams.bottleneck_noise is added after the tanh.
    """
    with tf.variable_scope("bottleneck"):
      hparams = self.hparams
      x = tf.layers.dense(x, hparams.bottleneck_bits, name="bottleneck")
      if hparams.mode == tf.estimator.ModeKeys.TRAIN:
        noise = 2.0 * tf.random_uniform(common_layers.shape_list(x)) - 1.0
        return tf.tanh(x) + noise * hparams.bottleneck_noise, 0.0
      return tf.tanh(x), 0.0
  def unbottleneck(self, x, res_size, reuse=None):
    """Dense projection from the bottleneck back up to res_size dims."""
    with tf.variable_scope("unbottleneck", reuse=reuse):
      x = tf.layers.dense(x, res_size, name="dense")
      return x
  def make_even_size(self, x):
    """Pad x so spatial dims are even (only axis 1 in the 1-d case)."""
    if not self.is1d:
      return common_layers.make_even_size(x)
    shape1 = x.get_shape().as_list()[1]
    if shape1 is not None and shape1 % 2 == 0:
      return x
    x, _ = common_layers.pad_to_same_length(
        x, x, final_length_divisible_by=2, axis=1)
    return x
  def encoder(self, x):
    """Strided-convolution stack; returns (encoded, pre-conv activations)."""
    with tf.variable_scope("encoder"):
      hparams = self.hparams
      layers = []
      kernel, strides = self._get_kernel_and_strides()
      # Down-convolutions.
      for i in range(hparams.num_hidden_layers):
        x = self.make_even_size(x)
        layers.append(x)
        x = tf.layers.conv2d(
            x,
            hparams.hidden_size * 2**(i + 1),
            kernel,
            strides=strides,
            padding="SAME",
            activation=common_layers.belu,
            name="conv_%d" % i)
        x = common_layers.layer_norm(x, name="ln_%d" % i)
      return x, layers
  def decoder(self, x, encoder_layers):
    """Transposed-convolution stack mirroring encoder() (layers unused)."""
    del encoder_layers
    with tf.variable_scope("decoder"):
      hparams = self.hparams
      kernel, strides = self._get_kernel_and_strides()
      # Up-convolutions.
      for i in range(hparams.num_hidden_layers):
        j = hparams.num_hidden_layers - i - 1
        x = tf.layers.conv2d_transpose(
            x,
            hparams.hidden_size * 2**j,
            kernel,
            strides=strides,
            padding="SAME",
            activation=common_layers.belu,
            name="deconv_%d" % j)
        x = common_layers.layer_norm(x, name="ln_%d" % i)
      return x
  def gumbel_sample(self, reconstr_gan):
    """Sample from reconstruction logits with a straight-through estimator.

    Returns a tensor that is 1-hot in the forward pass but has the
    (temperature-softened) softmax's gradient in the backward pass.
    """
    hparams = self.hparams
    is_training = hparams.mode == tf.estimator.ModeKeys.TRAIN
    vocab_size = self._problem_hparams.target_modality.top_dimensionality
    reconstr_gan = tf.nn.log_softmax(reconstr_gan)
    if is_training and hparams.gumbel_temperature > 0.0:
      gumbel_samples = discretization.gumbel_sample(
          common_layers.shape_list(reconstr_gan))
      gumbel_samples *= hparams.gumbel_noise_factor
      reconstr_gan += gumbel_samples
      reconstr_sample = latent_layers.multinomial_sample(
          reconstr_gan, temperature=hparams.gumbel_temperature)
      reconstr_gan = tf.nn.softmax(reconstr_gan / hparams.gumbel_temperature)
    else:
      reconstr_sample = tf.argmax(reconstr_gan, axis=-1)
      reconstr_gan = tf.nn.softmax(reconstr_gan / 0.1)  # Sharpen a bit.
    # Use 1-hot forward, softmax backward.
    reconstr_hot = tf.one_hot(reconstr_sample, vocab_size)
    reconstr_gan += reconstr_hot - tf.stop_gradient(reconstr_gan)
    return reconstr_gan
  def body(self, features):
    """Embed, encode, bottleneck, decode; returns (logits, losses dict)."""
    hparams = self.hparams
    is_training = hparams.mode == tf.estimator.ModeKeys.TRAIN
    vocab_size = self._problem_hparams.target_modality.top_dimensionality
    encoder_layers = None
    self.is1d = hparams.sample_width == 1
    if hparams.mode != tf.estimator.ModeKeys.PREDICT:
      labels = features["targets_raw"]
      labels_shape = common_layers.shape_list(labels)
      # handle videos
      if len(labels.shape) == 5:
        labels = common_layers.time_to_channels(labels)
      shape = common_layers.shape_list(labels)
      x = tf.one_hot(labels, vocab_size)
      x = self.embed(x)
      target_codes = x
      if shape[2] == 1:
        self.is1d = True
      # Run encoder.
      x, encoder_layers = self.encoder(x)
      # Bottleneck.
      b, b_loss = self.bottleneck(x)
      xb_loss = 0.0
      b_shape = common_layers.shape_list(b)
      self._cur_bottleneck_tensor = b
      b = self.unbottleneck(b, common_layers.shape_list(x)[-1])
      if not is_training:
        x = b
      else:
        l = 2**hparams.num_hidden_layers
        warm_step = int(hparams.bottleneck_warmup_steps * 0.25 * l)
        nomix_p = common_layers.inverse_lin_decay(warm_step) + 0.01
        if common_layers.should_generate_summaries():
          tf.summary.scalar("nomix_p_bottleneck", nomix_p)
        rand = tf.random_uniform(common_layers.shape_list(x))
        # This is the distance between b and x. Having this as loss helps learn
        # the bottleneck function, but if we back-propagated to x it would be
        # minimized by just setting x=0 and b=0 -- so we don't want too much
        # of the influence of this, and we stop-gradient to not zero-out x.
        x_stop = tf.stop_gradient(x)
        xb_loss = tf.reduce_mean(tf.reduce_sum(tf.square(x_stop - b), axis=-1))
        # To prevent this loss from exploding we clip at 1, but anneal clipping.
        clip_max = 1.0 / common_layers.inverse_exp_decay(
            warm_step, min_value=0.001)
        xb_clip = tf.maximum(tf.stop_gradient(xb_loss), clip_max)
        xb_loss *= clip_max / xb_clip
        x = tf.where(tf.less(rand, nomix_p), b, x)
      if hparams.gan_loss_factor != 0.0:
        # Add a purely sampled batch on which we'll compute the GAN loss.
        g = self.unbottleneck(
            self.sample(shape=b_shape),
            common_layers.shape_list(x)[-1],
            reuse=True)
        x = tf.concat([g, x], axis=0)
        encoder_layers = [tf.concat([l, l], axis=0) for l in encoder_layers]
    else:
      # PREDICT mode: decode from a cached or freshly sampled bottleneck.
      if self._cur_bottleneck_tensor is None:
        b = self.sample()
      else:
        b = self._cur_bottleneck_tensor
      self._cur_bottleneck_tensor = b
      res_size = self.hparams.hidden_size * 2**self.hparams.num_hidden_layers
      res_size = min(res_size, hparams.max_hidden_size)
      x = self.unbottleneck(b, res_size)
    # Run decoder.
    x = self.decoder(x, encoder_layers)
    # Cut to the right size and mix before returning.
    res = x
    if hparams.mode != tf.estimator.ModeKeys.PREDICT:
      res = x[:, :shape[1], :shape[2], :]
    # Final dense layer.
    res = tf.layers.dense(
        res, self.num_channels * hparams.hidden_size, name="res_dense")
    output_shape = common_layers.shape_list(res)[:-1] + [
        self.num_channels, self.hparams.hidden_size
    ]
    res = tf.reshape(res, output_shape)
    if hparams.mode == tf.estimator.ModeKeys.PREDICT:
      if hparams.use_vq_loss:
        (reconstr, _, _, _, _) = discretization.vq_loss(res, labels, vocab_size)
      else:
        reconstr = tf.layers.dense(res, vocab_size, name="autoencoder_final")
      return reconstr, {"bottleneck_loss": 0.0}
    if hparams.gan_loss_factor != 0.0:
      # First half of the batch is the purely sampled (GAN) part.
      res_gan, res = tf.split(res, 2, axis=0)
    # Losses.
    losses = {
        "bottleneck_extra": b_loss,
        "bottleneck_l2": hparams.bottleneck_l2_factor * xb_loss
    }
    if hparams.use_vq_loss:
      vq_temperature = hparams.vq_temperature / common_layers.inverse_exp_decay(
          hparams.gan_codes_warmup_steps * 1.2,
          min_value=hparams.vq_temperature * 2)
      if hparams.mode != tf.estimator.ModeKeys.TRAIN:
        vq_temperature = None
      with tf.variable_scope("vq_loss"):
        (reconstr, _, target_codes, code_loss,
         targets_loss) = discretization.vq_loss(
             res, labels, vocab_size, temperature=vq_temperature)
      losses["code_loss"] = code_loss * hparams.code_loss_factor
      losses["training"] = targets_loss
    else:
      reconstr = tf.layers.dense(res, vocab_size, name="autoencoder_final")
      targets_loss = tf.losses.sparse_softmax_cross_entropy(
          logits=tf.reshape(reconstr, labels_shape + [vocab_size]),
          labels=tf.reshape(labels, labels_shape))
      losses["training"] = targets_loss
    # GAN losses.
    if hparams.gan_loss_factor != 0.0:
      update_means_factor = common_layers.inverse_exp_decay(
          hparams.gan_codes_warmup_steps, min_value=0.0001)
      if hparams.use_vq_loss:
        with tf.variable_scope("vq_loss", reuse=True):
          update_means = tf.less(tf.random_uniform([]), update_means_factor)
          reconstr_gan, gan_codes, _, code_loss_gan, _ = discretization.vq_loss(
              res_gan,
              labels,
              vocab_size,
              do_update=update_means,
              temperature=vq_temperature)
          reconstr_gan_nonoise = reconstr_gan
          code_loss_gan *= hparams.code_loss_factor * update_means_factor
          losses["code_loss_gan"] = code_loss_gan
      else:
        reconstr_gan = tf.layers.dense(
            res_gan, vocab_size, name="autoencoder_final", reuse=True)
        reconstr_gan_nonoise = reconstr_gan
        reconstr_gan = self.gumbel_sample(reconstr_gan)
        # Embed to codes.
        gan_codes = self.embed(reconstr_gan)
    # Add GAN loss if requested.
    gan_loss = 0.0
    if hparams.gan_loss_factor != 0.0:
      self.image_summary("gan", reconstr_gan_nonoise)
      def discriminate(x):
        """Run a discriminator depending on the hparams."""
        if hparams.discriminator == "default":
          return common_layers.deep_discriminator(
              x, hparams.discriminator_batchnorm, is_training)
        elif hparams.discriminator == "patched":
          return common_layers.patch_discriminator(x)
        elif hparams.discriminator == "single":
          return common_layers.single_discriminator(
              x,
              hparams.discriminator_size,
              hparams.discriminator_kernel_size,
              hparams.discriminator_strides,
              pure_mean=hparams.discriminator_pure_mean)
        elif hparams.discriminator == "double":
          return common_layers.double_discriminator(
              x,
              hparams.discriminator_size,
              hparams.discriminator_kernel_size,
              hparams.discriminator_strides,
              pure_mean=hparams.discriminator_pure_mean)
        else:
          raise Exception("Unknown discriminator %s" % hparams.discriminator)
      tc_shape = common_layers.shape_list(target_codes)
      if len(tc_shape) > 4:
        target_codes = tf.reshape(target_codes,
                                  tc_shape[:-2] + [tc_shape[-1] * tc_shape[-2]])
        gan_codes = tf.reshape(gan_codes,
                               tc_shape[:-2] + [tc_shape[-1] * tc_shape[-2]])
      gan_lr = common_layers.inverse_exp_decay(
          hparams.gan_codes_warmup_steps * 1.5)
      rev_grad_gan_codes = reverse_gradient(gan_codes, lr=gan_lr)
      gan_loss = common_layers.sliced_gan_loss(
          target_codes,
          rev_grad_gan_codes,
          discriminate,
          self.hparams.num_sliced_vecs,
          do_tanh=hparams.sliced_do_tanh)
      gan_loss *= hparams.gan_loss_factor * update_means_factor
      # Negated: the generator maximizes this loss via the reversed gradient.
      losses["gan_loss"] = -gan_loss
    self.image_summary("ae", reconstr)
    logits = tf.reshape(reconstr, labels_shape + [vocab_size])
    return logits, losses
  def sample(self, features=None, shape=None):
    """Sample a random bottleneck uniformly in [-1, 1]."""
    del features
    hp = self.hparams
    div_x = 2**hp.num_hidden_layers
    div_y = 1 if self.is1d else 2**hp.num_hidden_layers
    size = [
        hp.batch_size, hp.sample_height // div_x, hp.sample_width // div_y,
        hp.bottleneck_bits
    ]
    size = size if shape is None else shape
    # Sample in [-1, 1] as the bottleneck is under tanh.
    return 2.0 * tf.random_uniform(size) - 1.0
  def encode(self, x):
    """Auto-encode x and return the bottleneck."""
    features = {"targets": x}
    self(features)  # pylint: disable=not-callable
    res = tf.maximum(0.0, self._cur_bottleneck_tensor)  # Be 0/1 and not -1/1.
    self._cur_bottleneck_tensor = None
    return res
  def infer(self, features, *args, **kwargs):  # pylint: disable=arguments-differ
    """Produce predictions from the model by sampling."""
    del args, kwargs
    # Inputs and features preparation needed to handle edge cases.
    if not features:
      features = {}
    inputs_old = None
    if "inputs" in features and len(features["inputs"].shape) < 4:
      inputs_old = features["inputs"]
      features["inputs"] = tf.expand_dims(features["inputs"], 2)
    # Sample and decode.
    num_channels = self.num_channels
    if "targets" not in features:
      features["targets"] = tf.zeros(
          [self.hparams.batch_size, 1, 1, num_channels], dtype=tf.int32)
    logits, _ = self(features)  # pylint: disable=not-callable
    samples = tf.argmax(logits, axis=-1)
    # Restore inputs to not confuse Estimator in edge cases.
    if inputs_old is not None:
      features["inputs"] = inputs_old
    # Return samples.
    return samples
  def decode(self, bottleneck):
    """Auto-decode from the bottleneck and return the result."""
    # Get the shape from bottleneck and num channels.
    shape = common_layers.shape_list(bottleneck)
    try:
      num_channels = self.hparams.problem.num_channels
    except AttributeError:
      num_channels = 1
    dummy_targets = tf.zeros(shape[:-1] + [num_channels])
    # Set the bottleneck to decode.
    if len(shape) > 4:
      bottleneck = tf.squeeze(bottleneck, axis=[1])
    bottleneck = 2 * bottleneck - 1  # Be -1/1 instead of 0/1.
    self._cur_bottleneck_tensor = bottleneck
    # Run decoding.
    res = self.infer({"targets": dummy_targets})
    self._cur_bottleneck_tensor = None
    return res
  def _get_kernel_and_strides(self):
    """Kernel and stride tuples for the conv layers (1-d aware)."""
    hparams = self.hparams
    kernel = (hparams.kernel_height, hparams.kernel_width)
    kernel = (hparams.kernel_height, 1) if self.is1d else kernel
    strides = (2, 1) if self.is1d else (2, 2)
    return (kernel, strides)
@registry.register_model
class AutoencoderAutoregressive(AutoencoderBasic):
  """Autoencoder with an autoregressive part."""
  def body(self, features):
    """Run the base autoencoder, then an autoregressive head over targets."""
    hparams = self.hparams
    # Run the basic autoencoder part first.
    basic_result, losses = super(AutoencoderAutoregressive, self).body(features)
    if hparams.autoregressive_mode == "none":
      assert not hparams.autoregressive_forget_base
      return basic_result, losses
    if "training" in losses:
      plain_training_loss = losses.pop("training")
      losses["plain"] = plain_training_loss
    res_shape = common_layers.shape_list(basic_result)
    vocab_size = self._problem_hparams.target_modality.top_dimensionality
    targets = tf.one_hot(features["targets_raw"], vocab_size)
    # Prepare inputs for autoregressive modes.
    if common_layers.shape_list(features["targets"])[1] == 1:
      # This happens on the first step of predictions.
      assert hparams.mode == tf.estimator.ModeKeys.PREDICT
      targets = tf.zeros_like(basic_result)
    targets = self.embed(targets)
    if hparams.autoregressive_gumbel_sample:
      basic_hot = self.gumbel_sample(basic_result)
    else:
      basic_hot = basic_result
    basic_result = self.embed(basic_hot)
    shape = common_layers.shape_list(basic_result)
    basic1d = tf.reshape(basic_result, [shape[0], -1, shape[-1]])
    targets = tf.reshape(targets, common_layers.shape_list(basic_result))
    # During autoregressive inference, don't resample.
    if hparams.mode == tf.estimator.ModeKeys.PREDICT:
      if hasattr(hparams, "sampled_basic1d_tensor"):
        basic1d = hparams.sampled_basic1d_tensor
      else:
        hparams.sampled_basic1d_tensor = basic1d
    # Sometimes it's useful to look at non-autoregressive evals.
    targets_dropout = targets
    if (hparams.mode == tf.estimator.ModeKeys.EVAL and
        hparams.autoregressive_eval_pure_autoencoder):
      targets_dropout = tf.zeros_like(basic_result)
    # Now combine the basic reconstruction with shifted targets.
    targets1d = tf.reshape(targets_dropout, [shape[0], -1, shape[-1]])
    targets_shifted = common_layers.shift_right_3d(targets1d)
    concat1d = tf.concat([basic1d, targets_shifted], axis=-1)
    # The forget_base hparam sets purely-autoregressive mode, no autoencoder.
    if hparams.autoregressive_forget_base:
      concat1d = tf.reshape(targets, [shape[0], -1, shape[-1]])
      concat1d = common_layers.shift_right_3d(concat1d)
    # The autoregressive part depends on the mode.
    if hparams.autoregressive_mode == "conv3":
      res = common_layers.conv1d(
          concat1d,
          hparams.hidden_size,
          3,
          padding="LEFT",
          activation=common_layers.belu,
          name="autoregressive_conv3")
      res = tf.layers.dense(res, vocab_size, name="autoregressive_final")
      return tf.reshape(res, res_shape), losses
    if hparams.autoregressive_mode == "conv5":
      res = common_layers.conv1d(
          concat1d,
          hparams.hidden_size,
          5,
          padding="LEFT",
          activation=common_layers.belu,
          name="autoregressive_conv5")
      res = tf.layers.dense(res, vocab_size, name="autoregressive_final")
      return tf.reshape(res, res_shape), losses
    if hparams.autoregressive_mode == "sru":
      res = common_layers.conv1d(
          concat1d,
          hparams.hidden_size,
          3,
          padding="LEFT",
          activation=common_layers.belu,
          name="autoregressive_sru_conv3")
      res = common_layers.sru(res)
      res = tf.layers.dense(res, vocab_size, name="autoregressive_final")
      return tf.reshape(res, res_shape), losses
    raise ValueError(
        "Unsupported autoregressive mode: %s" % hparams.autoregressive_mode)
  def infer(self, features, *args, **kwargs):
    """Produce predictions from the model by sampling."""
    # Inputs and features preparation needed to handle edge cases.
    if not features:
      features = {}
    inputs_old = None
    if "inputs" in features and len(features["inputs"].shape) < 4:
      inputs_old = features["inputs"]
      features["inputs"] = tf.expand_dims(features["inputs"], 2)
    # Sample first.
    try:
      num_channels = self.hparams.problem.num_channels
    except AttributeError:
      num_channels = 1
    if "targets" not in features:
      features["targets"] = tf.zeros(
          [self.hparams.batch_size, 1, 1, num_channels], dtype=tf.int32)
    logits, _ = self(features)  # pylint: disable=not-callable
    samples = common_layers.sample_with_temperature(logits, 0.0)
    shape = common_layers.shape_list(samples)
    # Sample again if requested for the autoregressive part.
    extra_samples = self.hparams.autoregressive_decode_steps
    for i in range(extra_samples):
      # Anneal sampling temperature to 0 over the last two extra steps.
      if i == extra_samples - 2:
        self.hparams.sampling_temp /= 2
      if i == extra_samples - 1:
        self.hparams.sampling_temp = 0.0
      features["targets"] = samples
      old_samples1d = tf.reshape(samples, [shape[0], -1, shape[3]])
      with tf.variable_scope(tf.get_variable_scope(), reuse=True):
        logits, _ = self(features)  # pylint: disable=not-callable
        samples = common_layers.sample_with_temperature(
            logits, self.hparams.sampling_temp)
        samples1d = tf.reshape(samples, [shape[0], -1, shape[3]])
        # Keep positions already decoded; only resample from position i on.
        samples1d = tf.concat([old_samples1d[:, :i, :], samples1d[:, i:, :]],
                              axis=1)
        samples = tf.reshape(samples1d, shape)
    # Restore inputs to not confuse Estimator in edge cases.
    if inputs_old is not None:
      features["inputs"] = inputs_old
    # Return samples.
    return samples
@registry.register_model
class AutoencoderResidual(AutoencoderAutoregressive):
  """Residual autoencoder."""
  def dropout(self, x):
    """Dropout annealed in during training, broadcast over the depth axis."""
    is_training = self.hparams.mode == tf.estimator.ModeKeys.TRAIN
    hparams = self.hparams
    if hparams.dropout <= 0.0 or not is_training:
      return x
    warm_step = hparams.bottleneck_warmup_steps * 2**hparams.num_hidden_layers
    dropout = common_layers.inverse_lin_decay(warm_step // 2) * hparams.dropout
    return common_layers.dropout_with_broadcast_dims(
        x, 1.0 - dropout, broadcast_dims=[-1])
  def encoder(self, x):
    """Residual down-convolution stack; returns (encoded, per-layer inputs)."""
    with tf.variable_scope("encoder"):
      hparams = self.hparams
      layers = []
      kernel, strides = self._get_kernel_and_strides()
      residual_kernel = (hparams.residual_kernel_height,
                         hparams.residual_kernel_width)
      residual_kernel1d = (hparams.residual_kernel_height, 1)
      residual_kernel = residual_kernel1d if self.is1d else residual_kernel
      residual_conv = tf.layers.conv2d
      if hparams.residual_use_separable_conv:
        residual_conv = tf.layers.separable_conv2d
      # Down-convolutions.
      for i in range(hparams.num_hidden_layers):
        with tf.variable_scope("layer_%d" % i):
          x = self.make_even_size(x)
          layers.append(x)
          x = self.dropout(x)
          filters = hparams.hidden_size * 2**(i + 1)
          filters = min(filters, hparams.max_hidden_size)
          x = common_attention.add_timing_signal_nd(x)
          x = tf.layers.conv2d(
              x,
              filters,
              kernel,
              strides=strides,
              padding="SAME",
              activation=common_layers.belu,
              name="strided")
          y = x
          # NOTE(review): unlike self.dropout(), this dropout is not gated on
          # training mode, so it is also active in eval/predict — confirm.
          y = tf.nn.dropout(y, 1.0 - hparams.residual_dropout)
          for r in range(hparams.num_residual_layers):
            residual_filters = filters
            if r < hparams.num_residual_layers - 1:
              residual_filters = int(
                  filters * hparams.residual_filter_multiplier)
            y = residual_conv(
                y,
                residual_filters,
                residual_kernel,
                padding="SAME",
                activation=common_layers.belu,
                name="residual_%d" % r)
          x += y
          x = common_layers.layer_norm(x, name="ln")
      return x, layers
  def decoder(self, x, encoder_layers=None):
    """Residual up-convolution stack, optionally mixing in encoder layers."""
    with tf.variable_scope("decoder"):
      hparams = self.hparams
      is_training = self.hparams.mode == tf.estimator.ModeKeys.TRAIN
      kernel, strides = self._get_kernel_and_strides()
      residual_kernel = (hparams.residual_kernel_height,
                         hparams.residual_kernel_width)
      residual_kernel1d = (hparams.residual_kernel_height, 1)
      residual_kernel = residual_kernel1d if self.is1d else residual_kernel
      residual_conv = tf.layers.conv2d
      if hparams.residual_use_separable_conv:
        residual_conv = tf.layers.separable_conv2d
      # Up-convolutions.
      for i in range(hparams.num_hidden_layers):
        j = hparams.num_hidden_layers - i - 1
        if is_training:
          nomix_p = common_layers.inverse_lin_decay(
              int(hparams.bottleneck_warmup_steps * 0.25 * 2**j)) + 0.01
          if common_layers.should_generate_summaries():
            tf.summary.scalar("nomix_p_%d" % j, nomix_p)
        filters = hparams.hidden_size * 2**j
        filters = min(filters, hparams.max_hidden_size)
        with tf.variable_scope("layer_%d" % i):
          j = hparams.num_hidden_layers - i - 1
          x = tf.layers.conv2d_transpose(
              x,
              filters,
              kernel,
              strides=strides,
              padding="SAME",
              activation=common_layers.belu,
              name="strided")
          y = x
          for r in range(hparams.num_residual_layers):
            residual_filters = filters
            if r < hparams.num_residual_layers - 1:
              residual_filters = int(
                  filters * hparams.residual_filter_multiplier)
            y = residual_conv(
                y,
                residual_filters,
                residual_kernel,
                padding="SAME",
                activation=common_layers.belu,
                name="residual_%d" % r)
          # NOTE(review): dropout here is also not gated on training mode.
          x += tf.nn.dropout(y, 1.0 - hparams.residual_dropout)
          x = common_layers.layer_norm(x, name="ln")
          x = common_attention.add_timing_signal_nd(x)
          if encoder_layers is not None:
            enc_x = encoder_layers[j]
            enc_shape = common_layers.shape_list(enc_x)
            x = x[:, :enc_shape[1], :enc_shape[2], :]
            if is_training:  # Mix at the beginning of training.
              rand = tf.random_uniform(common_layers.shape_list(x))
              x = tf.where(tf.less(rand, nomix_p), x, enc_x)
      return x
@registry.register_model
class AutoencoderResidualVAE(AutoencoderResidual):
  """Residual VAE autoencoder."""
  def bottleneck(self, x):
    """Gaussian VAE bottleneck; returns (z, kl_loss * kl_beta)."""
    hparams = self.hparams
    z_size = hparams.bottleneck_bits
    x_shape = common_layers.shape_list(x)
    with tf.variable_scope("vae"):
      mu = tf.layers.dense(x, z_size, name="mu")
      if hparams.mode != tf.estimator.ModeKeys.TRAIN:
        return mu, 0.0  # No sampling or kl loss on eval.
      log_sigma = tf.layers.dense(x, z_size, name="log_sigma")
      # Reparametrization trick: z = mu + sigma * epsilon.
      epsilon = tf.random_normal(x_shape[:-1] + [z_size])
      z = mu + tf.exp(log_sigma / 2) * epsilon
      kl = 0.5 * tf.reduce_mean(
          tf.exp(log_sigma) + tf.square(mu) - 1. - log_sigma, axis=-1)
      # Free bits: no KL penalty below z_size // 4.
      free_bits = z_size // 4
      kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
      return z, kl_loss * hparams.kl_beta
  def sample(self, features=None, shape=None):
    """Sample a bottleneck from the standard normal prior."""
    del features
    hparams = self.hparams
    div_x = 2**hparams.num_hidden_layers
    div_y = 1 if self.is1d else 2**hparams.num_hidden_layers
    size = [
        hparams.batch_size, hparams.sample_height // div_x,
        hparams.sample_width // div_y, hparams.bottleneck_bits
    ]
    size = size if shape is None else shape
    return tf.random_normal(size)
@registry.register_model
class AutoencoderBasicDiscrete(AutoencoderAutoregressive):
  """Discrete autoencoder."""
  def bottleneck(self, x):
    """Straight-through binarization of a tanh bottleneck."""
    hparams = self.hparams
    x = tf.tanh(tf.layers.dense(x, hparams.bottleneck_bits, name="bottleneck"))
    # d is sign(x) in the forward pass but carries x's gradient backward.
    d = x + tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x)) - 1.0 - x)
    if hparams.mode == tf.estimator.ModeKeys.TRAIN:
      # Randomly flip a bottleneck_noise fraction of the bits during training.
      noise = tf.random_uniform(common_layers.shape_list(x))
      noise = 2.0 * tf.to_float(tf.less(hparams.bottleneck_noise, noise)) - 1.0
      d *= noise
    # Anneal from the continuous tanh to the discretized version.
    x = common_layers.mix(d, x, hparams.discretize_warmup_steps,
                          hparams.mode == tf.estimator.ModeKeys.TRAIN)
    return x, 0.0
  def sample(self, features=None, shape=None):
    """Sample random {-1, 1} bits for the bottleneck."""
    del features
    hp = self.hparams
    div_x = 2**hp.num_hidden_layers
    div_y = 1 if self.is1d else 2**hp.num_hidden_layers
    size = [
        hp.batch_size, hp.sample_height // div_x, hp.sample_width // div_y,
        hp.bottleneck_bits
    ]
    size = size if shape is None else shape
    rand = tf.random_uniform(size)
    return 2.0 * tf.to_float(tf.less(0.5, rand)) - 1.0
@registry.register_model
class AutoencoderResidualDiscrete(AutoencoderResidual):
  """Discrete residual autoencoder."""
  def variance_loss(self, b):
    """Mean-magnitude of b over a randomly selected subset of positions."""
    part = tf.random_uniform(common_layers.shape_list(b))
    selection = tf.to_float(tf.less(part, tf.random_uniform([])))
    selection_size = tf.reduce_sum(selection)
    # +1 guards against division by zero when the selection is empty.
    part_avg = tf.abs(tf.reduce_sum(b * selection)) / (selection_size + 1)
    return part_avg
  def bottleneck(self, x, bottleneck_bits=None):  # pylint: disable=arguments-differ
    """Parametrized discrete bottleneck, optionally overriding its size."""
    # Temporarily override hparams.bottleneck_bits if requested.
    if bottleneck_bits is not None:
      old_bottleneck_bits = self.hparams.bottleneck_bits
      self.hparams.bottleneck_bits = bottleneck_bits
    res, loss = discretization.parametrized_bottleneck(x, self.hparams)
    if bottleneck_bits is not None:
      self.hparams.bottleneck_bits = old_bottleneck_bits
    return res, loss
  def unbottleneck(self, x, res_size, reuse=None):
    """Parametrized un-bottleneck matching the configured bottleneck kind."""
    with tf.variable_scope("unbottleneck", reuse=reuse):
      return discretization.parametrized_unbottleneck(x, res_size, self.hparams)
  def sample(self, features=None, shape=None):
    """Sample random {-1, 1} bits for the bottleneck."""
    del features
    hp = self.hparams
    div_x = 2**hp.num_hidden_layers
    div_y = 1 if self.is1d else 2**hp.num_hidden_layers
    size = [
        hp.batch_size, hp.sample_height // div_x, hp.sample_width // div_y,
        hp.bottleneck_bits
    ]
    size = size if shape is None else shape
    rand = tf.random_uniform(size)
    res = 2.0 * tf.to_float(tf.less(0.5, rand)) - 1.0
    # If you want to set some first bits to a fixed value, do this:
    # fixed = tf.zeros_like(rand) - 1.0
    # nbits = 3
    # res = tf.concat([fixed[:, :, :, :nbits], res[:, :, :, nbits:]], axis=-1)
    return res
@registry.register_model
class AutoencoderOrderedDiscrete(AutoencoderResidualDiscrete):
  """Ordered discrete autoencoder."""
  def bottleneck(self, x):  # pylint: disable=arguments-differ
    """Discrete bottleneck whose noise probability grows per bit position."""
    hparams = self.hparams
    if hparams.unordered:
      return super(AutoencoderOrderedDiscrete, self).bottleneck(x)
    noise = hparams.bottleneck_noise
    hparams.bottleneck_noise = 0.0  # We'll add noise below.
    x, loss = discretization.parametrized_bottleneck(x, hparams)
    hparams.bottleneck_noise = noise
    if hparams.mode == tf.estimator.ModeKeys.TRAIN:
      # We want a number p such that p^bottleneck_bits = 1 - noise.
      # So log(p) * bottleneck_bits = log(noise)
      log_p = tf.log(1 - float(noise) / 2) / float(hparams.bottleneck_bits)
      # Probabilities of flipping are p, p^2, p^3, ..., p^bottleneck_bits.
      noise_mask = 1.0 - tf.exp(tf.cumsum(tf.zeros_like(x) + log_p, axis=-1))
      # Having the no-noise mask, we can make noise just uniformly at random.
      ordered_noise = tf.random_uniform(tf.shape(x))
      # We want our noise to be 1s at the start and random {-1, 1} bits later.
      ordered_noise = tf.to_float(tf.less(noise_mask, ordered_noise))
      # Now we flip the bits of x on the noisy positions (ordered and normal).
      x *= 2.0 * ordered_noise - 1
    return x, loss
@registry.register_model
class AutoencoderStacked(AutoencoderResidualDiscrete):
  """A stacked autoencoder."""
  def stack(self, b, size, bottleneck_bits, name):
    """Encode bottleneck b one level further down to bottleneck_bits."""
    with tf.variable_scope(name + "_stack"):
      unb = self.unbottleneck(b, size)
      enc = self.encoder(unb)
      b, _ = self.bottleneck(enc, bottleneck_bits=bottleneck_bits)
      return b
  def unstack(self, b, size, bottleneck_bits, name):
    """Decode bottleneck b one level up; returns (bits, 2-way logits)."""
    with tf.variable_scope(name + "_unstack"):
      unb = self.unbottleneck(b, size)
      dec = self.decoder(unb)
      pred = tf.layers.dense(dec, bottleneck_bits, name="pred")
      pred_shape = common_layers.shape_list(pred)
      # Two logits per bit: x for -1 and y for +1.
      pred1 = tf.reshape(pred, pred_shape[:-1] + [-1, 2])
      x, y = tf.split(pred1, 2, axis=-1)
      x = tf.squeeze(x, axis=[-1])
      y = tf.squeeze(y, axis=[-1])
      gt = 2.0 * tf.to_float(tf.less(x, y)) - 1.0
      gtc = tf.tanh(y - x)
      # Straight-through: hard sign forward, tanh gradient backward.
      gt += gtc - tf.stop_gradient(gtc)
      return gt, pred1
  def stack_loss(self, b, b_pred, name):
    """Cross-entropy between the true bits b and the 2-way logits b_pred."""
    with tf.variable_scope(name):
      # Map bits {-1, 1} to class labels {0, 1}.
      labels_discrete = tf.to_int32((b + 1.0) * 0.5)
      loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
          labels=labels_discrete, logits=b_pred)
      return tf.reduce_mean(loss)
  def full_stack(self, b, x_size, bottleneck_bits, losses, is_training, i):
    """Recursively stack/unstack i levels, accumulating per-level losses."""
    stack1_b = self.stack(b, x_size, bottleneck_bits, "step%d" % i)
    if i > 1:
      stack1_b = self.full_stack(stack1_b, 2 * x_size, 2 * bottleneck_bits,
                                 losses, is_training, i - 1)
    b1, b_pred = self.unstack(stack1_b, x_size, bottleneck_bits, "step%d" % i)
    losses["stack%d_loss" % i] = self.stack_loss(b, b_pred, "step%d" % i)
    b_shape = common_layers.shape_list(b)
    if is_training:
      # With probability 0.5, use the true bottleneck instead of the
      # reconstructed one for the whole batch.
      condition = tf.less(tf.random_uniform([]), 0.5)
      condition = tf.reshape(condition, [1] * len(b.shape))
      condition = tf.tile(condition, b.shape)
      b1 = tf.where(condition, b, b1)
    return tf.reshape(b1, b_shape)
  def body(self, features):
    """Stacked encode/decode with a 1-layer encoder/decoder per level."""
    hparams = self.hparams
    num_stacks = hparams.num_hidden_layers
    # Each stack level uses a single hidden layer; restored before returning.
    hparams.num_hidden_layers = 1
    is_training = hparams.mode == tf.estimator.ModeKeys.TRAIN
    if hparams.mode != tf.estimator.ModeKeys.PREDICT:
      x = features["targets"]
      shape = common_layers.shape_list(x)
      is1d = shape[2] == 1
      self.is1d = is1d
      x, _ = common_layers.pad_to_same_length(
          x, x, final_length_divisible_by=2**num_stacks, axis=1)
      if not is1d:
        x, _ = common_layers.pad_to_same_length(
            x, x, final_length_divisible_by=2**num_stacks, axis=2)
      # Run encoder.
      x = self.encoder(x)
      x_size = common_layers.shape_list(x)[-1]
      # Bottleneck (mix during early training, not too important but stable).
      b, b_loss = self.bottleneck(x)
      losses = {"bottleneck0_loss": b_loss}
      b = self.full_stack(b, 2 * x_size, 2 * hparams.bottleneck_bits, losses,
                          is_training, num_stacks - 1)
      b = self.unbottleneck(b, x_size)
      b = common_layers.mix(b, x, hparams.bottleneck_warmup_steps, is_training)
      x = b
    else:
      b = self.sample()
      res_size = self.hparams.hidden_size * 2**self.hparams.num_hidden_layers
      res_size = min(res_size, hparams.max_hidden_size)
      x = self.unbottleneck(b, res_size)
    # Run decoder.
    x = self.decoder(x)
    if hparams.mode == tf.estimator.ModeKeys.PREDICT:
      return x
    # Cut to the right size and mix before returning.
    res = x[:, :shape[1], :shape[2], :]
    res = common_layers.mix(res, features["targets"],
                            hparams.bottleneck_warmup_steps // 2, is_training)
    hparams.num_hidden_layers = num_stacks
    return res, losses
@registry.register_hparams
def autoencoder_basic():
  """Basic autoencoder model."""
  hparams = common_hparams.basic_params1()
  # Optimization.
  hparams.optimizer = "Adam"
  hparams.learning_rate_constant = 0.0002
  hparams.learning_rate_warmup_steps = 500
  hparams.learning_rate_schedule = "constant * linear_warmup"
  hparams.label_smoothing = 0.0
  # Architecture.
  hparams.batch_size = 128
  hparams.hidden_size = 64
  hparams.num_hidden_layers = 5
  hparams.initializer = "uniform_unit_scaling"
  hparams.initializer_gain = 1.0
  hparams.weight_decay = 0.0
  hparams.kernel_height = 4
  hparams.kernel_width = 4
  hparams.dropout = 0.05
  # Bottleneck and sampling.
  hparams.add_hparam("max_hidden_size", 1024)
  hparams.add_hparam("bottleneck_bits", 128)
  hparams.add_hparam("bottleneck_noise", 0.1)
  hparams.add_hparam("bottleneck_warmup_steps", 2000)
  hparams.add_hparam("sample_height", 32)
  hparams.add_hparam("sample_width", 32)
  # GAN / discriminator options (gan_loss_factor 0.0 disables the GAN part).
  hparams.add_hparam("discriminator_batchnorm", True)
  hparams.add_hparam("num_sliced_vecs", 20000)
  hparams.add_hparam("sliced_do_tanh", int(True))
  hparams.add_hparam("discriminator_size", 256)
  hparams.add_hparam("discriminator_kernel_size", 6)
  hparams.add_hparam("discriminator_strides", 4)
  hparams.add_hparam("discriminator_pure_mean", int(False))
  hparams.add_hparam("code_loss_factor", 1.0)
  hparams.add_hparam("gan_codes_warmup_steps", 16000)
  hparams.add_hparam("gan_loss_factor", 0.0)
  hparams.add_hparam("bottleneck_l2_factor", 0.05)
  hparams.add_hparam("gumbel_temperature", 0.5)
  hparams.add_hparam("gumbel_noise_factor", 0.5)
  hparams.add_hparam("vq_temperature", 0.001)
  hparams.add_hparam("use_vq_loss", int(False))
  hparams.add_hparam("discriminator", "double")
  return hparams
@registry.register_hparams
def autoencoder_autoregressive():
  """Autoregressive autoencoder model."""
  hparams = autoencoder_basic()
  # Defaults disable the autoregressive head ("none" mode, 0 decode steps).
  for name, default in [
      ("autoregressive_forget_base", False),
      ("autoregressive_mode", "none"),
      ("autoregressive_decode_steps", 0),
      ("autoregressive_eval_pure_autoencoder", False),
      ("autoregressive_gumbel_sample", False),
  ]:
    hparams.add_hparam(name, default)
  return hparams
@registry.register_hparams
def autoencoder_residual():
  """Residual autoencoder model."""
  hparams = autoencoder_autoregressive()
  # Optimization: Adafactor with rsqrt decay and gradient clipping.
  hparams.optimizer = "Adafactor"
  hparams.clip_grad_norm = 1.0
  hparams.learning_rate_constant = 0.5
  hparams.learning_rate_warmup_steps = 500
  hparams.learning_rate_schedule = "constant * linear_warmup * rsqrt_decay"
  # Architecture sizes.
  hparams.num_hidden_layers = 5
  hparams.hidden_size = 64
  hparams.max_hidden_size = 1024
  # Residual-block configuration (kl_beta is used by the VAE variant).
  for name, default in [
      ("num_residual_layers", 2),
      ("residual_kernel_height", 3),
      ("residual_kernel_width", 3),
      ("residual_filter_multiplier", 2.0),
      ("residual_dropout", 0.2),
      ("residual_use_separable_conv", int(True)),
      ("kl_beta", 1.0),
  ]:
    hparams.add_hparam(name, default)
  return hparams
@registry.register_hparams
def autoencoder_residual_text():
    """Residual autoencoder configured for text (symbol) data."""
    hp = autoencoder_residual()
    hp.bottleneck_bits = 32
    hp.batch_size = 1024
    hp.hidden_size = 64
    hp.max_hidden_size = 512
    hp.bottleneck_noise = 0.0
    hp.target_modality = "symbol:identity"
    hp.input_modalities = "symbol:identity"
    hp.autoregressive_mode = "none"
    hp.sample_width = 1
    return hp
@registry.register_hparams
def autoencoder_basic_discrete():
    """Basic autoencoder with a discretized bottleneck."""
    hp = autoencoder_autoregressive()
    hp.num_hidden_layers = 5
    hp.hidden_size = 64
    hp.bottleneck_bits = 1024
    hp.bottleneck_noise = 0.1
    hp.add_hparam("discretize_warmup_steps", 16000)
    return hp
@registry.register_hparams
def autoencoder_residual_discrete():
    """Residual autoencoder with a discrete bottleneck."""
    hp = autoencoder_residual()
    hp.bottleneck_bits = 1024
    hp.bottleneck_noise = 0.05
    hp.add_hparam("discretize_warmup_steps", 16000)
    hp.add_hparam("bottleneck_kind", "tanh_discrete")
    # Improved-semantic-hashing bottleneck settings.
    hp.add_hparam("isemhash_noise_dev", 0.5)
    hp.add_hparam("isemhash_mix_prob", 0.5)
    hp.add_hparam("isemhash_filter_size_multiplier", 2.0)
    # Vector-quantization bottleneck settings.
    hp.add_hparam("vq_beta", 0.25)
    hp.add_hparam("vq_decay", 0.999)
    hp.add_hparam("vq_epsilon", 1e-5)
    return hp
@registry.register_hparams
def autoencoder_residual_discrete_big():
    """Bigger variant of the residual discrete autoencoder."""
    hp = autoencoder_residual_discrete()
    hp.hidden_size = 128
    hp.max_hidden_size = 4096
    hp.bottleneck_noise = 0.1
    hp.residual_dropout = 0.4
    return hp
@registry.register_hparams
def autoencoder_ordered_discrete():
    """Ordered discrete autoencoder model."""
    hp = autoencoder_residual_discrete()
    hp.bottleneck_noise = 0.05  # Use 0.8 for ordered.
    hp.gan_loss_factor = 0.05
    hp.add_hparam("unordered", True)
    return hp
@registry.register_hparams
def autoencoder_ordered_discrete_image64():
    """Ordered discrete autoencoder tuned for 64x64 video frames."""
    hp = autoencoder_ordered_discrete()
    hp.batch_size = 32
    hp.num_hidden_layers = 6
    hp.target_modality = "video:default"
    hp.input_modalities = "video:default"
    return hp
@registry.register_hparams
def autoencoder_ordered_discrete_patched():
    """Ordered discrete autoencoder using the patched discriminator."""
    hp = autoencoder_ordered_discrete()
    hp.discriminator = "patched"
    return hp
@registry.register_hparams
def autoencoder_ordered_discrete_single():
    """Ordered discrete autoencoder using the single discriminator."""
    hp = autoencoder_ordered_discrete()
    hp.discriminator = "single"
    return hp
@registry.register_hparams
def autoencoder_ordered_discrete_hs256():
    """Ordered discrete autoencoder with hidden size 256."""
    hp = autoencoder_ordered_discrete()
    hp.hidden_size = 256
    return hp
@registry.register_hparams
def autoencoder_ordered_text():
    """Ordered discrete autoencoder configured for text."""
    hp = autoencoder_ordered_discrete()
    hp.bottleneck_bits = 512
    hp.num_hidden_layers = 7
    hp.batch_size = 1024
    hp.autoregressive_mode = "conv5"
    hp.max_hidden_size = 1024
    hp.target_modality = "symbol:identity"
    hp.input_modalities = "symbol:identity"
    hp.sample_height = 128
    hp.sample_width = 1
    return hp
@registry.register_hparams
def autoencoder_ordered_text_small():
    """Small variant of the ordered text autoencoder."""
    hp = autoencoder_ordered_text()
    hp.bottleneck_bits = 14
    hp.num_hidden_layers = 2
    hp.hidden_size = 64
    hp.max_hidden_size = 512
    hp.bottleneck_noise = 0.0
    hp.autoregressive_mode = "conv5"
    hp.sample_height = 4
    return hp
@registry.register_hparams
def autoencoder_ordered_discrete_vq():
    """Ordered discrete autoencoder with a VQ bottleneck."""
    hp = autoencoder_ordered_discrete()
    hp.bottleneck_kind = "vq"
    hp.bottleneck_bits = 16
    return hp
@registry.register_hparams
def autoencoder_discrete_pong():
    """Discrete autoencoder for compressing pong frames."""
    hp = autoencoder_ordered_discrete()
    hp.num_hidden_layers = 2
    hp.bottleneck_bits = 24
    hp.batch_size = 2
    hp.bottleneck_noise = 0.2
    hp.max_hidden_size = 1024
    hp.gan_loss_factor = 0.0
    return hp
@registry.register_hparams
def autoencoder_discrete_cifar():
    """Discrete autoencoder for compressing CIFAR images."""
    hp = autoencoder_ordered_discrete()
    hp.bottleneck_noise = 0.0
    hp.bottleneck_bits = 90
    hp.num_hidden_layers = 2
    hp.hidden_size = 256
    hp.num_residual_layers = 4
    hp.batch_size = 32
    hp.learning_rate_constant = 1.0
    return hp
@registry.register_ranged_hparams
def autoencoder_range(rhp):
    """Tuning grid of the main autoencoder params."""
    # Float ranges are sampled on a linear scale unless specified.
    rhp.set_float("dropout", 0.01, 0.3)
    rhp.set_float("gan_loss_factor", 0.01, 0.1)
    # Spans two orders of magnitude, hence the log scale.
    rhp.set_float("bottleneck_l2_factor", 0.001, 0.1, scale=rhp.LOG_SCALE)
    rhp.set_discrete("bottleneck_warmup_steps", [200, 2000])
    rhp.set_float("gumbel_temperature", 0, 1)
    rhp.set_float("gumbel_noise_factor", 0, 0.5)
@registry.register_ranged_hparams
def autoencoder_discrete_pong_range(rhp):
    """Narrow tuning grid."""
    # Only two knobs are searched for the pong autoencoder.
    rhp.set_float("dropout", 0.0, 0.2)
    rhp.set_discrete("max_hidden_size", [1024, 2048])
@registry.register_hparams
def autoencoder_stacked():
    """Stacked autoencoder model."""
    hp = autoencoder_residual_discrete()
    hp.bottleneck_bits = 128
    return hp
|
|
import re
from collections import defaultdict
from adder import utils
from adder import problem
class LogicOperator:
    """String tokens for the logic operators used throughout the parser."""
    # NOTE(review): "Conjuction" spelling kept as-is; other modules
    # reference it by this name.
    Conjuction = "&"
    Disjunction = "|"
    Negation = "!"
    Implication = "=>"
    Equivalence = "<=>"
    Every = "V"     # universal quantifier
    Exists = "E"    # existential quantifier
    # Fuzzy-logic hedges; presumably used only by a fuzzy extension —
    # TODO confirm against callers.
    Very = "Very"
    Fairly = "Fairly"
# Ordered (operator, compiled regex) pairs used when scanning sentences;
# the order encodes matching priority (e.g. "<=>" before "=>").
LogicOperator.AllRegex = [
    (LogicOperator.Equivalence, re.compile(LogicOperator.Equivalence)),
    (LogicOperator.Implication, re.compile(LogicOperator.Implication)),
    (LogicOperator.Disjunction, re.compile(r"\|")),
    (LogicOperator.Conjuction, re.compile(LogicOperator.Conjuction)),
    (LogicOperator.Negation, re.compile(LogicOperator.Negation)),
    (LogicOperator.Every, re.compile(r"\b{0}\b".format(LogicOperator.Every))),
    (LogicOperator.Exists, re.compile(r"\b{0}\b".format(LogicOperator.Exists))),
    # NOTE(review): the two patterns below look like deliberately
    # unmatchable placeholders so the fuzzy hedges never match during
    # plain scanning — confirm intent before cleaning up.
    (LogicOperator.Very, re.compile("ASDADADASDASDASD")),
    (LogicOperator.Fairly, re.compile("ADADAAAAAAAAAAAAAADASDAS"))
]
class Braces:
    """Parenthesis utilities for logic expressions.

    Distinguishes ordinary grouping braces from "first-order" braces
    (predicate/function argument lists), which are rewritten to the
    FO_Left/FO_Right markers so later passes can tell them apart.
    """
    Left = "("
    Right = ")"
    Placeholder = "#"        # stands in for a replaced top-level group
    FO_Left = "$FO_LEFT$"    # marker for a first-order "("
    FO_Right = "$FO_RIGHT$"  # marker for a first-order ")"

    @staticmethod
    def remove_surrounding(sentence):
        """Strip redundant outer parentheses, e.g. "((p))" -> "p".

        NOTE(review): assumes a non-empty sentence — sentence[0]
        raises IndexError on "".
        """
        sentence = sentence.strip()
        while (sentence[0] == Braces.Left and
               sentence[-1] == Braces.Right):
            braces = 1
            # Verify the leading "(" really spans the whole sentence;
            # if it closes early the parentheses are not "surrounding".
            for symbol in sentence[1:-1]:
                if symbol == Braces.Left:
                    braces += 1
                if symbol == Braces.Right:
                    braces -= 1
                if braces == 0:
                    return sentence
            sentence = sentence[1:-1].strip()
        return sentence

    @staticmethod
    def flatten(text, is_first_order):
        """Flatten nested braces, dispatching on the logic flavor."""
        func = Braces.__flatten_fo if is_first_order else Braces.__flatten_prop
        return func(text)

    @staticmethod
    def __flatten_prop(text):
        """Flatten braces for propositional-logic text.

        Keeps only braces belonging to top-level groups (tlb = "top
        level brace"); inner symbols are copied through unchanged.
        """
        tlb = False
        braces = 0
        result = ""
        for index, symbol in enumerate(text):
            if symbol == Braces.Left:
                if braces == 0:
                    tlb = True
                if tlb:
                    result += Braces.Left
                braces += 1
            elif symbol == Braces.Right:
                braces -= 1
                if braces == 0:
                    result += Braces.Right
                if braces != 0:
                    tlb = False
            else:
                result += symbol
        return result

    @staticmethod
    def __flatten_fo(text):
        """Flatten braces for first-order text.

        Argument-list parentheses (a "(" directly preceded by a
        function/predicate name and containing no operator) are
        rewritten to FO_Left/FO_Right; grouping parentheses are
        handled like the propositional case.
        """
        tlb = False
        fo_braces = 0
        braces = 0
        result = ""
        for index, symbol in enumerate(text):
            if symbol == Braces.Left:
                next_right = Braces.find_unbalanced_right(text[index + 1:])
                between_braces = text[index: index + next_right + 1]
                contains_operator = any(re.search(regex, between_braces)
                                        for op, regex in LogicOperator.AllRegex)
                # No operator inside and glued to the preceding token
                # => this "(" opens an argument list, not a group.
                if not contains_operator and \
                   index > 0 and text[index - 1] != " ":
                    fo_braces += 1
                if contains_operator and braces == 0:
                    tlb = True
                if fo_braces > 0:
                    result += Braces.FO_Left
                elif tlb:
                    result += Braces.Left
                braces += 1
            elif symbol == Braces.Right:
                braces -= 1
                if fo_braces > 0:
                    fo_braces -= 1
                    result += Braces.FO_Right
                elif braces == 0:
                    result += Braces.Right
                if braces != 0 and fo_braces == 0:
                    tlb = False
            else:
                result += symbol
        return result

    @staticmethod
    def replace(text):
        """Replace each top-level "(...)" group with the placeholder.

        Returns (replaced_text, replacement_table) where the table
        holds the original groups (with their braces) in order, so
        `restore` can re-insert them.
        """
        braces = 0
        tlb = False
        result = ""
        replacement_table = []
        replacement_index = -1
        for symbol in text:
            # Inside a top-level group: emit a placeholder and record
            # the original symbol (the closing brace is handled below).
            if tlb and (braces != 1 or symbol != Braces.Right):
                result += Braces.Placeholder
                replacement_table[replacement_index] += symbol
            else:
                result += symbol
            if symbol == Braces.Left:
                if braces == 0:
                    tlb = True
                    replacement_table.append(Braces.Left)
                    replacement_index += 1
                braces += 1
            elif symbol == Braces.Right:
                braces -= 1
                if braces == 0:
                    tlb = False
                    replacement_table[replacement_index] += Braces.Right
        # Collapse runs of placeholders so each group is a single "#".
        replaced = re.sub("{0}+".format(Braces.Placeholder),
                          Braces.Placeholder, result)
        return (replaced, replacement_table)

    @staticmethod
    def restore(text, replacement_table):
        """Inverse of `replace`: re-insert the recorded groups in order."""
        for entry in replacement_table:
            text = text.replace("({0})".format(Braces.Placeholder), entry, 1)
        return text

    @staticmethod
    def find_unbalanced_right(expression):
        """Index of the first unmatched ")"; returns None if balanced."""
        braces = 0
        for index, symbol in enumerate(expression):
            if symbol == '(':
                braces += 1
            elif symbol == ')':
                braces -= 1
            if braces == -1:
                return index
class SkolemRegex:
    """Compiled regexes matching quantifier prefixes like "E x, y(" ."""
    COMMON = r"{0} ((?:\w+, ?)*(?:\w+))\("
    # Group 1 captures the comma-separated variable list.
    EVERY = re.compile(COMMON.format(LogicOperator.Every))
    EXISTS = re.compile(COMMON.format(LogicOperator.Exists))
class Skolemizer:
    """Replaces existentially-quantified variables with Skolem terms.

    Existentials outside any universal scope become Skolem constants
    (SC1, SC2, ...); existentials governed by universals become Skolem
    functions of those universal variables (SF1(x, y), ...).
    """

    def __init__(self):
        self.constants = 0  # counter for generated Skolem constants
        self.functions = 0  # counter for generated Skolem functions

    def find_all_variables(self, expression):
        """Return every existentially-quantified variable name in order."""
        tree = self.__build_tree(expression)
        reversed_tree = self.__reverse_tree(tree)
        variables = []
        for existentials in reversed_tree:
            variables += existentials.replace(" ", "").split(",")
        return variables

    def skolemize(self, expression):
        """Return `expression` with every existential variable skolemized."""
        tree = self.__build_tree(expression)
        reversed_tree = self.__reverse_tree(tree)
        replacements = {}
        for existentials, parents in reversed_tree.items():
            variables = existentials.replace(" ", "").split(",")
            parents.remove(None)  # drop the artificial root marker
            universals = ",".join(parents).replace(" ", "").split(",")
            for var in variables:
                replacements[var] = self.skolemize_var(var, universals)
        # Drop the "E x, ..." quantifier prefix, keeping its "(".
        expression = re.sub(SkolemRegex.EXISTS, "(", expression)
        for var, skolemized in replacements.items():
            regex = r"\b{0}\b".format(var)
            skolemizer = self.get_replacer(skolemized)
            expression = re.sub(regex, skolemizer, expression)
        return expression

    def skolemize_var(self, var, universals):
        """Return a fresh Skolem constant or function term for `var`."""
        if len(universals) == 1 and len(universals[0]) == 0:
            # No governing universals: a plain constant suffices.
            self.constants += 1
            return "SC{0}".format(self.constants)
        else:
            self.functions += 1
            return "SF{0}({1})".format(self.functions, ", ".join(universals))

    def get_replacer(self, skolemized):
        """Build a re.sub callback replacing a variable occurrence.

        Occurrences that are themselves part of an "E ...(" prefix are
        left untouched.
        """
        def replacer(match):
            if re.match(SkolemRegex.EXISTS, match.group()):
                return match.group()
            return skolemized
        return replacer

    def __reverse_tree(self, tree):
        """Map each existential group to the chain of governing universals."""
        frontier = [(None, tree[None])]
        reversed_tree = defaultdict(list)
        is_parent = {}
        while len(frontier) > 0:
            parent, node = frontier.pop(0)
            for child in node:
                if isinstance(child, str):
                    reversed_tree[child] = reversed_tree[parent] + [parent]
                elif isinstance(child, dict):
                    child_name = list(child.keys())[0]
                    if len(child[child_name]) > 0:
                        frontier.append((child_name, child[child_name]))
                    reversed_tree[child_name] = reversed_tree[parent] + [parent]
                elif isinstance(child, list):
                    # BUG FIX: the original indexed reversed_tree by the
                    # (unhashable) list `child`, which would raise
                    # TypeError; index by each member instead.
                    for member in child:
                        reversed_tree[member] = reversed_tree[parent] + [parent]
            is_parent[parent] = True
        # Keep only leaves (existential groups), not universal parents.
        return {key: value for key, value in reversed_tree.items()
                if key not in is_parent}

    def __build_tree(self, expression, root=None):
        """Recursively build the quantifier-nesting tree of `expression`."""
        universal = self.__first_universal(expression)
        if not universal:
            return {root: self.__all_existentials(expression)}
        left_end = universal.span()[1]
        right_end = Braces.find_unbalanced_right(expression[left_end:])
        # BUG FIX: 0 is a valid index; only None signals imbalance
        # (the original `if not right_end` also rejected index 0).
        if right_end is None:
            # BUG FIX: the original raised the undefined name
            # ParsingError (a NameError at runtime).
            raise utils.InvalidArgumentError("Unbalanced parenthesis")
        right_end += left_end
        left = expression[:universal.span()[0]]
        middle = expression[left_end:right_end]
        right = expression[right_end:]
        left_tree = self.__build_tree(left, root)
        middle_tree = self.__build_tree(middle, universal.group(1))
        right_tree = self.__build_tree(right, root)
        return {root: left_tree[root] + [middle_tree] + right_tree[root]}

    def __first_universal(self, expression):
        """First universal-quantifier prefix match, or None."""
        return re.search(SkolemRegex.EVERY, expression)

    def __all_existentials(self, expression):
        """All existential variable-list strings in `expression`."""
        return re.findall(SkolemRegex.EXISTS, expression)
def skolemize(expression, skolemizer=Skolemizer()):
    # NOTE(review): the shared default Skolemizer appears intentional —
    # its counters keep SC*/SF* names unique across successive calls.
    # Passing a fresh Skolemizer restarts the numbering. TODO confirm.
    return skolemizer.skolemize(expression)
class StandartizationReplacer:
    """re.sub callback renaming each distinct variable to var0, var1, ...

    With use_global=True the numbering continues from the class-level
    GlobalIndex so names stay unique across expressions.
    """
    REGEX = re.compile(r"\b[a-z][a-z0-9]*\b", re.ASCII)
    GlobalIndex = 0

    def __init__(self, var, use_global=True):
        self.var = var
        self.index = self.GlobalIndex if use_global else 0
        self.replacements = {}

    def __call__(self, match):
        token = match.group()
        if token not in self.replacements:
            # First occurrence: assign the next fresh name.
            self.replacements[token] = "{0}{1}".format(self.var, self.index)
            self.index += 1
        return self.replacements[token]
def standardize_variables(expression, var="x", use_global=True):
    """Rename all variables in `expression` to var0, var1, ...

    Returns (standardized_expression, replacements_dict). When
    use_global is True the shared counter is advanced so later calls
    keep producing fresh names.
    """
    replacer = StandartizationReplacer(var, use_global)
    standardized = replacer.REGEX.sub(replacer, expression)
    if use_global:
        StandartizationReplacer.GlobalIndex = replacer.index
    return standardized, replacer.replacements
class Substituer:
    """re.sub callback applying a substitution dict `theta`."""

    def __init__(self, theta):
        self.theta = theta
        if theta:
            # One alternation group per variable, each \b-delimited.
            pattern = r"(\b" + r"\b)|(\b".join(theta.keys()) + r"\b)"
        else:
            # A pattern that can never match: substitution is a no-op.
            pattern = r"(?!x)x"
        self.regex = re.compile(pattern)

    def __call__(self, match):
        return self.theta[match.group()]
def substitute(expression, theta):
    """Apply the substitution `theta` to `expression`."""
    replacer = Substituer(theta)
    return replacer.regex.sub(replacer, expression)
def propagate_substitutions(theta):
    """Substitute theta's bindings into its own values (one pass).

    Mutates and returns `theta`; each value has every bound variable
    replaced by its (current) binding.
    """
    for key in theta:
        for var, replacement in theta.items():
            theta[key] = re.sub(r"\b{0}\b".format(var),
                                replacement, theta[key])
    return theta
def unify(expression1, expression2, theta=None):
    """Unify two expressions; returns a substitution or problem.FAILURE."""
    return __unify_implementation(expression1, expression2, theta or {})
def __unify_implementation(x, y, theta):
    """Recursive unification core (AIMA-style UNIFY)."""
    if theta == problem.FAILURE:
        return problem.FAILURE
    if x == y:
        return theta
    if __is_variable(x):
        return __unify_variable(x, y, theta)
    if __is_variable(y):
        return __unify_variable(y, x, theta)
    if isinstance(x, list) and isinstance(y, list):
        # Unify element-wise: heads first, then the tails.
        theta_head = __unify_implementation(x[0], y[0], theta)
        return __unify_implementation(x[1:], y[1:], theta_head)
    # Compound terms: unify the operators, then the argument lists.
    op_x, args_x = __split_expression(x)
    op_y, args_y = __split_expression(y)
    if not op_x or not op_y:
        return problem.FAILURE
    theta_ops = __unify_implementation(op_x, op_y, theta)
    return __unify_implementation(args_x, args_y, theta_ops)
def __is_variable(expression):
    """A variable is any string beginning with a lowercase letter."""
    if not isinstance(expression, str):
        return False
    return expression[0].islower()
def __unify_variable(var, expression, theta):
    """Bind `var` to `expression` in `theta` (mutated in place).

    If either side is already bound, recurse on the binding instead.
    """
    if var in theta:
        return __unify_implementation(expression, theta[var], theta)
    if expression in theta:
        return __unify_implementation(var, theta[expression], theta)
    # Skip occur-check
    theta[var] = expression
    return theta
def unify_substitutions(theta1, theta2):
    """Merge two substitutions, unifying bindings for shared variables.

    Returns the merged dict, or problem.FAILURE when a shared variable
    has incompatible bindings.
    """
    merged = theta1.copy()
    for var, value in theta2.items():
        if var not in theta1:
            merged[var] = value
            continue
        # Shared variable: its two bindings must themselves unify.
        unifier = unify(theta1[var], value, merged)
        if unifier is problem.FAILURE:
            return unifier
        merged[var] = substitute(theta1[var], unifier)
    return merged
@utils.memoize
def __split_expression(expr, cache=None):
    """Split a term like "F(a, G(b))" into ("F", ["a", "G(b)"]).

    Returns (None, None) when `expr` has no parenthesis. The `cache`
    parameter is unused (memoization is handled by the decorator); it
    is kept for backward compatibility but its default was changed
    from a shared mutable `{}` to None.
    """
    expr = expr.strip()
    left_index = expr.find(Braces.Left)
    if left_index == -1:
        return (None, None)
    # Mask nested argument lists so splitting on ", " only cuts at the
    # top level, then restore them.
    replaced, table = Braces.replace(expr[left_index + 1: -1])
    separator = "!SEPARATOR!"
    replaced = Braces.restore(replaced.replace(", ", separator), table)
    function = expr[:left_index]
    args = replaced.split(separator)
    return (function, args)
def find_variables_symbol(expression):
    """Collect every variable occurring at any depth inside a symbol."""
    args = __split_expression(expression)[1]
    if not args:
        return []
    # Direct variable arguments first, then variables nested inside
    # compound (non-variable) arguments — same order as before.
    variables = [arg for arg in args if __is_variable(arg)]
    for arg in args:
        if not __is_variable(arg):
            variables.extend(find_variables_symbol(arg))
    return variables
def find_variables_expression(expression):
    """Return the existential variables of a quantified expression."""
    return Skolemizer().find_all_variables(expression)
def is_subsumed_by(x, y):
    """
    Returns true if y subsumes x (for example P(x) subsumes P(A) as it is more
    abstract)
    """
    # Top-level arguments of x; used to restrict which bindings matter.
    varsX = __split_expression(x)[1]
    theta = unify(x, y)
    if theta is problem.FAILURE:
        return False
    # y subsumes x when every binding of an x-argument maps to a
    # variable. NOTE(review): bindings for y's variables are usually
    # not in varsX, so this is often vacuously true — confirm this is
    # the intended subsumption test.
    return all(__is_variable(theta[var]) for var in theta.keys()
               if var in varsX)
class DefiniteClause:
    """A definite clause: a conjunction of premises implying exactly
    one positive conclusion."""

    ReplacementIndex = 0

    def __init__(self, text):
        self.premises, self.conclusion = self.__parse(text)
        self.is_fact = len(self.premises) == 0

    def __parse(self, text):
        """Accept "A & B => C" form or clause form "!A | !B | C"."""
        if LogicOperator.Implication in text:
            lhs, rhs = text.split(LogicOperator.Implication)
            premises = [part.strip()
                        for part in lhs.split(LogicOperator.Conjuction)]
            conclusions = [rhs.strip()]
        else:
            literals = [part.strip()
                        for part in text.split(LogicOperator.Disjunction)]
            premises = [lit[1:] for lit in literals
                        if lit[0] == LogicOperator.Negation]
            conclusions = [lit for lit in literals
                           if lit[0] != LogicOperator.Negation]
        if len(conclusions) != 1:
            msg = "A clause must have EXACTLY one positive symbol"
            raise utils.InvalidArgumentError(msg)
        return (premises, conclusions[0])

    def standardize(self, var="x"):
        """Rename variables consistently across the whole clause."""
        replacer = StandartizationReplacer(var)
        for index, premise in enumerate(self.premises):
            self.premises[index] = replacer.REGEX.sub(replacer, premise)
        self.conclusion = replacer.REGEX.sub(replacer, self.conclusion)
        StandartizationReplacer.GlobalIndex = replacer.index

    def __str__(self):
        return "{0} => {1}".format(" & ".join(self.premises), self.conclusion)

    __repr__ = __str__
class DefiniteKnowledgeBase:
    """Knowledge base of definite clauses queried via a chaining solver."""

    def __init__(self, chaining_solver, text=""):
        lines = text.strip().split("\n")
        self.raw_kb = [DefiniteClause(line) for line in lines if line]
        self.solver = chaining_solver

    def ask(self, query):
        """Run the chaining solver against the current clauses."""
        return self.solver(self.raw_kb, query)

    def tell(self, *args):
        """Parse and append each sentence as a new clause."""
        self.raw_kb.extend(DefiniteClause(sentence.strip())
                           for sentence in args)
class KnowledgeBase:
    """General clause knowledge base answered through a solver.

    `parser` turns one sentence into a list of clauses; `solver` is
    called with (clauses, query, max_clause_len, complete).
    """

    def __init__(self, parser, solver, text="",
                 max_clause_len=float("inf"), complete=True):
        self.parser = parser
        self.solver = solver
        self.raw_kb = parse_knowledge_base(parser, text)
        self.max_clause_len = max_clause_len
        self.complete = complete

    def ask(self, query):
        """Answer `query` against the current knowledge base."""
        return self.solver(self.raw_kb, query,
                           self.max_clause_len,
                           self.complete)

    def tell(self, *args):
        """Parse each sentence and add its clauses to the KB."""
        for sentence in args:
            self.raw_kb += self.parser(sentence.strip())

    def __eq__(self, other):
        return set(self.raw_kb) == set(other.raw_kb)

    # BUG FIX: this method was named __neq__, which Python never
    # invokes; "!=" silently fell back to the default. __ne__ is the
    # real special method.
    def __ne__(self, other):
        return not (self == other)
def parse_knowledge_base(parser, text):
    """Parse newline-separated sentences, flattening the parser output."""
    clauses = []
    for sentence in text.strip().split("\n"):
        clauses.extend(parser(sentence))
    return clauses
|
|
"""Functions for working with STIX2 granular markings."""
from stix2 import exceptions
from stix2.markings import utils
from stix2.utils import is_marking
from stix2.versioning import new_version
def get_markings(obj, selectors, inherited=False, descendants=False, marking_ref=True, lang=True):
    """Return granular-marking identifiers matching the selectors.

    Args:
        obj: An SDO or SRO object.
        selectors: string or list of selector strings relative to the
            SDO or SRO in which the properties appear.
        inherited (bool): If True, include markings inherited relative
            to the properties.
        descendants (bool): If True, include granular markings applied
            to any children relative to the properties.
        marking_ref (bool): If False, exclude ``marking_ref`` markings.
        lang (bool): If False, exclude ``lang`` markings.

    Raises:
        InvalidSelectorError: If `selectors` fail validation.

    Returns:
        list: Marking identifiers that matched the selectors expression.
    """
    selectors = utils.convert_to_list(selectors)
    utils.validate(obj, selectors)

    granular_markings = obj.get('granular_markings', [])
    if not granular_markings:
        return []

    def _matches(user_sel, marking_sel):
        if user_sel == marking_sel:
            return True  # explicit selector match
        if inherited and user_sel.startswith(marking_sel):
            return True  # marking applied to an ancestor property
        # marking applied to a child of the requested property
        return descendants and marking_sel.startswith(user_sel)

    results = set()
    for marking in granular_markings:
        for user_selector in selectors:
            for marking_selector in marking.get('selectors', []):
                if not _matches(user_selector, marking_selector):
                    continue
                ref = marking.get('marking_ref')
                lng = marking.get('lang')
                if ref and marking_ref:
                    results.add(ref)
                if lng and lang:
                    results.add(lng)
    return list(results)
def set_markings(obj, marking, selectors, marking_ref=True, lang=True):
    """Replace the granular markings on the selected properties.

    Removes all granular markings associated with the selectors, then
    appends the given marking(s); see `clear_markings` and
    `add_markings` for details.

    Args:
        obj: An SDO or SRO object.
        marking: identifier or list of marking identifiers to apply.
        selectors: string or list of selector strings relative to the
            SDO or SRO in which the properties appear.
        marking_ref (bool): If False, ``marking_ref`` markings are not
            removed.
        lang (bool): If False, ``lang`` markings are not removed.

    Returns:
        A new version of the given SDO or SRO with the specified
        markings removed and the new ones added.
    """
    cleared = clear_markings(obj, selectors, marking_ref, lang)
    return add_markings(cleared, marking, selectors)
def remove_markings(obj, marking, selectors):
    """Remove granular markings from the granular_markings collection.

    Makes a best-effort attempt to distinguish marking-definition
    markings from language markings.

    Args:
        obj: An SDO or SRO object.
        marking: identifier or list of marking identifiers to remove.
        selectors: string or list of selector strings relative to the
            SDO or SRO in which the properties appear.

    Raises:
        InvalidSelectorError: If `selectors` fail validation.
        MarkingNotFoundError: If none of the markings to remove are
            found on the provided SDO or SRO.

    Returns:
        A new version of the given SDO or SRO with the markings removed.
    """
    selectors = utils.convert_to_list(selectors)
    marking = utils.convert_to_marking_list(marking)
    utils.validate(obj, selectors)

    granular_markings = obj.get('granular_markings')
    if not granular_markings:
        return obj

    granular_markings = utils.expand_markings(granular_markings)

    # Build the entries we expect to find; identifiers get marking_ref,
    # anything else is treated as a language tag.
    to_remove = [
        {'marking_ref': m, 'selectors': selectors} if is_marking(m)
        else {'lang': m, 'selectors': selectors}
        for m in marking
    ]
    remove = utils.build_granular_marking(to_remove).get('granular_markings')

    if not any(candidate in granular_markings for candidate in remove):
        raise exceptions.MarkingNotFoundError(obj, remove)

    remaining = [m for m in granular_markings if m not in remove]
    remaining = utils.compress_markings(remaining)

    # An empty list must become None so the property is dropped.
    return new_version(obj, granular_markings=remaining or None,
                       allow_custom=True)
def add_markings(obj, marking, selectors):
    """Append granular markings to the granular_markings collection.

    Makes a best-effort attempt to distinguish marking-definition
    markings from language markings.

    Args:
        obj: An SDO or SRO object.
        marking: identifier or list of marking identifiers to apply.
        selectors: list of selector strings relative to the TLO in
            which the properties appear.

    Raises:
        InvalidSelectorError: If `selectors` fail validation.

    Returns:
        A new version of the given SDO or SRO with the markings added.
    """
    selectors = utils.convert_to_list(selectors)
    marking = utils.convert_to_marking_list(marking)
    utils.validate(obj, selectors)

    new_markings = []
    for m in marking:
        # Identifiers become marking_refs; other values are language tags.
        key = 'marking_ref' if is_marking(m) else 'lang'
        new_markings.append({key: m, 'selectors': sorted(selectors)})

    existing = obj.get('granular_markings')
    if existing:
        new_markings.extend(existing)

    # Normalize: expand to one selector per entry, then merge duplicates.
    new_markings = utils.expand_markings(new_markings)
    new_markings = utils.compress_markings(new_markings)
    return new_version(obj, granular_markings=new_markings,
                       allow_custom=True)
def clear_markings(obj, selectors, marking_ref=True, lang=True):
    """
    Remove all granular markings associated with the selectors.
    Args:
        obj: An SDO or SRO object.
        selectors: string or list of selectors strings relative to the SDO or
            SRO in which the properties appear.
        marking_ref (bool): If False, markings that use the ``marking_ref``
            property will not be removed.
        lang (bool): If False, markings that use the ``lang`` property
            will not be removed.
    Raises:
        InvalidSelectorError: If `selectors` fail validation.
        MarkingNotFoundError: If markings to remove are not found on
            the provided SDO or SRO.
    Returns:
        A new version of the given SDO or SRO with specified markings cleared.
    """
    selectors = utils.convert_to_list(selectors)
    utils.validate(obj, selectors)

    granular_markings = obj.get('granular_markings')
    if not granular_markings:
        return obj

    granular_markings = utils.expand_markings(granular_markings)

    # Dummy entries used only to borrow build_granular_marking's
    # normalized structure for the membership test below.
    granular_dict = utils.build_granular_marking([
        {'selectors': selectors, 'marking_ref': 'N/A'},
        {'selectors': selectors, 'lang': 'N/A'},
    ])
    clear = granular_dict.get('granular_markings', [])

    # Fail if none of the requested selectors is marked at all.
    if not any(
        clear_selector in sdo_selectors.get('selectors', [])
        for sdo_selectors in granular_markings
        for clear_marking in clear
        for clear_selector in clear_marking.get('selectors', [])
    ):
        raise exceptions.MarkingNotFoundError(obj, clear)

    for granular_marking in granular_markings:
        for s in selectors:
            if s in granular_marking.get('selectors', []):
                ref = granular_marking.get('marking_ref')
                lng = granular_marking.get('lang')
                # Blank the fields in place; compress_markings below
                # presumably drops the emptied entries — TODO confirm.
                if ref and marking_ref:
                    granular_marking['marking_ref'] = ''
                if lng and lang:
                    granular_marking['lang'] = ''

    granular_markings = utils.compress_markings(granular_markings)

    if granular_markings:
        return new_version(obj, granular_markings=granular_markings, allow_custom=True)
    else:
        # Property must be absent (None), not an empty list.
        return new_version(obj, granular_markings=None, allow_custom=True)
def is_marked(obj, marking=None, selectors=None, inherited=False, descendants=False):
    """
    Check if field is marked by any marking or by specific marking(s).
    Args:
        obj: An SDO or SRO object.
        marking: identifier or list of marking identifiers that apply to the
            properties selected by `selectors`.
        selectors (bool): string or list of selectors strings relative to the
            SDO or SRO in which the properties appear.
        inherited (bool): If True, return markings inherited from the given
            selector.
        descendants (bool): If True, return granular markings applied to any
            children of the given selector.
    Raises:
        InvalidSelectorError: If `selectors` fail validation.
    Returns:
        bool: True if ``selectors`` is found on internal SDO or SRO collection.
            False otherwise.
    Note:
        When a list of marking identifiers is provided, if ANY of the provided
        marking identifiers match, True is returned.
    """
    if selectors is None:
        raise TypeError("Required argument 'selectors' must be provided")

    selectors = utils.convert_to_list(selectors)
    marking = utils.convert_to_marking_list(marking)
    utils.validate(obj, selectors)

    granular_markings = obj.get('granular_markings', [])

    marked = False
    markings = set()  # the user-provided markings actually found

    for granular_marking in granular_markings:
        for user_selector in selectors:
            for marking_selector in granular_marking.get('selectors', []):
                if any([
                    (user_selector == marking_selector),  # Catch explicit selectors.
                    (user_selector.startswith(marking_selector) and inherited),  # Catch inherited selectors.
                    (marking_selector.startswith(user_selector) and descendants),
                ]):  # Catch descendants selectors
                    marking_ref = granular_marking.get('marking_ref', '')
                    lang = granular_marking.get('lang', '')

                    # Record which of the requested markings matched.
                    if marking and any(x == marking_ref for x in marking):
                        markings.update([marking_ref])
                    if marking and any(x == lang for x in marking):
                        markings.update([lang])

                    # Any selector match counts for the no-filter case.
                    marked = True

    if marking:
        # All user-provided markings must be found.
        # NOTE(review): despite the docstring's "if ANY ... match",
        # issuperset requires ALL requested markings to be present.
        return markings.issuperset(set(marking))

    return marked
|
|
###############################################################################
#
# Worksheet - A class for writing Excel Worksheets.
#
# Copyright 2013-2017, John McNamara, jmcnamara@cpan.org
#
import re
import datetime
from warnings import warn
# Cache of column-number -> column-letter translations used by
# xl_rowcol_to_cell_fast; grows lazily and is never cleared.
COL_NAMES = {}

# Matches an A1-style reference, capturing ($)(letters)($)(digits).
range_parts = re.compile(r'(\$?)([A-Z]{1,3})(\$?)(\d+)')
def xl_rowcol_to_cell(row, col, row_abs=False, col_abs=False):
    """
    Convert a zero indexed row and column cell reference to an A1 style
    string.

    Args:
        row: The cell row. Int.
        col: The cell column. Int.
        row_abs: Optional flag to make the row absolute. Bool.
        col_abs: Optional flag to make the column absolute. Bool.

    Returns:
        A1 style string.
    """
    row_marker = '$' if row_abs else ''
    col_str = xl_col_to_name(col, col_abs)
    # Rows are 1-indexed in A1 notation.
    return '{0}{1}{2}'.format(col_str, row_marker, row + 1)
def xl_rowcol_to_cell_fast(row, col):
    """
    Optimized version of xl_rowcol_to_cell. Only used internally.

    Args:
        row: The cell row. Int.
        col: The cell column. Int.

    Returns:
        A1 style string.
    """
    # Memoize the column-letter conversion in the module-level cache.
    col_str = COL_NAMES.get(col)
    if col_str is None:
        col_str = COL_NAMES[col] = xl_col_to_name(col)
    return col_str + str(row + 1)
def xl_col_to_name(col_num, col_abs=False):
    """
    Convert a zero indexed column cell reference to a string.

    Args:
        col_num: The cell column. Int.
        col_abs: Optional flag to make the column absolute. Bool.

    Returns:
        Column style string, e.g. 'A', 'Z', 'AA'.
    """
    n = col_num + 1  # work in Excel's 1-indexed scheme
    letters = []
    while n:
        # Bijective base-26: a remainder of 0 maps to 'Z' (26).
        remainder = n % 26 or 26
        letters.append(chr(ord('A') + remainder - 1))
        n = int((n - 1) / 26)
    col_str = ''.join(reversed(letters))
    return ('$' if col_abs else '') + col_str
def xl_cell_to_rowcol(cell_str):
    """
    Convert a cell reference in A1 notation to a zero indexed row and
    column.

    Args:
        cell_str: A1 style string.

    Returns:
        row, col: Zero indexed cell row and column indices.
    """
    if not cell_str:
        return 0, 0

    match = range_parts.match(cell_str)
    col_str = match.group(2)
    row_str = match.group(4)

    # Column letters are bijective base-26; accumulate Horner-style.
    col = 0
    for char in col_str:
        col = col * 26 + (ord(char) - ord('A') + 1)

    # Convert both to zero-index.
    return int(row_str) - 1, col - 1
def xl_cell_to_rowcol_abs(cell_str):
    """
    Convert an absolute cell reference in A1 notation to a zero indexed
    row and column, with True/False values for absolute rows or columns.

    Args:
        cell_str: A1 style string.

    Returns:
        row, col, row_abs, col_abs: Zero indexed indices plus flags.
    """
    if not cell_str:
        return 0, 0, False, False

    match = range_parts.match(cell_str)
    # A captured '$' group means the row/column is absolute.
    col_abs = bool(match.group(1))
    col_str = match.group(2)
    row_abs = bool(match.group(3))
    row_str = match.group(4)

    # Column letters are bijective base-26; accumulate Horner-style.
    col = 0
    for char in col_str:
        col = col * 26 + (ord(char) - ord('A') + 1)

    # Convert both to zero-index.
    return int(row_str) - 1, col - 1, row_abs, col_abs
def xl_range(first_row, first_col, last_row, last_col):
    """
    Convert zero indexed row and col cell references to an A1:B1 range
    string.

    Args:
        first_row: The first cell row. Int.
        first_col: The first cell column. Int.
        last_row: The last cell row. Int.
        last_col: The last cell column. Int.

    Returns:
        A1:B1 style range string.
    """
    return '{0}:{1}'.format(xl_rowcol_to_cell(first_row, first_col),
                            xl_rowcol_to_cell(last_row, last_col))
def xl_range_abs(first_row, first_col, last_row, last_col):
    """
    Convert zero indexed row and col cell references to a $A$1:$B$1
    absolute range string.

    Args:
        first_row: The first cell row. Int.
        first_col: The first cell column. Int.
        last_row: The last cell row. Int.
        last_col: The last cell column. Int.

    Returns:
        $A$1:$B$1 style range string.
    """
    start = xl_rowcol_to_cell(first_row, first_col, True, True)
    end = xl_rowcol_to_cell(last_row, last_col, True, True)
    return start + ':' + end
def xl_range_formula(sheetname, first_row, first_col, last_row, last_col):
    """
    Convert a worksheet name and zero indexed row and col references to
    a Sheet1!$A$1:$B$1 range formula string.

    Args:
        sheetname: The worksheet name. String.
        first_row: The first cell row. Int.
        first_col: The first cell column. Int.
        last_row: The last cell row. Int.
        last_col: The last cell column. Int.

    Returns:
        Sheetname!$A$1:$B$1 style range string.
    """
    cell_range = xl_range_abs(first_row, first_col, last_row, last_col)
    return '{0}!{1}'.format(quote_sheetname(sheetname), cell_range)
def quote_sheetname(sheetname):
    """
    Single quote a worksheet name if it contains spaces or special
    characters, so it can be used safely in range formulas.
    Args:
        sheetname: The worksheet name. String.
    Returns:
        The worksheet name, quoted if quoting was required.
    """
    # TODO. Possibly extend this to quote sheetnames that look like ranges.
    needs_quoting = (not sheetname.isalnum()
                     and not sheetname.startswith("'"))
    if needs_quoting:
        # Escape embedded single quotes by doubling them, then wrap the
        # whole name in single quotes.
        escaped = sheetname.replace("'", "''")
        sheetname = "'%s'" % escaped
    return sheetname
def xl_color(color):
    """Convert a color name or #RRGGBB string to Excel's ARGB format.

    Used in conjunction with the XlsxWriter *color() methods to convert
    a color name into an RGB formatted string. These named colors are
    for backward compatibility with older versions of Excel.

    Args:
        color: A named color (e.g. 'red') or a '#RRGGBB' hex string.
    Returns:
        An 'FFRRGGBB' ARGB string (alpha channel forced to FF).
    """
    named_colors = {
        'black': '#000000',
        'blue': '#0000FF',
        'brown': '#800000',
        'cyan': '#00FFFF',
        'gray': '#808080',
        'green': '#008000',
        'lime': '#00FF00',
        'magenta': '#FF00FF',
        'navy': '#000080',
        'orange': '#FF6600',
        'pink': '#FF00FF',
        'purple': '#800080',
        'red': '#FF0000',
        'silver': '#C0C0C0',
        'white': '#FFFFFF',
        'yellow': '#FFFF00',
    }
    if color in named_colors:
        color = named_colors[color]
    # Anchor the pattern with '$' so that strings with trailing garbage
    # (e.g. '#123456ZZ') are reported as invalid instead of silently
    # passing the prefix-only check. re.match already anchors the start.
    if not re.match(r'#[0-9a-fA-F]{6}$', color):
        warn("Color '%s' isn't a valid Excel color" % color)
    # Convert the RGB color to the Excel ARGB format.
    return "FF" + color.lstrip('#').upper()
def get_rgb_color(color):
    # Convert a user defined color name/string into the plain RRGGBB
    # form used by charts, i.e. without the leading "FF" alpha channel
    # that xl_color() prepends.
    argb = xl_color(color)
    return re.sub(r'^FF', '', argb)
def get_sparkline_style(style_id):
    """Return the color scheme for one of Excel's built-in sparkline styles.

    Args:
        style_id: Index (0-36) of the built-in sparkline style.
    Returns:
        A dict mapping the sparkline elements ('series', 'negative',
        'markers', 'first', 'last', 'high', 'low') to their color
        definition, either as a theme color ({'theme', 'tint'}) or as a
        literal ARGB value ({'rgb'}).
    Raises:
        IndexError: If style_id is outside the range of defined styles.
    """
    # The table below mirrors Excel's 37 predefined sparkline styles.
    # Note: style 1 is intentionally identical to style 0 (the default).
    styles = [
        {'series': {'theme': "4", 'tint': "-0.499984740745262"},
         'negative': {'theme': "5"},
         'markers': {'theme': "4", 'tint': "-0.499984740745262"},
         'first': {'theme': "4", 'tint': "0.39997558519241921"},
         'last': {'theme': "4", 'tint': "0.39997558519241921"},
         'high': {'theme': "4"},
         'low': {'theme': "4"},
         }, # 0
        {'series': {'theme': "4", 'tint': "-0.499984740745262"},
         'negative': {'theme': "5"},
         'markers': {'theme': "4", 'tint': "-0.499984740745262"},
         'first': {'theme': "4", 'tint': "0.39997558519241921"},
         'last': {'theme': "4", 'tint': "0.39997558519241921"},
         'high': {'theme': "4"},
         'low': {'theme': "4"},
         }, # 1
        {'series': {'theme': "5", 'tint': "-0.499984740745262"},
         'negative': {'theme': "6"},
         'markers': {'theme': "5", 'tint': "-0.499984740745262"},
         'first': {'theme': "5", 'tint': "0.39997558519241921"},
         'last': {'theme': "5", 'tint': "0.39997558519241921"},
         'high': {'theme': "5"},
         'low': {'theme': "5"},
         }, # 2
        {'series': {'theme': "6", 'tint': "-0.499984740745262"},
         'negative': {'theme': "7"},
         'markers': {'theme': "6", 'tint': "-0.499984740745262"},
         'first': {'theme': "6", 'tint': "0.39997558519241921"},
         'last': {'theme': "6", 'tint': "0.39997558519241921"},
         'high': {'theme': "6"},
         'low': {'theme': "6"},
         }, # 3
        {'series': {'theme': "7", 'tint': "-0.499984740745262"},
         'negative': {'theme': "8"},
         'markers': {'theme': "7", 'tint': "-0.499984740745262"},
         'first': {'theme': "7", 'tint': "0.39997558519241921"},
         'last': {'theme': "7", 'tint': "0.39997558519241921"},
         'high': {'theme': "7"},
         'low': {'theme': "7"},
         }, # 4
        {'series': {'theme': "8", 'tint': "-0.499984740745262"},
         'negative': {'theme': "9"},
         'markers': {'theme': "8", 'tint': "-0.499984740745262"},
         'first': {'theme': "8", 'tint': "0.39997558519241921"},
         'last': {'theme': "8", 'tint': "0.39997558519241921"},
         'high': {'theme': "8"},
         'low': {'theme': "8"},
         }, # 5
        {'series': {'theme': "9", 'tint': "-0.499984740745262"},
         'negative': {'theme': "4"},
         'markers': {'theme': "9", 'tint': "-0.499984740745262"},
         'first': {'theme': "9", 'tint': "0.39997558519241921"},
         'last': {'theme': "9", 'tint': "0.39997558519241921"},
         'high': {'theme': "9"},
         'low': {'theme': "9"},
         }, # 6
        {'series': {'theme': "4", 'tint': "-0.249977111117893"},
         'negative': {'theme': "5"},
         'markers': {'theme': "5", 'tint': "-0.249977111117893"},
         'first': {'theme': "5", 'tint': "-0.249977111117893"},
         'last': {'theme': "5", 'tint': "-0.249977111117893"},
         'high': {'theme': "5", 'tint': "-0.249977111117893"},
         'low': {'theme': "5", 'tint': "-0.249977111117893"},
         }, # 7
        {'series': {'theme': "5", 'tint': "-0.249977111117893"},
         'negative': {'theme': "6"},
         'markers': {'theme': "6", 'tint': "-0.249977111117893"},
         'first': {'theme': "6", 'tint': "-0.249977111117893"},
         'last': {'theme': "6", 'tint': "-0.249977111117893"},
         'high': {'theme': "6", 'tint': "-0.249977111117893"},
         'low': {'theme': "6", 'tint': "-0.249977111117893"},
         }, # 8
        {'series': {'theme': "6", 'tint': "-0.249977111117893"},
         'negative': {'theme': "7"},
         'markers': {'theme': "7", 'tint': "-0.249977111117893"},
         'first': {'theme': "7", 'tint': "-0.249977111117893"},
         'last': {'theme': "7", 'tint': "-0.249977111117893"},
         'high': {'theme': "7", 'tint': "-0.249977111117893"},
         'low': {'theme': "7", 'tint': "-0.249977111117893"},
         }, # 9
        {'series': {'theme': "7", 'tint': "-0.249977111117893"},
         'negative': {'theme': "8"},
         'markers': {'theme': "8", 'tint': "-0.249977111117893"},
         'first': {'theme': "8", 'tint': "-0.249977111117893"},
         'last': {'theme': "8", 'tint': "-0.249977111117893"},
         'high': {'theme': "8", 'tint': "-0.249977111117893"},
         'low': {'theme': "8", 'tint': "-0.249977111117893"},
         }, # 10
        {'series': {'theme': "8", 'tint': "-0.249977111117893"},
         'negative': {'theme': "9"},
         'markers': {'theme': "9", 'tint': "-0.249977111117893"},
         'first': {'theme': "9", 'tint': "-0.249977111117893"},
         'last': {'theme': "9", 'tint': "-0.249977111117893"},
         'high': {'theme': "9", 'tint': "-0.249977111117893"},
         'low': {'theme': "9", 'tint': "-0.249977111117893"},
         }, # 11
        {'series': {'theme': "9", 'tint': "-0.249977111117893"},
         'negative': {'theme': "4"},
         'markers': {'theme': "4", 'tint': "-0.249977111117893"},
         'first': {'theme': "4", 'tint': "-0.249977111117893"},
         'last': {'theme': "4", 'tint': "-0.249977111117893"},
         'high': {'theme': "4", 'tint': "-0.249977111117893"},
         'low': {'theme': "4", 'tint': "-0.249977111117893"},
         }, # 12
        {'series': {'theme': "4"},
         'negative': {'theme': "5"},
         'markers': {'theme': "4", 'tint': "-0.249977111117893"},
         'first': {'theme': "4", 'tint': "-0.249977111117893"},
         'last': {'theme': "4", 'tint': "-0.249977111117893"},
         'high': {'theme': "4", 'tint': "-0.249977111117893"},
         'low': {'theme': "4", 'tint': "-0.249977111117893"},
         }, # 13
        {'series': {'theme': "5"},
         'negative': {'theme': "6"},
         'markers': {'theme': "5", 'tint': "-0.249977111117893"},
         'first': {'theme': "5", 'tint': "-0.249977111117893"},
         'last': {'theme': "5", 'tint': "-0.249977111117893"},
         'high': {'theme': "5", 'tint': "-0.249977111117893"},
         'low': {'theme': "5", 'tint': "-0.249977111117893"},
         }, # 14
        {'series': {'theme': "6"},
         'negative': {'theme': "7"},
         'markers': {'theme': "6", 'tint': "-0.249977111117893"},
         'first': {'theme': "6", 'tint': "-0.249977111117893"},
         'last': {'theme': "6", 'tint': "-0.249977111117893"},
         'high': {'theme': "6", 'tint': "-0.249977111117893"},
         'low': {'theme': "6", 'tint': "-0.249977111117893"},
         }, # 15
        {'series': {'theme': "7"},
         'negative': {'theme': "8"},
         'markers': {'theme': "7", 'tint': "-0.249977111117893"},
         'first': {'theme': "7", 'tint': "-0.249977111117893"},
         'last': {'theme': "7", 'tint': "-0.249977111117893"},
         'high': {'theme': "7", 'tint': "-0.249977111117893"},
         'low': {'theme': "7", 'tint': "-0.249977111117893"},
         }, # 16
        {'series': {'theme': "8"},
         'negative': {'theme': "9"},
         'markers': {'theme': "8", 'tint': "-0.249977111117893"},
         'first': {'theme': "8", 'tint': "-0.249977111117893"},
         'last': {'theme': "8", 'tint': "-0.249977111117893"},
         'high': {'theme': "8", 'tint': "-0.249977111117893"},
         'low': {'theme': "8", 'tint': "-0.249977111117893"},
         }, # 17
        {'series': {'theme': "9"},
         'negative': {'theme': "4"},
         'markers': {'theme': "9", 'tint': "-0.249977111117893"},
         'first': {'theme': "9", 'tint': "-0.249977111117893"},
         'last': {'theme': "9", 'tint': "-0.249977111117893"},
         'high': {'theme': "9", 'tint': "-0.249977111117893"},
         'low': {'theme': "9", 'tint': "-0.249977111117893"},
         }, # 18
        {'series': {'theme': "4", 'tint': "0.39997558519241921"},
         'negative': {'theme': "0", 'tint': "-0.499984740745262"},
         'markers': {'theme': "4", 'tint': "0.79998168889431442"},
         'first': {'theme': "4", 'tint': "-0.249977111117893"},
         'last': {'theme': "4", 'tint': "-0.249977111117893"},
         'high': {'theme': "4", 'tint': "-0.499984740745262"},
         'low': {'theme': "4", 'tint': "-0.499984740745262"},
         }, # 19
        {'series': {'theme': "5", 'tint': "0.39997558519241921"},
         'negative': {'theme': "0", 'tint': "-0.499984740745262"},
         'markers': {'theme': "5", 'tint': "0.79998168889431442"},
         'first': {'theme': "5", 'tint': "-0.249977111117893"},
         'last': {'theme': "5", 'tint': "-0.249977111117893"},
         'high': {'theme': "5", 'tint': "-0.499984740745262"},
         'low': {'theme': "5", 'tint': "-0.499984740745262"},
         }, # 20
        {'series': {'theme': "6", 'tint': "0.39997558519241921"},
         'negative': {'theme': "0", 'tint': "-0.499984740745262"},
         'markers': {'theme': "6", 'tint': "0.79998168889431442"},
         'first': {'theme': "6", 'tint': "-0.249977111117893"},
         'last': {'theme': "6", 'tint': "-0.249977111117893"},
         'high': {'theme': "6", 'tint': "-0.499984740745262"},
         'low': {'theme': "6", 'tint': "-0.499984740745262"},
         }, # 21
        {'series': {'theme': "7", 'tint': "0.39997558519241921"},
         'negative': {'theme': "0", 'tint': "-0.499984740745262"},
         'markers': {'theme': "7", 'tint': "0.79998168889431442"},
         'first': {'theme': "7", 'tint': "-0.249977111117893"},
         'last': {'theme': "7", 'tint': "-0.249977111117893"},
         'high': {'theme': "7", 'tint': "-0.499984740745262"},
         'low': {'theme': "7", 'tint': "-0.499984740745262"},
         }, # 22
        {'series': {'theme': "8", 'tint': "0.39997558519241921"},
         'negative': {'theme': "0", 'tint': "-0.499984740745262"},
         'markers': {'theme': "8", 'tint': "0.79998168889431442"},
         'first': {'theme': "8", 'tint': "-0.249977111117893"},
         'last': {'theme': "8", 'tint': "-0.249977111117893"},
         'high': {'theme': "8", 'tint': "-0.499984740745262"},
         'low': {'theme': "8", 'tint': "-0.499984740745262"},
         }, # 23
        {'series': {'theme': "9", 'tint': "0.39997558519241921"},
         'negative': {'theme': "0", 'tint': "-0.499984740745262"},
         'markers': {'theme': "9", 'tint': "0.79998168889431442"},
         'first': {'theme': "9", 'tint': "-0.249977111117893"},
         'last': {'theme': "9", 'tint': "-0.249977111117893"},
         'high': {'theme': "9", 'tint': "-0.499984740745262"},
         'low': {'theme': "9", 'tint': "-0.499984740745262"},
         }, # 24
        {'series': {'theme': "1", 'tint': "0.499984740745262"},
         'negative': {'theme': "1", 'tint': "0.249977111117893"},
         'markers': {'theme': "1", 'tint': "0.249977111117893"},
         'first': {'theme': "1", 'tint': "0.249977111117893"},
         'last': {'theme': "1", 'tint': "0.249977111117893"},
         'high': {'theme': "1", 'tint': "0.249977111117893"},
         'low': {'theme': "1", 'tint': "0.249977111117893"},
         }, # 25
        {'series': {'theme': "1", 'tint': "0.34998626667073579"},
         'negative': {'theme': "0", 'tint': "-0.249977111117893"},
         'markers': {'theme': "0", 'tint': "-0.249977111117893"},
         'first': {'theme': "0", 'tint': "-0.249977111117893"},
         'last': {'theme': "0", 'tint': "-0.249977111117893"},
         'high': {'theme': "0", 'tint': "-0.249977111117893"},
         'low': {'theme': "0", 'tint': "-0.249977111117893"},
         }, # 26
        {'series': {'rgb': "FF323232"},
         'negative': {'rgb': "FFD00000"},
         'markers': {'rgb': "FFD00000"},
         'first': {'rgb': "FFD00000"},
         'last': {'rgb': "FFD00000"},
         'high': {'rgb': "FFD00000"},
         'low': {'rgb': "FFD00000"},
         }, # 27
        {'series': {'rgb': "FF000000"},
         'negative': {'rgb': "FF0070C0"},
         'markers': {'rgb': "FF0070C0"},
         'first': {'rgb': "FF0070C0"},
         'last': {'rgb': "FF0070C0"},
         'high': {'rgb': "FF0070C0"},
         'low': {'rgb': "FF0070C0"},
         }, # 28
        {'series': {'rgb': "FF376092"},
         'negative': {'rgb': "FFD00000"},
         'markers': {'rgb': "FFD00000"},
         'first': {'rgb': "FFD00000"},
         'last': {'rgb': "FFD00000"},
         'high': {'rgb': "FFD00000"},
         'low': {'rgb': "FFD00000"},
         }, # 29
        {'series': {'rgb': "FF0070C0"},
         'negative': {'rgb': "FF000000"},
         'markers': {'rgb': "FF000000"},
         'first': {'rgb': "FF000000"},
         'last': {'rgb': "FF000000"},
         'high': {'rgb': "FF000000"},
         'low': {'rgb': "FF000000"},
         }, # 30
        {'series': {'rgb': "FF5F5F5F"},
         'negative': {'rgb': "FFFFB620"},
         'markers': {'rgb': "FFD70077"},
         'first': {'rgb': "FF5687C2"},
         'last': {'rgb': "FF359CEB"},
         'high': {'rgb': "FF56BE79"},
         'low': {'rgb': "FFFF5055"},
         }, # 31
        {'series': {'rgb': "FF5687C2"},
         'negative': {'rgb': "FFFFB620"},
         'markers': {'rgb': "FFD70077"},
         'first': {'rgb': "FF777777"},
         'last': {'rgb': "FF359CEB"},
         'high': {'rgb': "FF56BE79"},
         'low': {'rgb': "FFFF5055"},
         }, # 32
        {'series': {'rgb': "FFC6EFCE"},
         'negative': {'rgb': "FFFFC7CE"},
         'markers': {'rgb': "FF8CADD6"},
         'first': {'rgb': "FFFFDC47"},
         'last': {'rgb': "FFFFEB9C"},
         'high': {'rgb': "FF60D276"},
         'low': {'rgb': "FFFF5367"},
         }, # 33
        {'series': {'rgb': "FF00B050"},
         'negative': {'rgb': "FFFF0000"},
         'markers': {'rgb': "FF0070C0"},
         'first': {'rgb': "FFFFC000"},
         'last': {'rgb': "FFFFC000"},
         'high': {'rgb': "FF00B050"},
         'low': {'rgb': "FFFF0000"},
         }, # 34
        {'series': {'theme': "3"},
         'negative': {'theme': "9"},
         'markers': {'theme': "8"},
         'first': {'theme': "4"},
         'last': {'theme': "5"},
         'high': {'theme': "6"},
         'low': {'theme': "7"},
         }, # 35
        {'series': {'theme': "1"},
         'negative': {'theme': "9"},
         'markers': {'theme': "8"},
         'first': {'theme': "4"},
         'last': {'theme': "5"},
         'high': {'theme': "6"},
         'low': {'theme': "7"},
         }, # 36
    ]
    return styles[style_id]
def supported_datetime(dt):
    # Check whether the argument is one of the datetime types that can
    # be converted to an Excel serial date/time number.
    supported_types = (datetime.datetime,
                       datetime.date,
                       datetime.time,
                       datetime.timedelta)
    return isinstance(dt, supported_types)
def remove_datetime_timezone(dt_obj, remove_timezone):
    # Excel has no concept of timezones in datetimes/times. If the user
    # enabled the 'remove_timezone' Workbook() option the tzinfo is
    # stripped; otherwise any timezone-aware value is rejected.
    if not remove_timezone:
        if dt_obj.tzinfo:
            raise TypeError(
                "Excel doesn't support timezones in datetimes. "
                "Set the tzinfo in the datetime/time object to None or "
                "use the 'remove_timezone' Workbook() option")
        return dt_obj
    return dt_obj.replace(tzinfo=None)
def datetime_to_excel_datetime(dt_obj, date_1904, remove_timezone):
    # Convert a datetime.datetime/.date/.time/.timedelta object to an
    # Excel serial date. The integer part of the number is the count of
    # days since the epoch and the fraction is the elapsed portion of
    # the day.
    def strip_timezone(dt):
        # Excel has no timezone support: strip the tzinfo if the user
        # asked for that, otherwise reject timezone-aware values.
        if remove_timezone:
            return dt.replace(tzinfo=None)
        if dt.tzinfo:
            raise TypeError(
                "Excel doesn't support timezones in datetimes. "
                "Set the tzinfo in the datetime/time object to None or "
                "use the 'remove_timezone' Workbook() option")
        return dt

    original_value = dt_obj
    # Pick the epoch: Excel for Mac counts from 1904-01-01, the default
    # Windows scheme from 1899-12-31 (day zero).
    if date_1904:
        epoch = datetime.datetime(1904, 1, 1)
    else:
        epoch = datetime.datetime(1899, 12, 31)
    # Normalize the supported input types into a timedelta from the
    # epoch. datetime must be tested before date since it subclasses it.
    if isinstance(dt_obj, datetime.datetime):
        dt_obj = strip_timezone(dt_obj)
        delta = dt_obj - epoch
    elif isinstance(dt_obj, datetime.date):
        dt_obj = datetime.datetime.fromordinal(dt_obj.toordinal())
        delta = dt_obj - epoch
    elif isinstance(dt_obj, datetime.time):
        dt_obj = strip_timezone(datetime.datetime.combine(epoch, dt_obj))
        delta = dt_obj - epoch
    elif isinstance(dt_obj, datetime.timedelta):
        delta = dt_obj
    else:
        raise TypeError("Unknown or unsupported datetime type")
    # Days plus the fractional part of the day.
    seconds_per_day = 60 * 60 * 24
    excel_time = (delta.days
                  + (float(delta.seconds)
                     + float(delta.microseconds) / 1E6) / seconds_per_day)
    # Workaround: in Excel a time-only value is represented as
    # 1899-12-31+time whereas in datetime.datetime() it is 1900-1-1+time,
    # so subtract the 1 day difference.
    if (isinstance(original_value, datetime.datetime)
            and dt_obj.isocalendar() == (1900, 1, 1)):
        excel_time -= 1
    # Account for Excel erroneously treating 1900 as a leap year.
    if not date_1904 and excel_time > 59:
        excel_time += 1
    return excel_time
|
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
class DeploymentCache:
    """Cached CLI state for a QIIME deployment.
    In this context, a QIIME deployment is the set of installed Python
    packages, including their exact versions, that register one or more QIIME 2
    plugins. The exact version of q2cli is also included in the deployment.
    The deployment cache stores the current deployment's package names and
    versions in a requirements.txt file under the cache directory. This file is
    used to determine if the cache is outdated. If the cache is determined to
    be outdated, it will be refreshed based on the current deployment state.
    Thus, adding, removing, upgrading, or downgrading a plugin package or q2cli
    itself will trigger a cache refresh.
    Two mechanisms are provided to force a cache refresh. Setting the
    environment variable Q2CLIDEV to any value will cause the cache to be
    refreshed upon instantiation. Calling `.refresh()` will also refresh the
    cache. Forced refreshing of the cache is useful for plugin and/or q2cli
    developers who want their changes to take effect in the CLI without
    changing their package versions.
    Cached CLI state is stored in a state.json file under the cache directory.
    It is not a public file format and it is not versioned. q2cli is included
    as part of the QIIME deployment so that the cached state can always be read
    (or recreated as necessary) by the currently installed version of q2cli.
    This class is intended to be a singleton because it is responsible for
    managing the on-disk cache. Having more than one instance managing the
    cache has the possibility of two instances clobbering the cache (e.g. in a
    multithreaded/multiprocessing situation). Also, having a single instance
    improves performance by only reading and/or refreshing the cache a
    single time during its lifetime. Having two instances could, for example,
    trigger two cache refreshes if Q2CLIDEV is set. To support these use-cases,
    a module-level `CACHE` variable stores a single instance of this class.
    """
    # Public API
    def __init__(self):
        """Load the cached CLI state, refreshing the cache first if needed."""
        import os
        # Indicates if the cache has been refreshed. For performance purposes,
        # the cache is only refreshed a single time (at maximum) during the
        # object's lifetime. Thus, "hot reloading" isn't supported, but this
        # shouldn't be necessary for the CLI.
        self._refreshed = False
        self._cache_dir = self._get_cache_dir()
        # Q2CLIDEV set to any value forces a refresh (developer override).
        refresh = 'Q2CLIDEV' in os.environ
        self._state = self._get_cached_state(refresh=refresh)
    @property
    def plugins(self):
        """Decoded JSON object representing CLI state on a per-plugin basis."""
        return self._state['plugins']
    def refresh(self):
        """Trigger a forced refresh of the cache.
        If the cache has already been refreshed (either by this method or at
        some point during instantiation), this method is a no-op.
        """
        if not self._refreshed:
            self._state = self._get_cached_state(refresh=True)
    # Private API
    def _get_cache_dir(self):
        """Return the cache directory path, creating it if necessary."""
        import os
        import q2cli.util
        cache_dir = q2cli.util.get_cache_dir()
        os.makedirs(cache_dir, exist_ok=True)
        return cache_dir
    def _get_cached_state(self, refresh):
        """Return the cached state, rebuilding the cache first if outdated.

        The cache is rewritten when `refresh` is True, when the deployment
        requirements changed, or when any cache artifact is missing/corrupt.
        """
        import json
        import os.path
        import q2cli.util
        current_requirements = self._get_current_requirements()
        state_path = os.path.join(self._cache_dir, 'state.json')
        # See note on `get_completion_path` for why knowledge of this path
        # exists in `q2cli.util` and not in this class.
        completion_path = q2cli.util.get_completion_path()
        # The cache must be refreshed in the following cases:
        # 1) We have been explicitly told to refresh.
        if refresh:
            self._cache_current_state(current_requirements)
        # 2) The current deployment requirements are different than the cached
        # requirements.
        elif current_requirements != self._get_cached_requirements():
            self._cache_current_state(current_requirements)
        # 3) The cached state file does not exist.
        elif not os.path.exists(state_path):
            self._cache_current_state(current_requirements)
        # 4) The cached bash completion script does not exist.
        elif not os.path.exists(completion_path):
            self._cache_current_state(current_requirements)
        # Now that the cache is up-to-date, read it.
        try:
            with open(state_path, 'r') as fh:
                return json.load(fh)
        except json.JSONDecodeError:
            # 5) The cached state file can't be read as JSON.
            # Rebuild once and re-read; a second failure propagates.
            self._cache_current_state(current_requirements)
            with open(state_path, 'r') as fh:
                return json.load(fh)
    # NOTE: The private methods below are all used internally within
    # `_get_cached_state`.
    def _get_current_requirements(self):
        """Includes installed versions of q2cli and QIIME 2 plugins."""
        import os
        import pkg_resources
        import q2cli
        reqs = {
            pkg_resources.Requirement.parse('q2cli == %s' % q2cli.__version__)
        }
        # A distribution (i.e. Python package) can have multiple plugins, where
        # each plugin is its own entry point. A distribution's `Requirement` is
        # hashable, and the `set` is used to exclude duplicates. Thus, we only
        # gather the set of requirements for all installed Python packages
        # containing one or more plugins. It is not necessary to track
        # individual plugin names and versions in order to determine if the
        # cache is outdated.
        #
        # TODO: this code is (more or less) copied from
        # `qiime2.sdk.PluginManager.iter_entry_points`. Importing QIIME is
        # currently slow, and it adds ~600-700ms to any CLI command. This makes
        # the CLI pretty unresponsive, especially when running help/informative
        # commands. Replace with the following lines when
        # https://github.com/qiime2/qiime2/issues/151 is fixed:
        #
        #   for ep in qiime2.sdk.PluginManager.iter_entry_points():
        #       reqs.add(ep.dist.as_requirement())
        #
        for entry_point in pkg_resources.iter_entry_points(
                group='qiime2.plugins'):
            # Under QIIMETEST only the test 'dummy-plugin' is considered;
            # otherwise the test plugin is excluded from the deployment.
            if 'QIIMETEST' in os.environ:
                if entry_point.name == 'dummy-plugin':
                    reqs.add(entry_point.dist.as_requirement())
            else:
                if entry_point.name != 'dummy-plugin':
                    reqs.add(entry_point.dist.as_requirement())
        return reqs
    def _get_cached_requirements(self):
        """Return the requirements recorded at the last cache write.

        Returns an empty set when the file is missing or unparseable, which
        always triggers a refresh in `_get_cached_state` (the current
        requirements contain at least q2cli).
        """
        import os.path
        import pkg_resources
        path = os.path.join(self._cache_dir, 'requirements.txt')
        if not os.path.exists(path):
            # No cached requirements. The empty set will always trigger a cache
            # refresh because the current requirements will, at minimum,
            # contain q2cli.
            return set()
        else:
            with open(path, 'r') as fh:
                contents = fh.read()
            try:
                return set(pkg_resources.parse_requirements(contents))
            except pkg_resources.RequirementParseError:
                # Unreadable cached requirements, trigger a cache refresh.
                return set()
    def _cache_current_state(self, requirements):
        """Write state.json, the bash completion script, and requirements.txt.

        `requirements` is the set of `pkg_resources.Requirement` objects to
        record as the deployment fingerprint.
        """
        import json
        import os.path
        import click
        import q2cli.completion
        import q2cli.util
        click.secho(
            "QIIME is caching your current deployment for improved "
            "performance. This may take a few moments and should only happen "
            "once per deployment.", fg='yellow', err=True)
        cache_dir = self._cache_dir
        state = self._get_current_state()
        path = os.path.join(cache_dir, 'state.json')
        with open(path, 'w') as fh:
            json.dump(state, fh)
        q2cli.completion.write_bash_completion_script(
            state['plugins'], q2cli.util.get_completion_path())
        # Write requirements file last because the above steps may raise errors
        # (e.g. a plugin can't be loaded in `_get_current_state`). If any part
        # of the cache writing fails, it needs to be refreshed the next time
        # the cache is accessed. The absence of a requirements file will
        # trigger this cache refresh, avoiding this bug:
        # https://github.com/qiime2/q2cli/issues/88
        path = os.path.join(cache_dir, 'requirements.txt')
        with open(path, 'w') as fh:
            for req in requirements:
                # `str(Requirement)` is the recommended way to format a
                # `Requirement` that can be read with `Requirement.parse`.
                fh.write(str(req))
                fh.write('\n')
        self._refreshed = True
    def _get_current_state(self):
        """Get current CLI state as an object that is serializable as JSON.
        WARNING: This method is very slow and should only be called when the
        cache needs to be refreshed.
        """
        import qiime2.sdk
        state = {
            'plugins': {}
        }
        plugin_manager = qiime2.sdk.PluginManager()
        for name, plugin in plugin_manager.plugins.items():
            state['plugins'][name] = self._get_plugin_state(plugin)
        return state
    def _get_plugin_state(self, plugin):
        """Return a JSON-serializable snapshot of one plugin and its actions."""
        state = {
            # TODO this conversion also happens in the framework
            # (qiime2/plugins.py) to generate an importable module name from a
            # plugin's `.name` attribute. Centralize this knowledge in the
            # framework, ideally as a machine-friendly plugin ID (similar to
            # `Action.id`).
            'id': plugin.name.replace('-', '_'),
            'name': plugin.name,
            'version': plugin.version,
            'website': plugin.website,
            'citation_text': plugin.citation_text,
            'user_support_text': plugin.user_support_text,
            'description': plugin.description,
            'short_description': plugin.short_description,
            'actions': {}
        }
        for id, action in plugin.actions.items():
            state['actions'][id] = self._get_action_state(action)
        return state
    def _get_action_state(self, action):
        """Return a JSON-serializable snapshot of one action's signature.

        Inputs and outputs are flattened into a single 'signature' list,
        each entry tagged with its kind ('input', 'parameter' or 'output').
        """
        import itertools
        state = {
            'id': action.id,
            'name': action.name,
            'description': action.description,
            'signature': []
        }
        sig = action.signature
        for name, spec in itertools.chain(sig.signature_order.items(),
                                          sig.outputs.items()):
            data = {'name': name, 'repr': repr(spec.qiime_type),
                    'ast': spec.qiime_type.to_ast()}
            if name in sig.inputs:
                type = 'input'
            elif name in sig.parameters:
                type = 'parameter'
            else:
                type = 'output'
            data['type'] = type
            # Description and default are optional per-spec fields.
            if spec.has_description():
                data['description'] = spec.description
            if spec.has_default():
                data['default'] = spec.default
            state['signature'].append(data)
        return state
# Singleton. Import and use this instance as necessary.
# NOTE: instantiating DeploymentCache reads (and may rebuild) the on-disk
# cache, so importing this module has that side effect and can be slow on
# the first run after a deployment change.
CACHE = DeploymentCache()
|
|
from splitwise.debt import Debt
from splitwise.balance import Balance
class Group(object):
    """ Represents a splitwise group.
    Attributes:
        id(long, optional): ID of the group
        name(str, optional): Name of the group
        updated_at(str, optional): ISO 8601 Date time. The last updated date time of group
        created_at(str, optional): ISO 8601 Date time. The created date time of group
        simplify_by_default(bool, optional): Is Simplified expenses by default
        group_type(str, optional): Type of the group
        whiteboard(str, optional): Whiteboard of the group
        invite_link(str, optional): Invitation link of the group
        country_code(str, optional): Country of the group
        original_debts(:obj:`list` of :obj:`splitwise.debt.Debt`, optional): List of original debts
        simplified_debts(:obj:`list` of :obj:`splitwise.debt.Debt`, optional): List of simplified debts
        members(:obj:`list` of :obj:`splitwise.user.Friend`, optional): List of members of the group
    """
    def __init__(self, data=None):
        """
        Args:
            data(:obj:`json`, optional): JSON object representing group object
        """
        from splitwise.user import Friend
        if data:
            # Mandatory fields from the API response.
            self.id = data["id"]
            self.name = data["name"]
            self.updated_at = data["updated_at"]
            self.created_at = data["created_at"]
            self.simplify_by_default = data["simplify_by_default"]
            # Optional fields default to None when absent.
            self.group_type = data.get("group_type")
            self.whiteboard = data.get("whiteboard")
            self.invite_link = data.get("invite_link")
            self.country_code = data.get("country_code")
            self.original_debts = [Debt(d) for d in data["original_debts"]]
            self.simplified_debts = [Debt(d)
                                     for d in data["simplified_debts"]]
            self.members = [Friend(m) for m in data["members"]]
    def getId(self):
        """ Returns the group's ID.
        Returns:
            long: ID of the group
        """
        return self.id
    def getName(self):
        """ Returns the group's name.
        Returns:
            str: name of the group
        """
        return self.name
    def getCreatedAt(self):
        """ Returns when the group was created (ISO 8601).
        Returns:
            str: ISO 8601 create date time of the group
        """
        return self.created_at
    def getUpdatedAt(self):
        """ Returns when the group was last updated (ISO 8601).
        Returns:
            str: ISO 8601 update date time of the group
        """
        return self.updated_at
    def getWhiteBoard(self):
        """ Returns the group's whiteboard.
        Returns:
            str: whiteboard of the group
        """
        return self.whiteboard
    def isSimplifiedByDefault(self):
        """ Returns whether expenses are simplified by default.
        Returns:
            bool: simplified by default
        """
        return self.simplify_by_default
    def getMembers(self):
        """ Returns the group's members.
        Returns:
            :obj:`list` of :obj:`splitwise.user.Friend`: List of members of the group
        """
        return self.members
    def getOriginalDebts(self):
        """ Returns the group's original (unsimplified) debts.
        Returns:
            :obj:`list` of :obj:`splitwise.debt.Debt`: List of original debt of the group
        """
        return self.original_debts
    def getType(self):
        """ Returns the group's type. Alias of getGroupType().
        Returns:
            str: type of the group
        """
        return self.group_type
    def getGroupType(self):
        """ Returns the group's type.
        Returns:
            str: type of the group
        """
        return self.group_type
    def getSimplifiedDebts(self):
        """ Returns the group's simplified debts.
        Returns:
            :obj:`list` of :obj:`splitwise.debt.Debt`: List of simplified debt of the group
        """
        return self.simplified_debts
    def getInviteLink(self):
        """ Returns the group's invitation link.
        Returns:
            str: invitation link of the group
        """
        return self.invite_link
    def setName(self, name):
        """ Sets the group's name.
        Args:
            name(str): name of the group
        """
        self.name = name
    def setType(self, group_type):
        """ Sets the group's type. Alias of setGroupType().
        Args:
            group_type(str): type of the group
        """
        self.group_type = group_type
    def setGroupType(self, group_type):
        """ Sets the group's type.
        Args:
            group_type(str): type of the group
        """
        self.group_type = group_type
    def setWhiteBoard(self, whiteboard):
        """ Sets the group's whiteboard.
        Args:
            whiteboard(str): whiteboard of the group
        """
        self.whiteboard = whiteboard
    def setCountryCode(self, country_code):
        """ Sets the group's country code.
        Args:
            country_code(str): country code of the group
        """
        self.country_code = country_code
    def setMembers(self, members):
        """ Sets the group's member list, replacing any existing one.
        Args:
            members(:obj:`list` of :obj:`splitwise.user.Friend`): list of members of the group
        """
        self.members = members
    def addMember(self, member):
        """ Appends a single member to the group.
        Args:
            member(:obj:`splitwise.user.Friend`): member to add to the group
        """
        # Lazily create the member list for groups built without data.
        self.members = getattr(self, 'members', [])
        self.members.append(member)
class FriendGroup(object):
    """ Simplified group representation used when describing a Friend.
    Attributes:
        id(long, optional): ID of the group
        balances(:obj:`list` of :obj:`splitwise.balance.Balance`, optional): List of balances of the group
    """
    def __init__(self, data=None):
        """
        Args:
            data(:obj:`json`, optional): JSON object representing group
        """
        if data:
            self.id = data["group_id"]
            self.balances = [Balance(b) for b in data["balance"]]
    def setId(self, id):
        """ Sets the group's ID.
        Args:
            id(long): ID of the group
        """
        self.id = id
    def getId(self):
        """ Returns the group's ID.
        Returns:
            long: ID of the group
        """
        return self.id
    def getBalances(self):
        """ Returns the per-currency balances of the group.
        Returns:
            :obj:`list` of :obj:`splitwise.balance.Balance`: Balances of the group
        """
        return self.balances
|
|
import traceback
import numpy as np
from pycqed.analysis_v3 import helper_functions
from pycqed.measurement.waveform_control.sequence import Sequence
from pycqed.utilities.general import temporary_value
from pycqed.utilities.timer import Timer, Checkpoint
from pycqed.measurement.waveform_control.circuit_builder import CircuitBuilder
from pycqed.measurement import sweep_functions as swf
import pycqed.measurement.awg_sweep_functions as awg_swf
from pycqed.measurement import multi_qubit_module as mqm
import pycqed.analysis_v2.base_analysis as ba
import pycqed.utilities.general as general
from copy import deepcopy
import logging
log = logging.getLogger(__name__)
class QuantumExperiment(CircuitBuilder):
"""
Base class for Experiments with pycqed. A QuantumExperiment consists of
3 main parts:
- The __init__(), which takes care of initializing the parent class
(CircuitBuilder) and setting all the attributes of the quantum experiment
- the run_measurement(), which is the skeleton of any measurement in pycqed.
This function should *not* be modified by child classes
- the run_analysis(), which defaults to calling BaseDataAnalysis. This function
may be overwritten by child classes to start measurement-specific analysis
"""
_metadata_params = {'cal_points', 'preparation_params', 'sweep_points',
'channel_map', 'meas_objs'}
# The following string can be overwritten by child classes to provide a
# default value for the kwarg experiment_name. None means that the name
# of the first sequences will be used.
default_experiment_name = None
    def __init__(self, dev=None, qubits=None, operation_dict=None,
                 meas_objs=None, classified=False, MC=None,
                 label=None, exp_metadata=None, upload=True, measure=True,
                 analyze=True, temporary_values=(), drive="timedomain",
                 sequences=(), sequence_function=None, sequence_kwargs=None,
                 plot_sequence=False, filter_segments_mask=None, df_kwargs=None, df_name=None,
                 timer_kwargs=None, mc_points=None, sweep_functions=(awg_swf.SegmentHardSweep,
                                                                     awg_swf.SegmentSoftSweep),
                 harmonize_element_lengths=False,
                 compression_seg_lim=None, force_2D_sweep=True, callback=None,
                 callback_condition=lambda : True, **kw):
        """
        Initializes a QuantumExperiment.

        Args:
            dev (Device): Device object used for the experiment. Defaults to None.
            qubits (list): list of qubits used for the experiment (e.g. a subset of
                qubits on the device). Defaults to None. (see CircuitBuilder for more
                details).
            operation_dict (dict): dictionary with operations. Defaults to None.
                (see CircuitBuilder for more details).
            meas_objs (list): list of measure objects (e.g., qubits) to be read
                out (i.e. for which the detector functions will be
                prepared). Defaults to self.qubits (attribute set by
                CircuitBuilder). Required for run_measurement() when qubits
                is None.
            classified (bool): whether to use the classified ('_classif')
                variant of the detector function (see df_name below).
            MC (MeasurementControl): MeasurementControl object. Required for
                run_measurement() if qubits is None and device is None.
            label (str): Measurement label
            exp_metadata (dict): experimental metadata saved in hdf5 file
            upload (bool): whether or not to upload the sequences to the AWGs
            measure (bool): whether or not to measure
            analyze (bool): whether or not to analyze
            temporary_values (list): list of temporary values with the form:
                [(Qcode_param_1, value_1), (Qcode_param_2, value_2), ...]
            drive (str): qubit configuration.
            sequences (list): list of sequences for the experiment. Note that
                even in the case of a single sequence, a list is required.
                Required if sequence_function is None.
            sequence_function (callable): functions returning the sequences,
                see self._prepare_sequences() for more details. Required for
                run_measurement if sequences is None
            sequence_kwargs (dict): keyword arguments passed to the sequence_function.
                see self._prepare_sequences()
            plot_sequence (bool): whether to plot the sequences after preparing
                them (see self._prepare_sequences()).
            filter_segments_mask (array of bool): An array with dimension
                n_0 x n_1, indicating which segments need to be measured.
                Here, n_1 is the number of sweep points in dimension 1 (soft sweep)
                and n_0 <= N_0, where N_0 is the number of sweep points in
                dimension 0 (hard sweep) including calibration points. If
                n_0 < N_0, segments with index larger than n_0 will always
                be measured (typical use case: calibration points, i.e.,
                let n_0 be the number of sweep points without calibration
                points).
                The lower-level implementation in FilteredSweep and Pulsar
                currently only supports a single consecutive range of
                segments to be measured in each row of this array (plus the
                segments with index >n_0, which are always measured). To
                fulfill this requirement, QuantumExperiment will internally
                change False-values to True in this array if needed, i.e.,
                it can happen that segments are measured even though their
                entry in this array is set to False.
            df_kwargs (dict): detector function keyword arguments.
            timer_kwargs (dict): keyword arguments for timer. See pycqed.utilities.timer.
                Timer.
            df_name (str): detector function name.
            mc_points (tuple): tuple of 2 lists with first and second dimension
                measurement control points (previously also called sweep_points,
                but name has changed to avoid confusion with SweepPoints):
                [first_dim_mc_points, second_dim_mc_points]. MC points
                correspond to measurement_control sweep points i.e. sweep points
                directly related to the instruments, e.g. segment readout index.
                Not required when using sweep_functions SegmentSoftSweep and
                SegmentHardSweep as these may be inferred from the sequences objects.
                In case other sweep functions are used (e.g. for sweeping instrument
                parameters), then the sweep points must be specified. Note that the list
                must always have two entries. E.g. for a 1D sweep of LO frequencies,
                mc_points should be of the form: (freqs, [])
            sweep_functions (tuple): tuple of sweepfunctions. Similarly to mc_points,
                sweep_functions has 2 entries, one for each dimension. Defaults to
                SegmentHardSweep for the first sweep dimension and SegmentSoftSweep
                for the second dimension.
            harmonize_element_lengths (bool, default False): whether it
                should be ensured for all AWGs and all elements that the
                element length is the same in all sequences. Use case: If
                pulsar.use_sequence_cache and pulsar.AWGX_use_placeholder_waves
                are activated for a ZI HDAWG, harmonized element lengths across
                a soft sweep avoid recompilation of SeqC code during the sweep
                (replacing binary waveform data is sufficient in this case).
            compression_seg_lim (int): maximal number of segments that can be in a
                single sequence. If not None and the QuantumExperiment is a 2D sweep
                with more than 1 sequence, and the sweep_functions are
                (SegmentHardSweep, SegmentSoftSweep), then the QuantumExperiment
                will try to compress the sequences, see Sequence.compress_2D_sweep.
            force_2D_sweep (bool): whether or not to force a two-dimensional sweep.
                In that case, even if there is only one sequence, a second
                sweep_function dimension is added. The idea is to use this more
                and more to generalize data format passed to the analysis.
            callback (func): optional function to call after run_analysis() in
                autorun(). All arguments passed to autorun will be passed down to
                the callback.
            callback_condition (func): function returning a bool to decide whether or
                not the callback function should be executed. Defaults to always True.
            **kw:
                further keyword arguments are passed to the CircuitBuilder __init__
        """
        self.timer = Timer('QuantumExperiment', **timer_kwargs if timer_kwargs is
                           not None else {})
        if qubits is None and dev is None and operation_dict is None:
            raise NotImplementedError('Experiments without qubits are not '
                                      'implemented yet. Either dev or qubits'
                                      'or operation_dict has to be provided.')
            # planned future behavior (but has to be tested in all aspects):
            # if no qubits/device/operation_dict are provided, use empty
            # list to skip iterations over qubit lists
            # qubits = []
        super().__init__(dev=dev, qubits=qubits, operation_dict=operation_dict,
                         **kw)

        self.exp_metadata = exp_metadata
        if self.exp_metadata is None:
            self.exp_metadata = {}

        self.create_meas_objs_list(**kw, meas_objs=meas_objs)
        self.MC = MC

        self.classified = classified
        self.label = label
        self.upload = upload
        self.measure = measure
        self.temporary_values = list(temporary_values)
        self.analyze = analyze
        self.drive = drive
        self.callback = callback
        self.callback_condition = callback_condition
        self.plot_sequence = plot_sequence

        self.sequences = list(sequences)
        self.sequence_function = sequence_function
        self.sequence_kwargs = {} if sequence_kwargs is None else sequence_kwargs
        self.filter_segments_mask = filter_segments_mask
        self.sweep_points = self.sequence_kwargs.get("sweep_points", None)
        self.mc_points = mc_points if mc_points is not None else [[], []]
        self.sweep_functions = sweep_functions
        self.force_2D_sweep = force_2D_sweep
        self.compression_seg_lim = compression_seg_lim
        self.harmonize_element_lengths = harmonize_element_lengths
        # The experiment_name might have been set by the user in kw or by a
        # child class as an attribute. Otherwise, the default None will
        # trigger guess_label to use the sequence name.
        self.experiment_name = kw.pop(
            'experiment_name', getattr(self, 'experiment_name',
                                       self.default_experiment_name))
        self.timestamp = None
        self.analysis = None

        # detector and sweep functions
        default_df_kwargs = {'det_get_values_kws':
                                 {'classified': self.classified,
                                  'correlated': False,
                                  'thresholded': True,
                                  'averaged': True}}
        self.df_kwargs = default_df_kwargs if df_kwargs is None else df_kwargs
        if df_name is not None:
            self.df_name = df_name
            # an explicitly classified detector name overrides the
            # classified flag passed by the caller
            if 'classif' in df_name:
                self.classified = True
        else:
            self.df_name = 'int_avg{}_det'.format('_classif' if self.classified else '')
        self.df = None

        # determine data type: single-shot for logging detectors or when
        # averaging is disabled in the detector kwargs
        if "log" in self.df_name or not \
                self.df_kwargs.get("det_get_values_kws",
                                   {}).get('averaged', True):
            data_type = "singleshot"
        else:
            data_type = "averaged"

        # all remaining keyword arguments end up in the experimental metadata
        self.exp_metadata.update(kw)
        self.exp_metadata.update({'classified_ro': self.classified,
                                  'cz_pulse_name': self.cz_pulse_name,
                                  'data_type': data_type})
def create_meas_objs_list(self, meas_objs=None, **kwargs):
"""
Creates a default list for self.meas_objs if meas_objs is not provided,
and creates the list self.meas_obj_names.
Args:
meas_objs (list): a list of measurement objects (or None for
default, which is self.qubits)
"""
self.meas_objs = self.qubits if meas_objs is None else meas_objs
self.meas_obj_names = [m.name for m in self.meas_objs]
def _update_parameters(self, overwrite_dicts=True, **kwargs):
"""
Update all attributes of the quantumExperiment class.
Args:
overwrite_dicts (bool): whether or not to overwrite
attributes that are dictionaries. If False,
then dictionaries are updated.
**kwargs: any attribute of the QuantumExperiment class
"""
for param_name, param_value in kwargs.items():
if hasattr(self, param_name):
if isinstance(param_value, dict) and not overwrite_dicts:
getattr(self, param_name).update(param_value)
else:
setattr(self, param_name, param_value)
    @Timer()
    def run_measurement(self, save_timers=True, **kw):
        """
        Runs a measurement. Any keyword argument passed to this function that
        is also an attribute of the QuantumExperiment class will be updated
        before starting the experiment

        Args:
            save_timers (bool): whether timers should be saved to the hdf
                file at the end of the measurement (default: True).
        """
        self._update_parameters(**kw)
        assert self.meas_objs is not None, 'Cannot run measurement without ' \
                                           'measure objects.'
        # pad mc_points to two dimensions if only the first dimension was given
        if len(self.mc_points) == 1:
            self.mc_points = [self.mc_points[0], []]

        exception = None
        with temporary_value(*self.temporary_values):
            # Prepare all involved qubits. If not available, prepare
            # all measure objects.
            mos = self.qubits if self.qubits is not None else self.meas_objs
            for m in mos:
                m.prepare(drive=self.drive)
            # create/retrieve sequence to run
            self._prepare_sequences(self.sequences, self.sequence_function,
                                    self.sequence_kwargs)
            # configure measurement control (mc_points, detector functions)
            mode = self._configure_mc()
            self.guess_label(**kw)
            self.update_metadata()
            # run measurement; any error is stored and re-raised only after
            # the timestamp has been extracted and the timers saved
            try:
                self.MC.run(name=self.label, exp_metadata=self.exp_metadata,
                            mode=mode)
            except (Exception, KeyboardInterrupt) as e:
                exception = e  # exception will be raised below
        self.extract_timestamp()
        if save_timers:
            self.save_timers()
        if exception is not None:
            raise exception
def update_metadata(self):
# make sure that all metadata params are up to date
for name in self._metadata_params:
if hasattr(self, name):
value = getattr(self, name)
try:
if name in ('cal_points', 'sweep_points') and \
value is not None:
old_val = np.get_printoptions()['threshold']
np.set_printoptions(threshold=np.inf)
self.exp_metadata.update({name: repr(value)})
np.set_printoptions(threshold=old_val)
elif name in ('meas_objs', "qubits") and value is not None:
self.exp_metadata.update(
{name: [qb.name for qb in value]})
else:
self.exp_metadata.update({name: value})
except Exception as e:
log.error(
f"Could not add {name} with value {value} to the "
f"metadata")
raise e
def extract_timestamp(self):
try:
self.timestamp = self.MC.data_object._datemark + '_' \
+ self.MC.data_object._timemark
except Exception:
pass # if extraction fails, keep the old value (None from init)
    def guess_label(self, **kwargs):
        """
        Sets self.label to a default label if it is currently None.

        The label is the experiment name (falling back to the name of the
        first sequence) plus a measurement suffix taken from the device or,
        if no device is set, from the measure objects.
        """
        if self.label is None:
            if self.experiment_name is None:
                self.experiment_name = self.sequences[0].name
            self.label = self.experiment_name
            # NOTE(review): qb_names is unused; the call might be kept for
            # validation side effects of get_qubits -- confirm before removing
            _, qb_names = self.get_qubits(self.qubits)
            if self.dev is not None:
                self.label += self.dev.get_msmt_suffix(self.meas_obj_names)
            else:
                # guess_label is called from run_measurement -> we have qubits
                self.label += mqm.get_multi_qubit_msmt_suffix(self.meas_objs)
@Timer()
def run_analysis(self, analysis_class=None, analysis_kwargs=None, **kw):
"""
Launches the analysis.
Args:
analysis_class: Class to use for the analysis
analysis_kwargs: keyword arguments passed to the analysis class
Returns: analysis object
"""
if analysis_class is None:
analysis_class = ba.BaseDataAnalysis
if analysis_kwargs is None:
analysis_kwargs = {}
self.analysis = analysis_class(**analysis_kwargs)
return self.analysis
    def autorun(self, **kw):
        """
        Runs the full experiment: measurement (if self.measure), then
        analysis (if self.analyze) and the optional callback, then saves
        the timers. Analysis and callback only run when measuring.

        All keyword arguments are passed on to run_measurement(),
        run_analysis() and the callback.

        Returns:
            self
        """
        if self.measure:
            try:
                # Do not save timers here since they will be saved below.
                self.run_measurement(save_timers=False, **kw)
            except (Exception, KeyboardInterrupt) as e:
                # save timers even on a failed/aborted measurement
                self.save_timers()
                raise e
            # analyze and call callback only when measuring
            if self.analyze:
                self.run_analysis(**kw)
            if self.callback is not None and self.callback_condition():
                self.callback(**kw)
        self.save_timers()  # for now store timers only if creating new file
        return self
def serialize(self, omitted_attrs=('MC', 'device', 'qubits')):
"""
Map a Quantum experiment to a large dict for hdf5 storage/pickle object,
etc.
Returns:
"""
raise NotImplementedError()
@Timer()
def _prepare_sequences(self, sequences=None, sequence_function=None,
sequence_kwargs=None):
"""
Prepares/build sequences for a measurement.
Args:
sequences (list): list of sequences to run. Optional. If not given
then a sequence_function from which the sequences can be created
is required.
sequence_function (callable): sequence function to generate sequences..
Should return with one of the following formats:
- a list of sequences: valid if the first and second
sweepfunctions are SegmentHardSweep and SegmentSoftsweep
respectively.
- a sequence: valid if the sweepfunction is SegmentHardsweep
- One of the following tuples:
(sequences, mc_points_tuple), where mc_points_tuple is a
tuple in which each entry corresponds to a dimension
of the sweep. This is the preferred option.
For backwards compatibility, the following two tuples are
also accepted:
(sequences, mc_points_first_dim, mc_points_2nd_dim)
(sequences, mc_points_first_dim)
sequence_kwargs (dict): arguments to pass to the sequence
function if sequence_function is not None. If
sequence_function is None, the following entries in this
dict are supported:
- extra_sequences (list): a list of additional sequences to
measure. This is useful for combining sequences that are
automatically generated by a child-class of
QuantumExperiment with user-provided sequences into a
single experiment (e.g., for measuring them in a single
upload by specifying a sufficiently high
compression_seg_lim). The user has to ensure that the
extra sequences are compatible with the normal sequences
of the QuantumExperiment, e.g., in terms of number of
acquisition elements.
Returns:
"""
if sequence_kwargs is None:
sequence_kwargs = {}
if sequence_function is not None:
# build sequence from function
seq_info = sequence_function(**sequence_kwargs)
if isinstance(seq_info, list):
self.sequences = seq_info
elif isinstance(seq_info, Sequence):
self.sequences = [seq_info]
elif len(seq_info) == 3: # backwards compatible 2D sweep
self.sequences, \
(self.mc_points[0], self.mc_points[1]) = seq_info
elif len(seq_info) == 2:
if np.ndim(seq_info[1]) == 1:
# backwards compatible 1D sweep
self.sequences, self.mc_points[0] = seq_info
else:
self.sequences, self.mc_points = seq_info
# ensure self.sequences is a list
if np.ndim(self.sequences) == 0:
self.sequences = [self.sequences]
elif sequences is not None:
extra_seqs = deepcopy(sequence_kwargs.get('extra_sequences', []))
for seq in extra_seqs:
seq.name = 'Extra' + seq.name
self.sequences = sequences + extra_seqs
if len(self.mc_points) > 1 and len(self.mc_points[1]):
# mc_points are set and won't be generated automatically.
# We have to add additional points for the extra sequences.
self.mc_points[1] = np.concatenate([
self.mc_points[1],
np.arange(len(extra_seqs)) + self.mc_points[1][-1] + 1])
# check sequence
assert len(self.sequences) != 0, "No sequence found."
if self.plot_sequence:
self.plot()
    @Timer()
    def _configure_mc(self, MC=None):
        """
        Configure the measurement control (self.MC) for the measurement.
        This includes setting the sweep points and the detector function.

        By default, SegmentHardSweep is the sweepfunction used for the first
        dimension and SegmentSoftSweep is the sweepfunction used for the second
        dimension. In case other sweepfunctions should be used, self.sweep_functions
        should be modified prior to the call of this function.

        Args:
            MC (MeasurementControl): optional MC instance passed on to
                self._set_MC(); if None, the stored/retrievable MC is used.

        Returns:
            mmnt_mode (str): "1D" or "2D"
        """
        # ensure measurement control is set
        self._set_MC(MC)

        # configure mc_points
        if len(self.mc_points[0]) == 0:  # first dimension mc_points not yet set
            if self.sweep_functions[0] == awg_swf.SegmentHardSweep:
                # first dimension mc points can be retrieved as
                # ro_indices from sequence
                self.mc_points[0] = np.arange(self.sequences[0].n_acq_elements())
            else:
                raise ValueError("The first dimension of mc_points must be provided "
                                 "with sequence if the sweep function isn't "
                                 "'SegmentHardSweep'.")

        if len(self.sequences) > 1 and len(self.mc_points[1]) == 0:
            if self.sweep_functions[1] == awg_swf.SegmentSoftSweep:
                # 2nd dimension mc_points can be retrieved as sequence number
                self.mc_points[1] = np.arange(len(self.sequences))
            elif self.sweep_points is not None and len(self.sweep_points) > 1:
                # second dimension can be inferred from sweep points
                self.mc_points[1] = self.sweep_points.get_sweep_params_property(
                    'values', 1)
            else:
                raise ValueError("The second dimension of mc_points must be provided "
                                 "if the sweep function isn't 'SegmentSoftSweep' and"
                                 "no sweep_point object is given.")

        # force 2D sweep if needed (allow 1D sweep for backwards compatibility)
        if len(self.mc_points[1]) == 0 and self.force_2D_sweep:
            self.mc_points[1] = np.array([0])  # force 2d with singleton

        # set mc points
        if len(self.sequences) > 1:
            # compress 2D sweep
            if self.compression_seg_lim is not None:
                if self.sweep_functions == (awg_swf.SegmentHardSweep,
                                            awg_swf.SegmentSoftSweep):
                    self.sequences, self.mc_points[0], \
                        self.mc_points[1], cf = \
                        self.sequences[0].compress_2D_sweep(self.sequences,
                                                            self.compression_seg_lim,
                                                            True,
                                                            self.mc_points[0])
                    self.exp_metadata.update({'compression_factor': cf})
                else:
                    log.warning("Sequence compression currently does not support"
                                "sweep_functions different than (SegmentHardSweep,"
                                " SegmentSoftSweep). This could easily be implemented"
                                "by modifying Sequence.compress_2D_sweep to accept"
                                "mc_points and do the appropriate reshaping. Feel"
                                "free to make a pull request ;). Skipping compression"
                                "for now.")
        if self.harmonize_element_lengths:
            Sequence.harmonize_element_lengths(self.sequences)
        # try to resolve name/unit of the first-dimension sweep parameter;
        # a TypeError occurs when self.sweep_points is None or not indexable
        try:
            sweep_param_name = list(self.sweep_points[0])[0]
            unit = self.sweep_points.get_sweep_params_property(
                'unit', 0, param_names=sweep_param_name)
        except TypeError:
            sweep_param_name, unit = "None", ""
        if self.sweep_functions[0] == awg_swf.SegmentHardSweep:
            sweep_func_1st_dim = self.sweep_functions[0](
                sequence=self.sequences[0], upload=self.upload,
                parameter_name=sweep_param_name, unit=unit)
        else:
            # In case of an unknown sweep function type, it is assumed
            # that self.sweep_functions[0] has already been initialized
            # with all required parameters and can be directly passed to
            # MC.
            sweep_func_1st_dim = self.sweep_functions[0]
        self.MC.set_sweep_function(sweep_func_1st_dim)
        self.MC.set_sweep_points(self.mc_points[0])

        # set second dimension sweep function
        if len(self.mc_points[1]) > 0:  # second dimension exists
            try:
                sweep_param_name = list(self.sweep_points[1])[0]
                unit = self.sweep_points.get_sweep_params_property(
                    'unit', 1, param_names=sweep_param_name)
            except TypeError:
                sweep_param_name, unit = "None", ""
            if self.sweep_functions[1] == awg_swf.SegmentSoftSweep:
                sweep_func_2nd_dim = self.sweep_functions[1](
                    sweep_func_1st_dim, self.sequences, sweep_param_name, unit)
            else:
                # Check whether it is a nested sweep function whose first
                # sweep function is a SegmentSoftSweep class as placeholder.
                swfs = getattr(self.sweep_functions[1], 'sweep_functions',
                               [None])
                if (swfs[0] == awg_swf.SegmentSoftSweep):
                    # Replace the SegmentSoftSweep placeholder by a properly
                    # configured instance of SegmentSoftSweep.
                    if len(swfs) > 1:
                        # make sure that units are compatible
                        unit = getattr(swfs[1], 'unit', unit)
                    swfs[0] = awg_swf.SegmentSoftSweep(
                        sweep_func_1st_dim, self.sequences,
                        sweep_param_name, unit)
                # In case of an unknown sweep function type, it is assumed
                # that self.sweep_functions[1] has already been initialized
                # with all required parameters and can be directly passed to
                # MC.
                sweep_func_2nd_dim = self.sweep_functions[1]

            if self.filter_segments_mask is not None and \
                    self.compression_seg_lim is not None:
                log.warning("Combining compression_seg_lim and "
                            "filter_segments_mask is not supported. Ignoring "
                            "filter_segments_mask.")
            elif self.filter_segments_mask is not None:
                mask = np.array(self.filter_segments_mask)
                # Only segments with indices included in the mask can be
                # filtered out. The others will always be measured.
                for seq in self.sequences:
                    for i, seg in enumerate(seq.segments.values()):
                        if i < mask.shape[0]:
                            seg.allow_filter = True
                # Create filter lookup table in the format expected by
                # FilteredSweep: each key is a soft sweep point and the
                # respective value is a tuple of two segment indices
                # indicating the range of segments to be measured.
                # The conversion to a tuple of start index and end index may
                # require to measure segments that have a False-entry in the
                # filter_segments_mask, see the class docstring.
                filter_lookup = {}
                for i, sp in enumerate(self.mc_points[1]):
                    if i >= mask.shape[1]:
                        # measure everything; 32767 presumably acts as an
                        # effectively-unbounded upper segment index -- confirm
                        filter_lookup[sp] = (0, 32767)
                    elif True in mask[:, i]:
                        # measure from the first True up to the last True
                        filter_lookup[sp] = (
                            list(mask[:, i]).index(True),
                            mask.shape[0] - list(mask[:, i])[::-1].index(
                                True) - 1)
                    else:
                        # measure nothing (by setting last < first)
                        filter_lookup[sp] = (1, 0)
                sweep_func_2nd_dim = swf.FilteredSweep(
                    self.sequences[0], filter_lookup, [sweep_func_2nd_dim])
            self.MC.set_sweep_function_2D(sweep_func_2nd_dim)
            self.MC.set_sweep_points_2D(self.mc_points[1])

        # check whether there is at least one measure object
        if len(self.meas_objs) == 0:
            raise ValueError('No measure objects provided. Cannot '
                             'configure detector functions')

        # Configure detector function
        # FIXME: this should be extended to meas_objs that are not qubits
        self.df = mqm.get_multiplexed_readout_detector_functions(
            self.df_name, self.meas_objs, **self.df_kwargs)
        self.MC.set_detector_function(self.df)
        if self.dev is not None:
            meas_obj_value_names_map = self.dev.get_meas_obj_value_names_map(
                self.meas_objs, self.df)
        else:
            meas_obj_value_names_map = mqm.get_meas_obj_value_names_map(
                self.meas_objs, self.df)
        self.exp_metadata.update(
            {'meas_obj_value_names_map': meas_obj_value_names_map})
        if 'meas_obj_sweep_points_map' not in self.exp_metadata:
            self.exp_metadata['meas_obj_sweep_points_map'] = {}
        if self.MC.soft_repetitions() != 1:
            self.exp_metadata['soft_repetitions'] = self.MC.soft_repetitions()
        if len(self.mc_points[1]) > 0:
            mmnt_mode = "2D"
        else:
            mmnt_mode = "1D"
        return mmnt_mode
def _set_MC(self, MC=None):
"""
Sets the measurement control and raises an error if no MC
could be retrieved from device/qubits objects
Args:
MC (MeasurementControl):
Returns:
"""
if MC is not None:
self.MC = MC
elif self.MC is None:
try:
self.MC = self.dev.instr_mc.get_instr()
except AttributeError:
try:
self.MC = self.meas_objs[0].instr_mc.get_instr()
except (AttributeError, IndexError):
raise ValueError("The Measurement Control (MC) could not "
"be retrieved because no Device/measure "
"objects were found. Pass the MC to "
"run_measurement() or set the MC attribute"
" of the QuantumExperiment instance.")
# def __setattr__(self, name, value):
# """
# Observes attributes which are set to this class. If they are in the
# _metadata_params then they are automatically added to the experimental
# metadata
# Args:
# name:
# value:
#
# Returns:
#
# """
# if name in self._metadata_params:
# try:
# if name in 'cal_points' and value is not None:
# self.exp_metadata.update({name: repr(value)})
# elif name in ('meas_objs', "qubits") and value is not None:
# self.exp_metadata.update({name: [qb.name for qb in value]})
# else:
# self.exp_metadata.update({name: value})
# except Exception as e:
# log.error(f"Could not add {name} with value {value} to the "
# f"metadata")
# raise e
#
# self.__dict__[name] = value
def save_timers(self, quantum_experiment=True, sequence=True, segments=True, filepath=None):
if self.MC is None or self.MC.skip_measurement():
return
data_file = helper_functions.open_hdf_file(self.timestamp, filepath=filepath, mode="r+")
try:
timer_group = data_file.get(Timer.HDF_GRP_NAME)
if timer_group is None:
timer_group = data_file.create_group(Timer.HDF_GRP_NAME)
if quantum_experiment:
self.timer.save(timer_group)
if sequence:
seq_group = timer_group.create_group('Sequences')
for s in self.sequences:
# save sequence timers
try:
timer_seq_name = s.timer.name
# check that name doesn't exist and it case it does, append an index
# Note: normally that should not happen (not desirable)
if timer_seq_name in seq_group.keys():
log.warning(f"Timer with name {timer_seq_name} already "
f"exists in Sequences timers. "
f"Only last instance will be kept")
s.timer.save(seq_group)
if segments:
seg_group = seq_group[timer_seq_name].create_group(timer_seq_name + ".segments")
for _, seg in s.segments.items():
try:
timer_seg_name = seg.timer.name
# check that name doesn't exist and it case it does, append an index
# Note: normally that should not happen (not desirable)
if timer_seg_name in seg_group.keys():
log.warning(f"Timer with name {timer_seg_name} already "
f"exists in Segments timers. "
f"Only last instance will be kept")
seg.timer.save(seg_group)
except AttributeError:
pass
except AttributeError:
pass # in case some sequences don't have timers
except Exception as e:
data_file.close()
raise e
def plot(self, sequences=0, segments=0, qubits=None,
save=False, legend=True, **plot_kwargs):
"""
Plots (a subset of) sequences / segments of the QuantumExperiment
:param sequences (int, list, "all"): sequences to plot. Can be "all"
(plot all sequences),
an integer (index of sequence to plot), or a list of
integers/str. If strings are in the list, then plots only sequences
with the corresponding name.
:param segments (int, list, "all"): Segments to be plotted.
If a single index i is provided, then the ith segment will be plot-
ted for each sequence in `sequences`. Otherwise a list of list of
indices must be provided: the outer list corresponds to each
sequence and the inner list to the indices of the segments to plot.
E.g. segments=[[0,1],[3]] will plot segment 0 and 1 of
sequence 0 and segment 3 of sequence 1.
If the string 'all' is provided, then all segments are plotted.
Plots segment 0 by default.
:param qubits (list): list of qubits to plot.
Defaults to self.meas_objs. Qubits can be specified as qubit names
or qubit objects.
:param save (bool): whether or not to save the figures in the
measurement folder.
:param legend (bool): whether or not to show the legend.
:param plot_kwargs: kwargs passed on to segment.plot(). By default,
channel_map is taken from dev.get_channel_map(qubits) if available.
:return:
"""
plot_kwargs = deepcopy(plot_kwargs)
if sequences == "all":
# plot all sequences
sequences = self.sequences
# if the provided sequence is not it a list or tuple, make it a list
if np.ndim(sequences) == 0:
sequences = [sequences]
# get sequence objects from sequence name or index
sequences = np.ravel([[s for i, s in enumerate(self.sequences)
if i == ind or s.name == ind]
for ind in sequences])
if qubits is None:
qubits = self.meas_objs
qubits, _ = self.get_qubits(qubits) # get qubit objects
default_ch_map = \
self.dev.get_channel_map(qubits) if self.dev is not None else \
{qb.name: qb.get_channels() for qb in qubits}
plot_kwargs.update(dict(channel_map=plot_kwargs.pop('channel_map',
default_ch_map)))
plot_kwargs.update(dict(legend=legend))
if segments == "all":
# plot all segments
segments = [range(len(seq.segments)) for seq in sequences]
elif isinstance(segments, int):
# single segment from index
segments = [[segments] for _ in sequences]
figs_and_axs = []
for seq, segs in zip(sequences, segments):
for s in segs:
s = list(seq.segments.keys())[s]
if save:
try:
from pycqed.analysis import analysis_toolbox as a_tools
folder = a_tools.data_from_time(self.timestamp,
folder=self.MC.datadir(),
auto_fetch=False)
except:
log.warning('Could not determine folder of current '
'experiment. Sequence plot will be saved in '
'current directory.')
folder = "."
import os
save_path = os.path.join(folder,
"_".join((seq.name, s)) + ".png")
save_kwargs = dict(fname=save_path,
bbox_inches="tight")
plot_kwargs.update(dict(save_kwargs=save_kwargs,
savefig=True))
figs_and_axs.append(seq.segments[s].plot(**plot_kwargs))
# avoid returning a list of Nones (if show_and_close is True)
return [v for v in figs_and_axs if v is not None] or None
def __repr__(self):
return f"QuantumExperiment(dev={getattr(self, 'dev', None)}, " \
f"qubits={getattr(self, 'qubits', None)})"
|
|
import bisect
import itertools
import math
from abc import ABCMeta, abstractmethod
import typing as tp
from satella.coding.concurrent.monitor import RMonitor
from satella.coding.recast_exceptions import silence_excs
from satella.coding.sequences import try_close
from satella.coding.typing import V, K, KVTuple
class DBStorage(metaclass=ABCMeta):
    """
    An abstract implementation of the storage class provided to
    :class:`~satella.coding.structures.SyncableDroppable`

    This serves as satella's hook into your database infrastructure.
    """
    __slots__ = ()

    @abstractmethod
    def put(self, key: K, value: V) -> None:
        """
        Put given value to storage at given key.

        This may block for a while.

        :param key: key to store
        :param value: value to store
        """

    @abstractmethod
    def iterate(self, starting_key: tp.Optional[K]) -> tp.Iterator[KVTuple]:
        """
        Return an iterator iterating from provided starting key to the end
        of the values, as read from the database.

        This may block for a while.

        This iterator will be closed upon no longer being necessary.

        :param starting_key: starting key, included, or None for iterate from the start
        :return: an iterator from provided key (included) to the end of the range
        """

    @abstractmethod
    def on_change_start_entry(self, start_entry: tp.Optional[K]) -> None:
        """
        Called by SyncableDroppable when the starting entry (earliest entry
        encountered both in the DB and in memory) is changed.

        :param start_entry: new value of start entry or None if there are no entries at all
        """

    @abstractmethod
    def on_change_stop_entry(self, stop_entry: tp.Optional[K]) -> None:
        """
        Called by SyncableDroppable when the stopping entry (latest entry
        encountered both in the DB and in memory) is changed.

        :param stop_entry: new value of stop entry or None if there are no entries at all
        """

    @abstractmethod
    def on_change_synced_up_to(self, synced_up_to: tp.Optional[K]) -> None:
        """
        Called by SyncableDroppable when synced up to (latest timestamp synced)
        is changed.

        :param synced_up_to: new value of synced up to
        """

    @abstractmethod
    def delete(self, key: K) -> None:
        """
        Called by SyncableDroppable when there's a need to remove target key.

        :param key: key to remove
        """
class SyncableDroppable(RMonitor, tp.Generic[K, V]):
    """
    A thread-safe class representing some single time series, which needs to be synchronized
    with some server, and may be too large to keep in memory. Moreover, after the sync we need
    to retain a part of the time series in memory for future requests. Only series past some
    timestamp may be deleted.

    For brevity, this will refer to keys as timestamps. The keys must be __eq__able, comparable
    and subtractable.

    A rule is that an item can never be both in memory and in the DB.

    :param db_storage: a DBStorage implementation of your own provision, that serves as class'
        interface with the database
    :param start_entry: earliest timestamp stored or None if no data is stored
    :param stop_entry: latest timestamp stored or None if no data is stored
    :param synced_up_to: timestamp of the last entry synchronized or None if no data is stored
    :param span_to_keep_in_memory: key span to keep in memory. Entries earlier than
        difference of the latest key and this will be dropped from memory, onto the DB.
        Can't be false.
    :param span_to_keep_in_db: key span to keep on disk. Entries earlier than
        difference of the latest key and this will be dropped from the DB.
        Can't be false.

    .. note:: Note that proper handling of maximum spans requires periodical calls to
              :meth:`~satella.coding.structures.SyncableDroppable.cleanup`
    """
    __slots__ = ('db_storage', '_start_entry', '_stop_entry', '_synced_up_to', 'data_in_memory',
                 'span_to_keep_in_memory', 'span_to_keep_in_db')
def __init__(self, db_storage: DBStorage, start_entry: tp.Optional[K],
stop_entry: tp.Optional[K], synced_up_to: tp.Optional[K],
span_to_keep_in_memory: int, span_to_keep_in_db: int):
super().__init__()
assert span_to_keep_in_memory and span_to_keep_in_db, 'One of spans was false!'
assert span_to_keep_in_db > span_to_keep_in_memory, 'Invalid span'
self.db_storage = db_storage # type: DBStorage
self._start_entry = start_entry # type: K
self._stop_entry = stop_entry # type: K
self._synced_up_to = synced_up_to # type: K
self.data_in_memory = [] # type: tp.List[KVTuple]
self.span_to_keep_in_db = span_to_keep_in_db # type: K
self.span_to_keep_in_memory = span_to_keep_in_memory # type: K
@property
def start_entry(self) -> tp.Optional[K]:
return self._start_entry
@start_entry.setter
def start_entry(self, v: tp.Optional[K]) -> None:
self._start_entry = v
self.db_storage.on_change_start_entry(v)
@property
def synced_up_to(self) -> tp.Optional[K]:
return self._synced_up_to
@synced_up_to.setter
def synced_up_to(self, v: tp.Optional[K]) -> None:
self._synced_up_to = v
self.db_storage.on_change_synced_up_to(v)
@property
def stop_entry(self) -> tp.Optional[K]:
return self._stop_entry
@stop_entry.setter
def stop_entry(self, v: tp.Optional[K]) -> None:
self._stop_entry = v
self.db_storage.on_change_stop_entry(v)
@RMonitor.synchronized
def sync_to_db(self) -> None:
"""
Make sure that everything's that in memory in also stored in the DB.
"""
for key, value in self.data_in_memory:
self.db_storage.put(key, value)
self.data_in_memory = []
def cleanup(self) -> None:
"""
Make sure that everything's that in memory and the DB conforms to span_to_keep_in_db
and span_to_keep_in_memory.
This may block for a while.
"""
self.cleanup_keep_in_db()
self.cleanup_keep_in_memory()
def _cleanup_the_db(self) -> bool:
"""
Remove entries from the DB that are older than span_to_keep_in_db
:return: if all entries in the DB have been trashed
"""
if self.start_entry is None:
return False
cutoff_span = self.stop_entry - self.span_to_keep_in_db
iterator = self.db_storage.iterate(self.start_entry)
try:
for key, value in iterator:
if key < cutoff_span:
self.db_storage.delete(key)
else:
self.start_entry = key
break
else:
# This means that we have wiped entire DB
if self.data_in_memory:
self.start_entry = self.data_in_memory[0][0]
self.db_storage.on_change_start_entry(self.start_entry)
else:
# We no longer have ANY data
self.start_entry = self.stop_entry = None
return True
finally:
try_close(iterator)
return False
def get_archive(self, start: K, stop: K) -> tp.Iterator[KVTuple]:
"""
Get some historic data that is kept both in the DB and in the memory
:param start: starting key (included)
:param stop: stopping key (included)
:return: a iterator of KVTuple
"""
if not self.data_in_memory:
return []
if self.first_key_in_memory <= start:
# We'll serve it from memory
for key, value in self.data_in_memory:
if key < start:
continue
if key > stop:
return
yield key, value
else:
it = self.db_storage.iterate(start)
try:
for key, value in it:
if key < start:
continue
if key > stop:
return
yield key, value
# We must iterate from the memory
if self.data_in_memory:
yield from self.get_archive(self.first_key_in_memory, stop)
finally:
try_close(it)
def get_latest_value(self) -> KVTuple:
"""
Get the piece of data that was added here last
:return: a tuple of (key, value)
:raise ValueError: no data in series
"""
if self.stop_entry is None:
raise ValueError('No data in series')
if self.data_in_memory:
return self.data_in_memory[-1]
else:
iterator = self.db_storage.iterate(self.stop_entry)
try:
return next(iterator)
finally:
iterator.close()
@RMonitor.synchronized
def cleanup_keep_in_memory(self) -> None:
"""
Eject values from memory that should reside in the DB onto the DB
"""
first_key = self.first_key_in_memory
if first_key is None:
return
cutoff_point = self.stop_entry - self.span_to_keep_in_memory
for index, row in enumerate(self.data_in_memory):
ts, value = row
if ts > cutoff_point:
for ts, value in self.data_in_memory[:index]:
self.db_storage.put(ts, value)
del self.data_in_memory[:index]
break
else:
self.sync_to_db()
self.data_in_memory = []
return
@RMonitor.synchronized
def cleanup_keep_in_db(self) -> None:
"""
Clear up the database to conform to our span_to_keep_in_db
"""
if self.start_entry is None or not self.data_in_memory:
return
cutoff_span = self.stop_entry - self.span_to_keep_in_db
if self.start_entry == self.first_key_in_memory:
# The entire series is loaded in the memory
self.data_in_memory = [tpl for tpl in self.data_in_memory if tpl[0] >= cutoff_span]
if self.data_in_memory:
self.start_entry = self.first_key_in_memory
else:
self.start_entry = self.stop_entry = None
else:
if not self._cleanup_the_db():
return
self.cleanup_keep_in_db()
def on_new_data(self, key: K, value: V) -> None:
"""
Called by the user when there's new data gathered.
Key must be greater than start entry
:param key: key of the new data
:param value: value of the new data
:raise ValueError: key was not larger than current stop entry
"""
if self.stop_entry is not None:
if key <= self.stop_entry:
raise ValueError('Key not greater than current stop entry!')
self.data_in_memory.append((key, value))
self.stop_entry = key
if self.start_entry is None:
self.start_entry = key
def on_sync_request(self, maximum_entries: tp.Optional[int] = math.inf) -> tp.Iterator[KVTuple]:
"""
Return an iterator that will provide the source of the data for synchronization.
This will preferentially start from the first value, so as to keep values synchronized
in-order.
:param maximum_entries:
:return: an iterator of (KVTuple) that should be synchronized against the server
:raise ValueError: nothing to synchronize!
"""
if not self.start_entry:
raise ValueError('Nothing to synchronize!')
if self.synced_up_to == self.data_in_memory[-1][0]:
raise ValueError('Nothing to synchronize!')
if self.synced_up_to is None:
# Sync everything
iterator = self.db_storage.iterate(None)
try:
data = list(iterator)
if len(data) < maximum_entries:
entries_left = maximum_entries - len(data)
if entries_left == math.inf:
data = itertools.chain(data, self.data_in_memory)
else:
data = itertools.chain(data, self.data_in_memory[:entries_left])
v = data
finally:
try_close(iterator)
else:
if self.first_key_in_memory <= self.synced_up_to:
# Means we have to sync from memory
if self.synced_up_to is None:
v = self.data_in_memory
else:
index = bisect.bisect_right([y[0] for y in self.data_in_memory],
self.synced_up_to)
if maximum_entries == math.inf:
v = self.data_in_memory[index:]
else:
v = self.data_in_memory[index:index + maximum_entries]
else:
# We have to start off the disk
data = []
iterator = self.db_storage.iterate(self.start_entry)
try:
while len(data) < maximum_entries:
try:
data.append(next(iterator))
except StopIteration:
for index, tpl in enumerate(self.data_in_memory):
if len(data) >= maximum_entries:
break
if self.synced_up_to is not None:
if tpl[0] > self.synced_up_to:
break
v = itertools.chain(data, self.data_in_memory[:index])
break
else:
v = data
finally:
try_close(iterator)
return v
def on_synced_up_to(self, key: K) -> None:
"""
Called when data was successfully synced up to key included
:param key: maximum key synchronized
"""
self.synced_up_to = key
@property
def first_key_in_memory(self) -> tp.Optional[K]:
"""
:return: key of the first element stored in memory
"""
if not self.data_in_memory:
return None
else:
return self.data_in_memory[0][0]
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from qpid.client import Client, Closed
from qpid.queue import Empty
from qpid.content import Content
from qpid.testlib import TestBase
class BasicTests(TestBase):
"""Tests for 'methods' on the amqp basic 'class'"""
def test_consume_no_local(self):
"""
Test that the no_local flag is honoured in the consume method
"""
channel = self.channel
#setup, declare two queues:
channel.queue_declare(queue="test-queue-1a", exclusive=True)
channel.queue_declare(queue="test-queue-1b", exclusive=True)
#establish two consumers one of which excludes delivery of locally sent messages
channel.basic_consume(consumer_tag="local_included", queue="test-queue-1a")
channel.basic_consume(consumer_tag="local_excluded", queue="test-queue-1b", no_local=True)
#send a message
channel.basic_publish(routing_key="test-queue-1a", content=Content("consume_no_local"))
channel.basic_publish(routing_key="test-queue-1b", content=Content("consume_no_local"))
#check the queues of the two consumers
excluded = self.client.queue("local_excluded")
included = self.client.queue("local_included")
msg = included.get(timeout=1)
self.assertEqual("consume_no_local", msg.content.body)
try:
excluded.get(timeout=1)
self.fail("Received locally published message though no_local=true")
except Empty: None
def test_consume_exclusive(self):
"""
Test that the exclusive flag is honoured in the consume method
"""
channel = self.channel
#setup, declare a queue:
channel.queue_declare(queue="test-queue-2", exclusive=True)
#check that an exclusive consumer prevents other consumer being created:
channel.basic_consume(consumer_tag="first", queue="test-queue-2", exclusive=True)
try:
channel.basic_consume(consumer_tag="second", queue="test-queue-2")
self.fail("Expected consume request to fail due to previous exclusive consumer")
except Closed, e:
self.assertChannelException(403, e.args[0])
#open new channel and cleanup last consumer:
channel = self.client.channel(2)
channel.channel_open()
#check that an exclusive consumer cannot be created if a consumer already exists:
channel.basic_consume(consumer_tag="first", queue="test-queue-2")
try:
channel.basic_consume(consumer_tag="second", queue="test-queue-2", exclusive=True)
self.fail("Expected exclusive consume request to fail due to previous consumer")
except Closed, e:
self.assertChannelException(403, e.args[0])
def test_reconnect_to_durable_subscription(self):
try:
publisherchannel = self.channel
my_id = "my_id"
consumer_connection_properties_with_instance = {"instance": my_id}
queue_for_subscription = "queue_for_subscription_%s" % my_id
topic_name = "my_topic_name"
test_message = self.uniqueString()
durable_subscription_client = self.connect(client_properties=consumer_connection_properties_with_instance)
consumerchannel = durable_subscription_client.channel(1)
consumerchannel.channel_open()
self._declare_and_bind_exclusive_queue_on_topic_exchange(consumerchannel, queue_for_subscription, topic_name)
# disconnect
durable_subscription_client.close()
# send message to topic
publisherchannel.basic_publish(routing_key=topic_name, exchange="amq.topic", content=Content(test_message))
# reconnect and consume message
durable_subscription_client = self.connect(client_properties=consumer_connection_properties_with_instance)
consumerchannel = durable_subscription_client.channel(1)
consumerchannel.channel_open()
self._declare_and_bind_exclusive_queue_on_topic_exchange(consumerchannel, queue_for_subscription, topic_name)
# Create consumer and consume the message that was sent whilst subscriber was disconnected. By convention we
# declare the consumer as exclusive to forbid concurrent access.
subscription = consumerchannel.basic_consume(queue=queue_for_subscription, exclusive=True)
queue = durable_subscription_client.queue(subscription.consumer_tag)
# consume and verify message content
msg = queue.get(timeout=1)
self.assertEqual(test_message, msg.content.body)
consumerchannel.basic_ack(delivery_tag=msg.delivery_tag)
finally:
consumerchannel.queue_delete(queue=queue_for_subscription)
durable_subscription_client.close()
def _declare_and_bind_exclusive_queue_on_topic_exchange(self, channel, queue, topic_name):
channel.queue_declare(queue=queue, exclusive=True, auto_delete=False, durable=True)
channel.queue_bind(exchange="amq.topic", queue=queue, routing_key=topic_name)
def test_consume_queue_errors(self):
"""
Test error conditions associated with the queue field of the consume method:
"""
channel = self.channel
try:
#queue specified but doesn't exist:
channel.basic_consume(queue="invalid-queue")
self.fail("Expected failure when consuming from non-existent queue")
except Closed, e:
self.assertChannelException(404, e.args[0])
channel = self.client.channel(2)
channel.channel_open()
try:
#queue not specified and none previously declared for channel:
channel.basic_consume(queue="")
self.fail("Expected failure when consuming from unspecified queue")
except Closed, e:
self.assertConnectionException(530, e.args[0])
def test_consume_unique_consumers(self):
"""
Ensure unique consumer tags are enforced
"""
channel = self.channel
#setup, declare a queue:
channel.queue_declare(queue="test-queue-3", exclusive=True)
#check that attempts to use duplicate tags are detected and prevented:
channel.basic_consume(consumer_tag="first", queue="test-queue-3")
try:
channel.basic_consume(consumer_tag="first", queue="test-queue-3")
self.fail("Expected consume request to fail due to non-unique tag")
except Closed, e:
self.assertConnectionException(530, e.args[0])
def test_cancel(self):
"""
Test compliance of the basic.cancel method
"""
channel = self.channel
#setup, declare a queue:
channel.queue_declare(queue="test-queue-4", exclusive=True)
channel.basic_consume(consumer_tag="my-consumer", queue="test-queue-4")
channel.basic_publish(routing_key="test-queue-4", content=Content("One"))
myqueue = self.client.queue("my-consumer")
msg = myqueue.get(timeout=1)
self.assertEqual("One", msg.content.body)
#cancel should stop messages being delivered
channel.basic_cancel(consumer_tag="my-consumer")
channel.basic_publish(routing_key="test-queue-4", content=Content("Two"))
try:
msg = myqueue.get(timeout=1)
self.fail("Got message after cancellation: " + msg)
except Empty: None
#cancellation of non-existant consumers should be handled without error
channel.basic_cancel(consumer_tag="my-consumer")
channel.basic_cancel(consumer_tag="this-never-existed")
def test_ack(self):
"""
Test basic ack/recover behaviour
"""
channel = self.channel
channel.queue_declare(queue="test-ack-queue", exclusive=True)
reply = channel.basic_consume(queue="test-ack-queue", no_ack=False)
queue = self.client.queue(reply.consumer_tag)
channel.basic_publish(routing_key="test-ack-queue", content=Content("One"))
channel.basic_publish(routing_key="test-ack-queue", content=Content("Two"))
channel.basic_publish(routing_key="test-ack-queue", content=Content("Three"))
channel.basic_publish(routing_key="test-ack-queue", content=Content("Four"))
channel.basic_publish(routing_key="test-ack-queue", content=Content("Five"))
msg1 = queue.get(timeout=1)
msg2 = queue.get(timeout=1)
msg3 = queue.get(timeout=1)
msg4 = queue.get(timeout=1)
msg5 = queue.get(timeout=1)
self.assertEqual("One", msg1.content.body)
self.assertEqual("Two", msg2.content.body)
self.assertEqual("Three", msg3.content.body)
self.assertEqual("Four", msg4.content.body)
self.assertEqual("Five", msg5.content.body)
channel.basic_ack(delivery_tag=msg2.delivery_tag, multiple=True) #One & Two
channel.basic_ack(delivery_tag=msg4.delivery_tag, multiple=False) #Four
channel.basic_recover(requeue=False)
msg3b = queue.get(timeout=1)
msg5b = queue.get(timeout=1)
self.assertEqual("Three", msg3b.content.body)
self.assertEqual("Five", msg5b.content.body)
try:
extra = queue.get(timeout=1)
self.fail("Got unexpected message: " + extra.content.body)
except Empty: None
def test_recover_requeue(self):
"""
Test requeing on recovery
"""
channel = self.channel
channel.queue_declare(queue="test-requeue", exclusive=True)
subscription = channel.basic_consume(queue="test-requeue", no_ack=False)
queue = self.client.queue(subscription.consumer_tag)
channel.basic_publish(routing_key="test-requeue", content=Content("One"))
channel.basic_publish(routing_key="test-requeue", content=Content("Two"))
channel.basic_publish(routing_key="test-requeue", content=Content("Three"))
channel.basic_publish(routing_key="test-requeue", content=Content("Four"))
channel.basic_publish(routing_key="test-requeue", content=Content("Five"))
msg1 = queue.get(timeout=1)
msg2 = queue.get(timeout=1)
msg3 = queue.get(timeout=1)
msg4 = queue.get(timeout=1)
msg5 = queue.get(timeout=1)
self.assertEqual("One", msg1.content.body)
self.assertEqual("Two", msg2.content.body)
self.assertEqual("Three", msg3.content.body)
self.assertEqual("Four", msg4.content.body)
self.assertEqual("Five", msg5.content.body)
channel.basic_ack(delivery_tag=msg2.delivery_tag, multiple=True) #One & Two
channel.basic_ack(delivery_tag=msg4.delivery_tag, multiple=False) #Four
channel.basic_cancel(consumer_tag=subscription.consumer_tag)
channel.basic_recover(requeue=True)
subscription2 = channel.basic_consume(queue="test-requeue")
queue2 = self.client.queue(subscription2.consumer_tag)
msg3b = queue2.get(timeout=1)
msg5b = queue2.get(timeout=1)
self.assertEqual("Three", msg3b.content.body)
self.assertEqual("Five", msg5b.content.body)
self.assertEqual(True, msg3b.redelivered)
self.assertEqual(True, msg5b.redelivered)
try:
extra = queue2.get(timeout=1)
self.fail("Got unexpected message in second queue: " + extra.content.body)
except Empty: None
try:
extra = queue.get(timeout=1)
self.fail("Got unexpected message in original queue: " + extra.content.body)
except Empty: None
def test_qos_prefetch_count(self):
"""
Test that the prefetch count specified is honoured
"""
#setup: declare queue and subscribe
channel = self.channel
channel.queue_declare(queue="test-prefetch-count", exclusive=True)
subscription = channel.basic_consume(queue="test-prefetch-count", no_ack=False)
queue = self.client.queue(subscription.consumer_tag)
#set prefetch to 5:
channel.basic_qos(prefetch_count=5)
#publish 10 messages:
for i in range(1, 11):
channel.basic_publish(routing_key="test-prefetch-count", content=Content("Message %d" % i))
#only 5 messages should have been delivered:
for i in range(1, 6):
msg = queue.get(timeout=1)
self.assertEqual("Message %d" % i, msg.content.body)
try:
extra = queue.get(timeout=1)
self.fail("Got unexpected 6th message in original queue: " + extra.content.body)
except Empty: None
#ack messages and check that the next set arrive ok:
channel.basic_ack(delivery_tag=msg.delivery_tag, multiple=True)
for i in range(6, 11):
msg = queue.get(timeout=1)
self.assertEqual("Message %d" % i, msg.content.body)
channel.basic_ack(delivery_tag=msg.delivery_tag, multiple=True)
try:
extra = queue.get(timeout=1)
self.fail("Got unexpected 11th message in original queue: " + extra.content.body)
except Empty: None
def test_qos_prefetch_size(self):
"""
Test that the prefetch size specified is honoured
"""
#setup: declare queue and subscribe
channel = self.channel
channel.queue_declare(queue="test-prefetch-size", exclusive=True)
subscription = channel.basic_consume(queue="test-prefetch-size", no_ack=False)
queue = self.client.queue(subscription.consumer_tag)
#set prefetch to 50 bytes (each message is 9 or 10 bytes):
channel.basic_qos(prefetch_size=50)
#publish 10 messages:
for i in range(1, 11):
channel.basic_publish(routing_key="test-prefetch-size", content=Content("Message %d" % i))
#only 5 messages should have been delivered (i.e. 45 bytes worth):
for i in range(1, 6):
msg = queue.get(timeout=1)
self.assertEqual("Message %d" % i, msg.content.body)
try:
extra = queue.get(timeout=1)
self.fail("Got unexpected 6th message in original queue: " + extra.content.body)
except Empty: None
#ack messages and check that the next set arrive ok:
channel.basic_ack(delivery_tag=msg.delivery_tag, multiple=True)
for i in range(6, 11):
msg = queue.get(timeout=1)
self.assertEqual("Message %d" % i, msg.content.body)
channel.basic_ack(delivery_tag=msg.delivery_tag, multiple=True)
try:
extra = queue.get(timeout=1)
self.fail("Got unexpected 11th message in original queue: " + extra.content.body)
except Empty: None
#make sure that a single oversized message still gets delivered
large = "abcdefghijklmnopqrstuvwxyz"
large = large + "-" + large;
channel.basic_publish(routing_key="test-prefetch-size", content=Content(large))
msg = queue.get(timeout=1)
self.assertEqual(large, msg.content.body)
def test_get(self):
"""
Test basic_get method
"""
channel = self.channel
channel.queue_declare(queue="test-get", exclusive=True)
#publish some messages (no_ack=True)
for i in range(1, 11):
channel.basic_publish(routing_key="test-get", content=Content("Message %d" % i))
#use basic_get to read back the messages, and check that we get an empty at the end
for i in range(1, 11):
reply = channel.basic_get(no_ack=True)
self.assertEqual(reply.method.klass.name, "basic")
self.assertEqual(reply.method.name, "get_ok")
self.assertEqual("Message %d" % i, reply.content.body)
reply = channel.basic_get(no_ack=True)
self.assertEqual(reply.method.klass.name, "basic")
self.assertEqual(reply.method.name, "get_empty")
#repeat for no_ack=False
for i in range(11, 21):
channel.basic_publish(routing_key="test-get", content=Content("Message %d" % i))
for i in range(11, 21):
reply = channel.basic_get(no_ack=False)
self.assertEqual(reply.method.klass.name, "basic")
self.assertEqual(reply.method.name, "get_ok")
self.assertEqual("Message %d" % i, reply.content.body)
if(i == 13):
channel.basic_ack(delivery_tag=reply.delivery_tag, multiple=True)
if(i in [15, 17, 19]):
channel.basic_ack(delivery_tag=reply.delivery_tag)
reply = channel.basic_get(no_ack=True)
self.assertEqual(reply.method.klass.name, "basic")
self.assertEqual(reply.method.name, "get_empty")
#recover(requeue=True)
channel.basic_recover(requeue=True)
#get the unacked messages again (14, 16, 18, 20)
for i in [14, 16, 18, 20]:
reply = channel.basic_get(no_ack=False)
self.assertEqual(reply.method.klass.name, "basic")
self.assertEqual(reply.method.name, "get_ok")
self.assertEqual("Message %d" % i, reply.content.body)
channel.basic_ack(delivery_tag=reply.delivery_tag)
reply = channel.basic_get(no_ack=True)
self.assertEqual(reply.method.klass.name, "basic")
self.assertEqual(reply.method.name, "get_empty")
channel.basic_recover(requeue=True)
reply = channel.basic_get(no_ack=True)
self.assertEqual(reply.method.klass.name, "basic")
self.assertEqual(reply.method.name, "get_empty")
|
|
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility methods for working with WSGI servers
"""
from __future__ import print_function
import errno
import os
import socket
import ssl
import sys
import time
import eventlet.wsgi
from oslo_config import cfg
import oslo_i18n
from oslo_log import log as logging
from oslo_log import loggers
from oslo_serialization import jsonutils
from oslo_service import service as common_service
from oslo_service import systemd
from oslo_utils import excutils
import routes.middleware
import six
import webob.dec
import webob.exc
from neutron.common import config
from neutron.common import exceptions as exception
from neutron import context
from neutron.db import api
from neutron.i18n import _LE, _LI
# Configuration options controlling how the API server's listening sockets
# are created, secured (SSL) and tuned. Registered on the global CONF below.
socket_opts = [
    cfg.IntOpt('backlog',
               default=4096,
               help=_("Number of backlog requests to configure "
                      "the socket with")),
    cfg.IntOpt('tcp_keepidle',
               default=600,
               help=_("Sets the value of TCP_KEEPIDLE in seconds for each "
                      "server socket. Not supported on OS X.")),
    cfg.IntOpt('retry_until_window',
               default=30,
               help=_("Number of seconds to keep retrying to listen")),
    cfg.IntOpt('max_header_line',
               default=16384,
               help=_("Max header line to accommodate large tokens")),
    cfg.BoolOpt('use_ssl',
                default=False,
                help=_('Enable SSL on the API server')),
    cfg.StrOpt('ssl_ca_file',
               help=_("CA certificate file to use to verify "
                      "connecting clients")),
    cfg.StrOpt('ssl_cert_file',
               help=_("Certificate file to use when starting "
                      "the server securely")),
    cfg.StrOpt('ssl_key_file',
               help=_("Private key file to use when starting "
                      "the server securely")),
    cfg.BoolOpt('wsgi_keep_alive',
                default=True,
                help=_("Determines if connections are allowed to be held "
                       "open by clients after a request is fulfilled. A value "
                       "of False will ensure that the socket connection will "
                       "be explicitly closed once a response has been sent to "
                       "the client.")),
    cfg.IntOpt('client_socket_timeout', default=900,
               help=_("Timeout for client connections socket operations. "
                      "If an incoming connection is idle for this number of "
                      "seconds it will be closed. A value of '0' means "
                      "wait forever.")),
]

CONF = cfg.CONF
CONF.register_opts(socket_opts)

# Module-level logger for this WSGI utility module.
LOG = logging.getLogger(__name__)
def encode_body(body):
    """Return *body* encoded as UTF-8 bytes when it is a unicode string.

    WebOb requires a byte string when updating a response body, so text
    input is encoded; any other value is passed through unchanged.
    """
    is_text = isinstance(body, six.text_type)
    return body.encode('utf-8') if is_text else body
class WorkerService(common_service.ServiceBase):
    """Wraps a worker to be handled by ProcessLauncher."""

    def __init__(self, service, application):
        self._service = service
        self._application = application
        self._server = None

    def start(self):
        # When an api worker is stopped it kills the eventlet wsgi server,
        # which internally closes the wsgi server socket object. That socket
        # object then becomes unusable, leading to "Bad file descriptor"
        # errors on service restart.
        # Duplicate the socket object to keep a usable file descriptor.
        listener = self._service._socket.dup()
        if CONF.use_ssl:
            listener = self._service.wrap_ssl(listener)
        self._server = self._service.pool.spawn(
            self._service._run, self._application, listener)

    def wait(self):
        """Block until the wsgi greenthread (if any) finishes."""
        server = self._server
        if isinstance(server, eventlet.greenthread.GreenThread):
            server.wait()

    def stop(self):
        """Kill the wsgi greenthread (if any) and forget it."""
        server = self._server
        if isinstance(server, eventlet.greenthread.GreenThread):
            server.kill()
            self._server = None

    @staticmethod
    def reset():
        config.reset_service()
class Server(object):
"""Server class to manage multiple WSGI sockets and applications."""
def __init__(self, name, num_threads=1000):
# Raise the default from 8192 to accommodate large tokens
eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line
self.num_threads = num_threads
# Pool for a greenthread in which wsgi server will be running
self.pool = eventlet.GreenPool(1)
self.name = name
self._server = None
# A value of 0 is converted to None because None is what causes the
# wsgi server to wait forever.
self.client_socket_timeout = CONF.client_socket_timeout or None
if CONF.use_ssl:
self._check_ssl_settings()
def _get_socket(self, host, port, backlog):
bind_addr = (host, port)
# TODO(dims): eventlet's green dns/socket module does not actually
# support IPv6 in getaddrinfo(). We need to get around this in the
# future or monitor upstream for a fix
try:
info = socket.getaddrinfo(bind_addr[0],
bind_addr[1],
socket.AF_UNSPEC,
socket.SOCK_STREAM)[0]
family = info[0]
bind_addr = info[-1]
except Exception:
LOG.exception(_LE("Unable to listen on %(host)s:%(port)s"),
{'host': host, 'port': port})
sys.exit(1)
sock = None
retry_until = time.time() + CONF.retry_until_window
while not sock and time.time() < retry_until:
try:
sock = eventlet.listen(bind_addr,
backlog=backlog,
family=family)
except socket.error as err:
with excutils.save_and_reraise_exception() as ctxt:
if err.errno == errno.EADDRINUSE:
ctxt.reraise = False
eventlet.sleep(0.1)
if not sock:
raise RuntimeError(_("Could not bind to %(host)s:%(port)s "
"after trying for %(time)d seconds") %
{'host': host,
'port': port,
'time': CONF.retry_until_window})
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# sockets can hang around forever without keepalive
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
# This option isn't available in the OS X version of eventlet
if hasattr(socket, 'TCP_KEEPIDLE'):
sock.setsockopt(socket.IPPROTO_TCP,
socket.TCP_KEEPIDLE,
CONF.tcp_keepidle)
return sock
@staticmethod
def _check_ssl_settings():
if not os.path.exists(CONF.ssl_cert_file):
raise RuntimeError(_("Unable to find ssl_cert_file "
": %s") % CONF.ssl_cert_file)
# ssl_key_file is optional because the key may be embedded in the
# certificate file
if CONF.ssl_key_file and not os.path.exists(CONF.ssl_key_file):
raise RuntimeError(_("Unable to find "
"ssl_key_file : %s") % CONF.ssl_key_file)
# ssl_ca_file is optional
if CONF.ssl_ca_file and not os.path.exists(CONF.ssl_ca_file):
raise RuntimeError(_("Unable to find ssl_ca_file "
": %s") % CONF.ssl_ca_file)
@staticmethod
def wrap_ssl(sock):
ssl_kwargs = {'server_side': True,
'certfile': CONF.ssl_cert_file,
'keyfile': CONF.ssl_key_file,
'cert_reqs': ssl.CERT_NONE,
}
if CONF.ssl_ca_file:
ssl_kwargs['ca_certs'] = CONF.ssl_ca_file
ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED
return ssl.wrap_socket(sock, **ssl_kwargs)
def start(self, application, port, host='0.0.0.0', workers=0):
"""Run a WSGI server with the given application."""
self._host = host
self._port = port
backlog = CONF.backlog
self._socket = self._get_socket(self._host,
self._port,
backlog=backlog)
self._launch(application, workers)
def _launch(self, application, workers=0):
service = WorkerService(self, application)
if workers < 1:
# The API service should run in the current process.
self._server = service
# Dump the initial option values
cfg.CONF.log_opt_values(LOG, logging.DEBUG)
service.start()
systemd.notify_once()
else:
# dispose the whole pool before os.fork, otherwise there will
# be shared DB connections in child processes which may cause
# DB errors.
api.dispose()
# The API service runs in a number of child processes.
# Minimize the cost of checking for child exit by extending the
# wait interval past the default of 0.01s.
self._server = common_service.ProcessLauncher(cfg.CONF,
wait_interval=1.0)
self._server.launch_service(service, workers=workers)
@property
def host(self):
return self._socket.getsockname()[0] if self._socket else self._host
@property
def port(self):
return self._socket.getsockname()[1] if self._socket else self._port
def stop(self):
self._server.stop()
def wait(self):
"""Wait until all servers have completed running."""
try:
self._server.wait()
except KeyboardInterrupt:
pass
def _run(self, application, socket):
    """Start a WSGI server in a new green thread.

    Blocks serving requests on ``socket``; the greenthread pool size,
    log sink, keepalive and client timeout all come from configuration.
    """
    eventlet.wsgi.server(socket, application,
                         max_size=self.num_threads,
                         log=loggers.WritableLogger(LOG),
                         keepalive=CONF.wsgi_keep_alive,
                         socket_timeout=self.client_socket_timeout)
class Middleware(object):
    """Base WSGI middleware wrapper.

    These classes require an application to be initialized that will be
    called next.  By default the middleware simply calls its wrapped app;
    override ``__call__`` to customize the behavior.
    """

    @classmethod
    def factory(cls, global_config, **local_config):
        """Used for paste app factories in paste.deploy config files.

        Any local configuration (values under the [filter:APPNAME]
        section of the paste config) is passed to ``__init__`` as kwargs,
        e.g.::

            [filter:analytics]
            redis_host = 127.0.0.1
            paste.filter_factory = nova.api.analytics:Analytics.factory

        results in ``Analytics(app_from_paste, redis_host='127.0.0.1')``.
        """
        def _factory(app):
            return cls(app, **local_config)
        return _factory

    def __init__(self, application):
        self.application = application

    def process_request(self, req):
        """Called on each request.

        Return None to continue down the stack; return a response to
        short-circuit the pipeline and stop execution here.
        """
        return None

    def process_response(self, response):
        """Do whatever you'd like to the response."""
        return response

    @webob.dec.wsgify
    def __call__(self, req):
        short_circuit = self.process_request(req)
        if short_circuit:
            return short_circuit
        return self.process_response(req.get_response(self.application))
class Request(webob.Request):

    def best_match_content_type(self):
        """Determine the most acceptable content-type.

        Based on:
            1) URI extension (.json)
            2) Content-type header
            3) Accept* headers
        """
        # First lookup http request path
        parts = self.path.rsplit('.', 1)
        if len(parts) > 1:
            _format = parts[1]
            if _format in ['json']:
                return 'application/{0}'.format(_format)

        # Then look up content header
        type_from_header = self.get_content_type()
        if type_from_header:
            return type_from_header
        ctypes = ['application/json']

        # Finally search in Accept-* headers
        bm = self.accept.best_match(ctypes)
        return bm or 'application/json'

    def get_content_type(self):
        """Return the request Content-Type if present and supported."""
        # BUG FIX: the original wrote ("application/json") which is a plain
        # string (parentheses without a comma), so the membership test below
        # was a *substring* check and wrongly accepted values such as
        # "application" or "json".  A one-element tuple restores the
        # intended exact-match semantics.
        allowed_types = ("application/json",)
        if "Content-Type" not in self.headers:
            LOG.debug("Missing Content-Type")
            return None
        _type = self.content_type
        if _type in allowed_types:
            return _type
        return None

    def best_match_language(self):
        """Determines best available locale from the Accept-Language header.

        :returns: the best language match or None if the 'Accept-Language'
                  header was not available in the request.
        """
        if not self.accept_language:
            return None
        all_languages = oslo_i18n.get_available_languages('neutron')
        return self.accept_language.best_match(all_languages)

    @property
    def context(self):
        # Lazily create and cache an admin context on the WSGI environ.
        if 'neutron.context' not in self.environ:
            self.environ['neutron.context'] = context.get_admin_context()
        return self.environ['neutron.context']
class ActionDispatcher(object):
    """Maps an 'action' keyword to a same-named method on the instance."""

    def dispatch(self, *args, **kwargs):
        """Pop 'action' from kwargs and invoke the matching local method.

        Falls back to :meth:`default` when no method matches.
        """
        action = kwargs.pop('action', 'default')
        handler = getattr(self, str(action), self.default)
        return handler(*args, **kwargs)

    def default(self, data):
        """Subclasses must supply a fallback handler."""
        raise NotImplementedError()
class DictSerializer(ActionDispatcher):
    """Default request body serialization."""

    def serialize(self, data, action='default'):
        """Serialize ``data`` using the action-specific handler."""
        return self.dispatch(data, action=action)

    def default(self, data):
        # Unknown content types serialize to an empty body.
        return ""
class JSONDictSerializer(DictSerializer):
    """Default JSON request body serialization."""

    def default(self, data):
        # Non-JSON-native objects fall back to their text representation.
        return encode_body(jsonutils.dumps(data, default=six.text_type))
class ResponseHeaderSerializer(ActionDispatcher):
    """Default response headers serialization."""

    def serialize(self, response, data, action):
        """Apply the action-specific header serializer to ``response``."""
        self.dispatch(response, data, action=action)

    def default(self, response, data):
        # Plain success unless an action-specific handler overrides it.
        response.status_int = 200
class ResponseSerializer(object):
    """Encode the necessary pieces into a response object."""

    def __init__(self, body_serializers=None, headers_serializer=None):
        self.body_serializers = {
            'application/json': JSONDictSerializer(),
        }
        if body_serializers:
            self.body_serializers.update(body_serializers)
        self.headers_serializer = (headers_serializer or
                                   ResponseHeaderSerializer())

    def serialize(self, response_data, content_type, action='default'):
        """Serialize a dict into a webob.Response.

        :param response_data: dict produced by the Controller
        :param content_type: expected mimetype of serialized response body
        """
        response = webob.Response()
        self.serialize_headers(response, response_data, action)
        self.serialize_body(response, response_data, content_type, action)
        return response

    def serialize_headers(self, response, data, action):
        self.headers_serializer.serialize(response, data, action)

    def serialize_body(self, response, data, content_type, action):
        response.headers['Content-Type'] = content_type
        if data is None:
            # Nothing to serialize; leave the body empty.
            return
        body_serializer = self.get_body_serializer(content_type)
        response.body = body_serializer.serialize(data, action)

    def get_body_serializer(self, content_type):
        try:
            return self.body_serializers[content_type]
        except (KeyError, TypeError):
            # TypeError covers unhashable content types.
            raise exception.InvalidContentType(content_type=content_type)
class TextDeserializer(ActionDispatcher):
    """Default request body deserialization."""

    def deserialize(self, datastring, action='default'):
        """Deserialize ``datastring`` using the action-specific handler."""
        return self.dispatch(datastring, action=action)

    def default(self, datastring):
        # Unknown formats deserialize to an empty dict.
        return {}
class JSONDeserializer(TextDeserializer):
    """Deserializes JSON request bodies."""

    def _from_json(self, datastring):
        # Translate JSON parse errors into the API-level exception so the
        # caller can map them to a 400 response.
        try:
            return jsonutils.loads(datastring)
        except ValueError:
            msg = _("Cannot understand JSON")
            raise exception.MalformedRequestBody(reason=msg)

    def default(self, datastring):
        return {'body': self._from_json(datastring)}
class RequestHeadersDeserializer(ActionDispatcher):
    """Default request headers deserializer."""

    def deserialize(self, request, action):
        """Extract header-derived arguments for ``action``."""
        return self.dispatch(request, action=action)

    def default(self, request):
        # No headers are extracted unless an action-specific handler exists.
        return {}
class RequestDeserializer(object):
    """Break up a Request object into more useful pieces."""

    def __init__(self, body_deserializers=None, headers_deserializer=None):
        # JSON is always supported; callers may register more types.
        self.body_deserializers = {
            'application/json': JSONDeserializer(),
        }
        self.body_deserializers.update(body_deserializers or {})
        self.headers_deserializer = (headers_deserializer or
                                     RequestHeadersDeserializer())

    def deserialize(self, request):
        """Extract necessary pieces of the request.

        :param request: Request object
        :returns tuple of expected controller action name, dictionary of
                 keyword arguments to pass to the controller, the expected
                 content type of the response
        """
        action_args = self.get_action_args(request.environ)
        action = action_args.pop('action', None)
        # Header-derived args first, then body args may override them.
        action_args.update(self.deserialize_headers(request, action))
        action_args.update(self.deserialize_body(request, action))
        accept = self.get_expected_content_type(request)
        return (action, action_args, accept)

    def deserialize_headers(self, request, action):
        return self.headers_deserializer.deserialize(request, action)

    def deserialize_body(self, request, action):
        """Return body-derived kwargs, or {} when there is nothing usable."""
        try:
            content_type = request.best_match_content_type()
        except exception.InvalidContentType:
            LOG.debug("Unrecognized Content-Type provided in request")
            return {}
        if content_type is None:
            LOG.debug("No Content-Type provided in request")
            return {}
        if not len(request.body) > 0:
            LOG.debug("Empty body provided in request")
            return {}
        try:
            deserializer = self.get_body_deserializer(content_type)
        except exception.InvalidContentType:
            # Log, then re-raise for the caller to turn into a 400.
            with excutils.save_and_reraise_exception():
                LOG.debug("Unable to deserialize body as provided "
                          "Content-Type")
        return deserializer.deserialize(request.body, action)

    def get_body_deserializer(self, content_type):
        try:
            return self.body_deserializers[content_type]
        except (KeyError, TypeError):
            # TypeError covers unhashable content types.
            raise exception.InvalidContentType(content_type=content_type)

    def get_expected_content_type(self, request):
        return request.best_match_content_type()

    def get_action_args(self, request_environment):
        """Parse dictionary created by routes library."""
        try:
            args = request_environment['wsgiorg.routing_args'][1].copy()
        except Exception:
            # No routing match recorded; nothing to extract.
            return {}
        # 'controller' and 'format' are routing artifacts, not controller
        # keyword arguments.
        try:
            del args['controller']
        except KeyError:
            pass
        try:
            del args['format']
        except KeyError:
            pass
        return args
class Application(object):
    """Base WSGI application wrapper. Subclasses need to implement __call__."""

    @classmethod
    def factory(cls, global_config, **local_config):
        """Used for paste app factories in paste.deploy config files.

        Any local configuration (values under the [app:APPNAME] section
        of the paste config) is passed to ``__init__`` as kwargs, e.g.::

            [app:wadl]
            latest_version = 1.3
            paste.app_factory = nova.api.fancy_api:Wadl.factory

        results in ``Wadl(latest_version='1.3')``.  Re-implement in a
        subclass only if kwarg passing is insufficient.
        """
        return cls(**local_config)

    def __call__(self, environ, start_response):
        r"""Subclasses will probably want to implement __call__ like this:

        @webob.dec.wsgify(RequestClass=Request)
        def __call__(self, req):
            # Any of the following objects work as responses:
            #   - a simple string
            #   - a webob HTTP exception (e.g. exc.HTTPForbidden(...))
            #   - a webob Response object
            #   - any wsgi app to be run next (e.g. self.application)
            #   - req.get_response(self.application)
            # Return the response, or set req.response and return None.

        See the end of http://pythonpaste.org/webob/modules/dec.html
        for more info.
        """
        raise NotImplementedError(_('You must implement __call__'))
class Debug(Middleware):
    """Middleware for debugging.

    Dumps the request environ, response headers and response body to
    stdout as requests pass through the WSGI chain.
    """

    @webob.dec.wsgify
    def __call__(self, req):
        print(("*" * 40) + " REQUEST ENVIRON")
        for env_key, env_value in req.environ.items():
            print(env_key, "=", env_value)
        print()
        resp = req.get_response(self.application)
        print(("*" * 40) + " RESPONSE HEADERS")
        for (hdr_key, hdr_value) in six.iteritems(resp.headers):
            print(hdr_key, "=", hdr_value)
        print()
        # Wrap the body iterator so chunks are echoed as they stream out.
        resp.app_iter = self.print_generator(resp.app_iter)
        return resp

    @staticmethod
    def print_generator(app_iter):
        """Echo each body chunk to stdout while passing it through."""
        print(("*" * 40) + " BODY")
        for chunk in app_iter:
            sys.stdout.write(chunk)
            sys.stdout.flush()
            yield chunk
        print()
class Router(object):
    """WSGI middleware that maps incoming requests to WSGI apps."""

    def __init__(self, mapper):
        """Create a router for the given routes.Mapper.

        Each route in `mapper` must specify a 'controller', which is a
        WSGI app to call. You'll probably want to specify an 'action' as
        well and have your controller be a wsgi.Controller, who will route
        the request to the action method.

        Examples:
          mapper = routes.Mapper()
          sc = ServerController()

          # Explicit mapping of one route to a controller+action
          mapper.connect(None, "/svrlist", controller=sc, action="list")

          # Actions are all implicitly defined
          mapper.resource("network", "networks", controller=nc)

          # Pointing to an arbitrary WSGI app. You can specify the
          # {path_info:.*} parameter so the target app can be handed just
          # that section of the URL.
          mapper.connect(None, "/v1.0/{path_info:.*}", controller=BlogApp())
        """
        self.map = mapper
        # RoutesMiddleware performs the match and stores the result in
        # environ['wsgiorg.routing_args'] before calling _dispatch.
        self._router = routes.middleware.RoutesMiddleware(self._dispatch,
                                                          self.map)

    @webob.dec.wsgify
    def __call__(self, req):
        """Route the incoming request to a controller based on self.map.

        If no match, return a 404.
        """
        # Returning the RoutesMiddleware here makes wsgify run it as the
        # next app; it in turn calls _dispatch with routing args set.
        return self._router

    @staticmethod
    @webob.dec.wsgify(RequestClass=Request)
    def _dispatch(req):
        """Dispatch a Request.

        Called by self._router after matching the incoming request to a
        route and putting the information into req.environ. Either returns
        404 or the routed WSGI app's response.
        """
        match = req.environ['wsgiorg.routing_args'][1]
        if not match:
            # Localize the 404 message using the request's best language.
            language = req.best_match_language()
            msg = _('The resource could not be found.')
            msg = oslo_i18n.translate(msg, language)
            return webob.exc.HTTPNotFound(explanation=msg)
        app = match['controller']
        return app
class Resource(Application):
    """WSGI app that handles (de)serialization and controller dispatch.

    WSGI app that reads routing information supplied by RoutesMiddleware
    and calls the requested action method upon its controller. All
    controller action methods must accept a 'req' argument, which is the
    incoming wsgi.Request. If the operation is a PUT or POST, the
    controller method must also accept a 'body' argument (the deserialized
    request body). They may raise a webob.exc exception or return a dict,
    which will be serialized by requested content type.
    """

    def __init__(self, controller, fault_body_function,
                 deserializer=None, serializer=None):
        """Object initialization.

        :param controller: object that implement methods created by routes
                           lib
        :param deserializer: object that can serialize the output of a
                             controller into a webob response
        :param serializer: object that can deserialize a webob request
                           into necessary pieces
        :param fault_body_function: a function that will build the response
                                    body for HTTP errors raised by
                                    operations on this resource object
        """
        self.controller = controller
        self.deserializer = deserializer or RequestDeserializer()
        self.serializer = serializer or ResponseSerializer()
        self._fault_body_function = fault_body_function

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, request):
        """WSGI method that controls (de)serialization and method dispatch."""
        LOG.info(_LI("%(method)s %(url)s"),
                 {"method": request.method, "url": request.url})
        # Deserialization errors map to 400 before dispatch is attempted.
        try:
            action, args, accept = self.deserializer.deserialize(request)
        except exception.InvalidContentType:
            msg = _("Unsupported Content-Type")
            LOG.exception(_LE("InvalidContentType: %s"), msg)
            return Fault(webob.exc.HTTPBadRequest(explanation=msg))
        except exception.MalformedRequestBody:
            msg = _("Malformed request body")
            LOG.exception(_LE("MalformedRequestBody: %s"), msg)
            return Fault(webob.exc.HTTPBadRequest(explanation=msg))
        # Dispatch; HTTP exceptions pass through as Faults, anything else
        # becomes a 500 without leaking the traceback to the client.
        try:
            action_result = self.dispatch(request, action, args)
        except webob.exc.HTTPException as ex:
            LOG.info(_LI("HTTP exception thrown: %s"), ex)
            action_result = Fault(ex, self._fault_body_function)
        except Exception:
            LOG.exception(_LE("Internal error"))
            # Do not include the traceback to avoid returning it to clients.
            action_result = Fault(webob.exc.HTTPServerError(),
                                  self._fault_body_function)
        # Dict (or None) results are serialized; anything else is assumed
        # to already be a response-like object.
        if isinstance(action_result, dict) or action_result is None:
            response = self.serializer.serialize(action_result,
                                                 accept,
                                                 action=action)
        else:
            response = action_result
        try:
            LOG.info(_LI("%(url)s returned with HTTP %(status)d"),
                     dict(url=request.url, status=response.status_int))
        except AttributeError as e:
            # Faults have no status_int until they are rendered.
            LOG.info(_LI("%(url)s returned a fault: %(exception)s"),
                     dict(url=request.url, exception=e))
        return response

    def dispatch(self, request, action, action_args):
        """Find action-spefic method on controller and call it."""
        controller_method = getattr(self.controller, action)
        try:
            # NOTE(salvatore-orlando): the controller method must have
            # an argument whose name is 'request'
            return controller_method(request=request, **action_args)
        except TypeError as exc:
            # Signature mismatch between route args and the controller.
            LOG.exception(exc)
            return Fault(webob.exc.HTTPBadRequest())
def _default_body_function(wrapped_exc):
code = wrapped_exc.status_int
fault_data = {
'Error': {
'code': code,
'message': wrapped_exc.explanation}}
# 'code' is an attribute on the fault tag itself
metadata = {'attributes': {'Error': 'code'}}
return fault_data, metadata
class Fault(webob.exc.HTTPException):
    """Generates an HTTP response from a webob HTTP exception."""

    def __init__(self, exception, body_function=None):
        """Creates a Fault for the given webob.exc.exception.

        :param exception: the wrapped webob HTTP exception
        :param body_function: optional callable building (body, metadata)
                              from the wrapped exception
        """
        self.wrapped_exc = exception
        self.status_int = self.wrapped_exc.status_int
        self._body_function = body_function or _default_body_function

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        """Generate a WSGI response based on the exception passed to ctor."""
        # Replace the body with fault details.
        fault_data, metadata = self._body_function(self.wrapped_exc)
        content_type = req.best_match_content_type()
        # Only JSON serialization is supported for fault bodies.
        serializer = {
            'application/json': JSONDictSerializer(),
        }[content_type]
        self.wrapped_exc.body = serializer.serialize(fault_data)
        self.wrapped_exc.content_type = content_type
        return self.wrapped_exc
# NOTE(salvatore-orlando): this class will go once the
# extension API framework is updated
class Controller(object):
    """WSGI app that dispatched to methods.

    WSGI app that reads routing information supplied by RoutesMiddleware
    and calls the requested action method upon itself. All action methods
    must, in addition to their normal parameters, accept a 'req' argument
    which is the incoming wsgi.Request. They raise a webob.exc exception,
    or return a dict which will be serialized by requested content type.
    """

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        """Call the method specified in req.environ by RoutesMiddleware."""
        arg_dict = req.environ['wsgiorg.routing_args'][1]
        action = arg_dict['action']
        method = getattr(self, action)

        # Strip routing artifacts so only controller kwargs remain.
        del arg_dict['controller']
        del arg_dict['action']
        if 'format' in arg_dict:
            del arg_dict['format']
        arg_dict['request'] = req
        result = method(**arg_dict)

        if isinstance(result, dict) or result is None:
            if result is None:
                # No content: 204 with an empty body.
                status = 204
                content_type = ''
                body = None
            else:
                status = 200
                content_type = req.best_match_content_type()
                body = self._serialize(result, content_type)

            response = webob.Response(status=status,
                                      content_type=content_type,
                                      body=body)
            LOG.debug("%(url)s returned with HTTP %(status)d",
                      dict(url=req.url, status=response.status_int))
            return response
        else:
            # Already a response-like object; pass it through untouched.
            return result

    def _serialize(self, data, content_type):
        """Serialize the given dict to the provided content_type.

        Uses self._serialization_metadata if it exists, which is a dict
        mapping MIME types to information needed to serialize to that type.
        """
        _metadata = getattr(type(self), '_serialization_metadata', {})

        serializer = Serializer(_metadata)
        try:
            return serializer.serialize(data, content_type)
        except exception.InvalidContentType:
            msg = _('The requested content type %s is invalid.') % content_type
            raise webob.exc.HTTPNotAcceptable(msg)

    def _deserialize(self, data, content_type):
        """Deserialize the request body to the specefied content type.

        Uses self._serialization_metadata if it exists, which is a dict
        mapping MIME types to information needed to serialize to that type.
        """
        _metadata = getattr(type(self), '_serialization_metadata', {})
        serializer = Serializer(_metadata)
        return serializer.deserialize(data, content_type)['body']
# NOTE(salvatore-orlando): this class will go once the
# extension API framework is updated
class Serializer(object):
    """Serializes and deserializes dictionaries to certain MIME types."""

    def __init__(self, metadata=None):
        """Create a serializer based on the given WSGI environment.

        'metadata' is an optional dict mapping MIME types to information
        needed to serialize a dictionary to that type.
        """
        self.metadata = metadata or {}

    def _get_serialize_handler(self, content_type):
        handlers = {
            'application/json': JSONDictSerializer(),
        }

        try:
            return handlers[content_type]
        except Exception:
            # Broad on purpose: unhashable content types raise TypeError.
            raise exception.InvalidContentType(content_type=content_type)

    def serialize(self, data, content_type):
        """Serialize a dictionary into the specified content type."""
        return self._get_serialize_handler(content_type).serialize(data)

    def deserialize(self, datastring, content_type):
        """Deserialize a string to a dictionary.

        The string must be in the format of a supported MIME type.
        """
        try:
            return self.get_deserialize_handler(content_type).deserialize(
                datastring)
        except Exception:
            # Any parse or lookup failure maps to a 400 for the client.
            raise webob.exc.HTTPBadRequest(_("Could not deserialize data"))

    def get_deserialize_handler(self, content_type):
        handlers = {
            'application/json': JSONDeserializer(),
        }

        try:
            return handlers[content_type]
        except Exception:
            # Broad on purpose: unhashable content types raise TypeError.
            raise exception.InvalidContentType(content_type=content_type)
|
|
"""
This module implements various types of multipliers using algorithms like Booth's and Robertson's
"""
from BinPy.bittools.bittools import *
import itertools
def booths_multiply(multiplicand, multiplier, bits=None, signed=False):
    """
    Multiply the multiplicand ( binary represented ) and multiplier ( binary represented )
    based on Booth's multiplication algorithm.

    If signed is set to true the input_data is assumed to be in a signed binary representation
    The result is a signed binary string representing the product.

    USAGE:

    # Unsigned multiplication
    >>> booths_multiply('101', '111', 5, signed=False)
    '0000100011'

    # Passing 2s Complement Signed binary string
    >>> product = booths_multiply('0101','1001', 5, signed=True)
    >>> print ( product )
    '1111011101'
    >>> to_signed_int(product)
    -35

    # Passing - signed binary string
    >>> product = booths_multiply('101','-111', 5)
    >>> print ( product )
    '1111011101'
    >>> to_signed_int(product)
    -35
    """
    if bits is None:
        # DEV NOTE: Do not replace 0b in the inputs, the BinPyBits will
        # handle the same.
        if not signed:
            if "0b" not in multiplier:
                multiplier = multiplier.lstrip("0")
            if "0b" not in multiplicand:
                multiplicand = multiplicand.lstrip("0")
        # Infer the bit width from the longer operand; a "0b" prefix adds
        # two characters that are not bits.
        bits = max(len(multiplier), len(multiplicand))
        if bits == len(multiplier) and "0b" in multiplier:
            bits -= 2
        elif bits == len(multiplicand) and "0b" in multiplicand:
            bits -= 2
        if bits == 0:
            bits = 1

    len_input = bits

    multiplicand = BinPyBits(multiplicand, bits, signed)
    multiplier = BinPyBits(multiplier, bits, signed)

    # Accumulator layout: [ partial product (bits) | multiplier (bits) ].
    product = BitArray(int=0, length=bits)
    product += multiplier
    prev_bit = "0"

    # Widen the multiplicand to 2*bits so additions align with the
    # accumulator's upper half.
    multiplicand += BitArray(int=0, length=bits)

    for i in range(bits):
        # Booth recoding on the pair (current LSB, previously shifted-out
        # bit): "01" -> add the multiplicand, "10" -> subtract it,
        # "00"/"11" -> no arithmetic, just shift.
        check = product.bin[-1] + prev_bit

        if check == "01":
            product.int += multiplicand.int

        if check == "10":
            product.int -= multiplicand.int

        prev_bit = product.bin[-1]
        # Arithmetic right shift of the combined accumulator.
        product.int >>= 1

    return product.bin
def robertsons_multiply(multiplicand, multiplier, bits=None, signed=False):
    """
    Multiply the multiplicand ( binary represented ) and multiplier ( binary represented )
    based on Robertson's multiplication algorithm.

    If signed is set to true the input_data is assumed to be in a signed binary representation
    The result is a signed binary string representing the product.

    USAGE:

    # Unsigned multiplication
    >>> robertsons_multiply('101', '111', 5, signed=False)
    '0000100011'

    # Passing 2s Complement Signed binary string
    >>> product = robertsons_multiply('0101','1001', 5, signed=True)
    >>> print ( product )
    '1111011101'
    >>> to_signed_int(product)
    -35

    # Passing - signed binary string
    >>> product = robertsons_multiply('101','-111', 5)
    >>> print ( product )
    '1111011101'
    >>> to_signed_int(product)
    -35
    """
    if bits is None:
        # DEV NOTE: Do not replace 0b in the inputs, the BinPyBits will
        # handle the same.
        if not signed:
            if "0b" not in multiplier:
                multiplier = multiplier.lstrip("0")
            if "0b" not in multiplicand:
                multiplicand = multiplicand.lstrip("0")
        # Infer the bit width from the longer operand; a "0b" prefix adds
        # two characters that are not bits.
        bits = max(len(multiplier), len(multiplicand))
        if bits == len(multiplier) and "0b" in multiplier:
            bits -= 2
        elif bits == len(multiplicand) and "0b" in multiplicand:
            bits -= 2
        if bits == 0:
            bits = 1

    len_input = bits
    # One extra bit to hold the sign during the add/shift iterations.
    bits += 1

    multiplicand = BinPyBits(multiplicand, bits, signed)
    multiplier = BinPyBits(multiplier, bits, signed)

    # Accumulator layout: [ partial product (bits) | multiplier (bits) ].
    product = BitArray(int=0, length=bits)
    product += multiplier

    multiplicand += BitArray(int=0, length=bits)

    # Create a Bit Vector with length 1 greater than the length of product array
    # To handle overflow due to addition or subtraction
    addition_result = BitArray(int=0, length=bits * 2 + 1)

    for i in range(bits - 1):
        if product.bin[-1] == "1":
            addition_result.int = product.int + multiplicand.int
            # Neglecting the carry and hence handling the overflow ( if it
            # occurs)
            product.bin = addition_result.bin[1:]
        product.int >>= 1

    # Final correction step: subtract when the remaining LSB is set
    # (handles a negative multiplier in two's complement).
    if product.bin[-1] == "1":
        addition_result.int = product.int - multiplicand.int
        # Setting the last bit to 0 and ignoring the first bit
        product.bin = addition_result.bin[1:]

    # Do a final right shift
    product.int >>= 1

    # Trim / sign-extend to exactly twice the input width.
    len_result = 2 * len_input
    return BinPyBits(product.bin, len_result, signed).bin
def karatsuba_multiply(multiplier, multiplicand, bits=None, signed=False):
    """
    Multiply the multiplicand ( binary represented ) and multiplier ( binary represented )
    based on Karatsuba fast multiplication algorithm.

    If the signed is True, the input_data is assumed to be in a signed binary representation.
    The result is a signed binary string representing the product.

    USAGE:

    # Both Positive inputs
    >>> karatsuba_multiply('0101', '0111')
    '0000100011'

    # Passing 2s Complement Signed binary string as one input
    >>> product = karatsuba_multiply('0101','1001')
    >>> print ( product )
    '1111011101'
    >>> to_signed_int(product)
    -35

    # Passing - signed binary string
    >>> product = karatsuba_multiply('101','-111')
    >>> print ( product )
    '1111011101'
    >>> to_signed_int(product)
    -35
    """
    if bits is None:
        # DEV NOTE: Do not replace 0b in the inputs, the BinPyBits will
        # handle the same.
        if not signed:
            if "0b" not in multiplier:
                multiplier = multiplier.lstrip("0")
            if "0b" not in multiplicand:
                multiplicand = multiplicand.lstrip("0")
        # Infer the bit width from the longer operand; a "0b" prefix adds
        # two characters that are not bits.
        bits = max(len(multiplier), len(multiplicand))
        if bits == len(multiplier) and "0b" in multiplier:
            bits -= 2
        elif bits == len(multiplicand) and "0b" in multiplicand:
            bits -= 2
        if bits == 0:
            bits = 1

    multiplicand = BinPyBits(multiplicand, bits, signed)
    multiplier = BinPyBits(multiplier, bits, signed)

    sign_bit = None

    if (signed):
        # The following set of operations will run only during the first
        # pass, if the signed is set to True

        # Calculating the sign of the product: negative iff exactly one
        # operand is negative.
        if ((multiplicand.bin[0] == "1") ^ (multiplier.bin[0] == "1")):
            sign_bit = 1
        else:
            sign_bit = 0

        # Strip off the sign bit and convert to a BitArray with unsigned
        # binary string.
        multiplicand = BinPyBits(
            abs(multiplicand.int), bits, signed=False)
        multiplier = BinPyBits(
            abs(multiplier.int), bits, signed=False)

    # Base case of 0 bit multiplication. If length is 0 product is 0.
    if len(multiplier.bin) == 0 or len(multiplicand.bin) == 0:
        return "0"

    # Base case of 1 bit multiplication
    if len(multiplier) == 1:
        return "1" if (
            multiplier.bin == "1" and multiplicand.bin == "1") else "0"

    # Split point: low half has (bits - m) bits, high half has m bits.
    m = int(bits / 2)

    # x = x1*(2**m) + x0
    # y = y1*(2**m) + y0
    x1 = multiplicand.bin[:m]
    x0 = multiplicand.bin[m:]

    y1 = multiplier.bin[:m]
    y0 = multiplier.bin[m:]

    # Upper half of the bits
    z2 = karatsuba_multiply(x1, y1)

    # Lower half of the bits
    z0 = karatsuba_multiply(x0, y0)

    # ( x1 + x0 )( y1 + y0 )
    sum_term1 = int(x1, 2) + int(x0, 2)
    sum_term1 = bin(sum_term1).replace("0b", "").lstrip("0")

    sum_term2 = int(y1, 2) + int(y0, 2)
    sum_term2 = bin(sum_term2).replace("0b", "").lstrip("0")

    z1 = karatsuba_multiply(sum_term1, sum_term2)

    # Middle coefficient: z1 - z2 - z0.  NOTE: bin() keeps a "0b" (or
    # "-0b") prefix, which int(..., 2) below accepts.
    z1 = bin(int(z1, 2) - int(z2, 2) - int(z0, 2))

    # The "0" padding at the right is binary equivalent of left shift or
    # muliply with 2**bits
    abs_result = int((z2 + "0" * (2 * (bits - m))),
                     2) + int((z1 + "0" * (bits - m)),
                              2) + int(z0,
                                       2)

    # len_result = 2*length of multiplicand / multiplier
    len_result = 2 * bits

    # Converting to binary of 2ce the bit length of inputs
    abs_result = BinPyBits(abs_result, len_result, signed=False)

    # Reapply the product sign computed on the first (signed) pass.
    if sign_bit == 1:
        abs_result.int *= -1

    return abs_result.bin
def toom3_multiply(multiplier, multiplicand, bits=None, signed=False):
    """
    Multiply the multiplicand ( binary represented ) and multiplier ( binary represented )
    based on Toom Cook's multiplication algorithm - ( Toom-3; Km = Kn = 3 ).

    If the signed is True, the input_data is assumed to be in a signed binary representation.
    The result is a signed binary string representing the product.

    USAGE:

    # Both Positive inputs
    >>> toom3_multiply('0101', '0111')
    '0000100011'

    # Passing 2s Complement Signed binary string as one input
    >>> product = toom3_multiply('0101','1001')
    >>> print ( product )
    '1111011101'
    >>> to_signed_int(product)
    -35

    # Passing - signed binary string
    >>> product = toom3_multiply('101','-111')
    >>> print ( product )
    '1111011101'
    >>> to_signed_int(product)
    -35
    """
    if bits is None:
        # DEV NOTE: Do not replace 0b in the inputs, the BinPyBits will
        # handle the same.
        if not signed:
            if "0b" not in multiplier:
                multiplier = multiplier.lstrip("0")
            if "0b" not in multiplicand:
                multiplicand = multiplicand.lstrip("0")
        bits = max(len(multiplier), len(multiplicand))
        if bits == len(multiplier) and "0b" in multiplier:
            bits -= 2
        elif bits == len(multiplicand) and "0b" in multiplicand:
            bits -= 2
        if bits == 0:
            bits = 1

    len_input = bits
    # Toom-3 splits each operand into three limbs; pad to a multiple of 3.
    while bits % 3 != 0:
        bits += 1

    multiplicand = BinPyBits(multiplicand, bits, signed)
    multiplier = BinPyBits(multiplier, bits, signed)

    sign_bit = None

    if (signed):
        # Product is negative iff exactly one operand is negative.
        if ((multiplicand.bin[0] == "1") ^ (multiplier.bin[0] == "1")):
            sign_bit = 1
        else:
            sign_bit = 0

        # Work with magnitudes below; the sign is reapplied at the end.
        multiplicand = BinPyBits(
            abs(multiplicand.int), bits, signed=False)
        multiplier = BinPyBits(
            abs(multiplier.int), bits, signed=False)

    # Base case of 0 bit multiplication. If length is 0 product is 0.
    if len(
            multiplier.bin.lstrip("0")) == 0 or len(
            multiplicand.bin.lstrip("0")) == 0:
        return "0"

    # Base case of 1 bit multiplication
    if len(multiplier.bin.lstrip("0")) == 1:
        return "1" if (
            multiplier.bin[-1] == "1" and multiplicand.bin[-1] == "1") else "0"

    m = bits // 3
    B = 2 ** m

    # x = x2*(B**2) + x1*(B) + x0 <= p(t = B)
    # y = y2*(B**2) + y1*(B) + y0 <= q(t = B)
    x2 = BinPyBits(multiplicand.bin[:m])
    x1 = BinPyBits(multiplicand.bin[m:2 * m])
    x0 = BinPyBits(multiplicand.bin[2 * m:])

    y2 = BinPyBits(multiplier.bin[:m])
    y1 = BinPyBits(multiplier.bin[m:2 * m])
    y0 = BinPyBits(multiplier.bin[2 * m:])

    # Multipoint evaluation at t = 0, 1, -1, -2, inf using Marco Bodrato's
    # sequence.  Plain ints are used so negative intermediate values are
    # handled exactly (the original took .uint of possibly-negative
    # BinPyBits, and — the main bug — evaluated q from the x limbs).
    p = {}
    q = {}

    p_o = x0.uint + x2.uint
    p[0] = x0.uint
    p[1] = p_o + x1.uint
    p[-1] = p_o - x1.uint                      # Can be negative
    p[-2] = 2 * (p[-1] + x2.uint) - x0.uint    # Can be negative
    p['inf'] = x2.uint

    # BUG FIX: q must be built from the *multiplier* limbs y0/y1/y2.
    q_o = y0.uint + y2.uint
    q[0] = y0.uint
    q[1] = q_o + y1.uint
    q[-1] = q_o - y1.uint                      # Can be negative
    q[-2] = 2 * (q[-1] + y2.uint) - y0.uint    # Can be negative
    q['inf'] = y2.uint

    # Pointwise products r(t) = p(t) * q(t), recursing on magnitudes since
    # the entry point expects unsigned binary strings, then reapplying the
    # sign of each point product.
    r = {}
    for point in (0, 1, -1, -2, 'inf'):
        point_sign = -1 if ((p[point] < 0) ^ (q[point] < 0)) else 1
        magnitude = BinPyBits(toom3_multiply(bin(abs(p[point])),
                                             bin(abs(q[point])))).uint
        r[point] = point_sign * magnitude

    # Interpolation back to the product polynomial coefficients using
    # Marco Bodrato's sequence.  All divisions are exact, so integer
    # floor-division is used (the original used '/', producing floats
    # under Python 3, and added an int to a BitArray via 2 * r['inf']).
    r0 = r[0]
    r4 = r['inf']
    r3 = (r[-2] - r[1]) // 3
    r1 = (r[1] - r[-1]) // 2
    r2 = r[-1] - r[0]
    r3 = (r2 - r3) // 2 + 2 * r4
    r2 = r2 + r1 - r4
    r1 = r1 - r3

    # Recompose at t = B.  BUG FIX: the coefficients scale by powers of B
    # (B**k), not by k*B as the original computed.
    product = r4 * B ** 4 + r3 * B ** 3 + r2 * B ** 2 + r1 * B + r0

    product = BinPyBits(product, 2 * len_input)

    if sign_bit == 1:
        product.int *= -1

    return product.bin
|
|
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import simplejson as json
except ImportError:
import json
import unittest
from swift.common.swob import Request, Response
from swift.common.middleware import staticweb
# Canned per-container metadata, keyed by container name, returned by
# mock_get_container_info() below.  Each entry mimics what the real
# get_container_info() helper would report: an optional 'status' and the
# container 'meta' that drives staticweb (web-index, web-error,
# web-listings, web-listings-css, web-directory-type).
meta_map = {
    'c1': {'status': 401},
    'c2': {},
    'c3': {'meta': {'web-index': 'index.html', 'web-listings': 't'}},
    'c3b': {'meta': {'web-index': 'index.html', 'web-listings': 't'}},
    'c4': {'meta': {'web-index': 'index.html',
                    'web-error': 'error.html',
                    'web-listings': 't',
                    'web-listings-css': 'listing.css',
                    'web-directory-type': 'text/dir'}},
    'c5': {'meta': {'web-index': 'index.html',
                    'web-error': 'error.html',
                    'web-listings': 't',
                    'web-listings-css': 'listing.css'}},
    'c6': {'meta': {'web-listings': 't'}},
    'c7': {'meta': {'web-listings': 'f'}},
    'c8': {'meta': {'web-error': 'error.html',
                    'web-listings': 't',
                    'web-listings-css':
                        'http://localhost/stylesheets/listing.css'}},
    'c9': {'meta': {'web-error': 'error.html',
                    'web-listings': 't',
                    'web-listings-css': '/absolute/listing.css'}},
    'c10': {'meta': {'web-listings': 't'}},
    'c11': {'meta': {'web-index': 'index.html'}},
    'c11a': {'meta': {'web-index': 'index.html',
                      'web-directory-type': 'text/directory'}},
    'c12': {'meta': {'web-index': 'index.html',
                     'web-error': 'error.html'}},
}
def mock_get_container_info(env, app, swift_source='SW'):
    """Stand-in for staticweb.get_container_info.

    Looks up the container named in env['PATH_INFO'] in the module-level
    meta_map fixture and returns its info with 'status' and 'read_acl'
    defaults filled in.

    The returned dict is a (shallow) copy: the original code called
    setdefault() directly on the shared meta_map entries, permanently
    mutating the fixture and leaking state between test cases.
    """
    container = env['PATH_INFO'].rstrip('/').split('/')[3]
    # Shallow copy is enough -- only top-level keys are added below.
    container_info = dict(meta_map[container])
    container_info.setdefault('status', 200)
    container_info.setdefault('read_acl', '.r:*')
    return container_info
class FakeApp(object):
    """WSGI stub standing in for the rest of the Swift proxy pipeline.

    ``__call__`` answers a fixed set of account/container/object paths with
    canned responses; :meth:`listing` fabricates container GET responses
    (plain-text or JSON) keyed off ``PATH_INFO`` and ``QUERY_STRING``.
    The staticweb tests drive the middleware against these fixtures.
    """
    def __init__(self, status_headers_body_iter=None):
        # ``status_headers_body_iter`` is accepted for signature parity with
        # other FakeApp fixtures in the test suite but is unused here.
        # ``calls`` counts WSGI invocations (used to assert the middleware
        # makes only one subrequest); ``get_c4_called`` records whether the
        # /v1/a/c4 container listing was requested.
        self.calls = 0
        self.get_c4_called = False
    def __call__(self, env, start_response):
        """Return a canned response for the exact PATH_INFO requested.

        Unknown paths raise, so tests fail loudly on unexpected subrequests.
        """
        self.calls += 1
        if env['PATH_INFO'] == '/':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1':
            return Response(
                status='412 Precondition Failed')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a':
            return Response(status='401 Unauthorized')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c1':
            return Response(status='401 Unauthorized')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c2':
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c2/one.txt':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3':
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/index.html':
            return Response(status='200 Ok', body='''
<html>
<body>
<h1>Test main index.html file.</h1>
<p>Visit <a href="subdir">subdir</a>.</p>
<p>Don't visit <a href="subdir2/">subdir2</a> because it doesn't really
exist.</p>
<p>Visit <a href="subdir3">subdir3</a>.</p>
<p>Visit <a href="subdir3/subsubdir">subdir3/subsubdir</a>.</p>
</body>
</html>
''')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3b':
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3b/index.html':
            # 204 with an empty app_iter: exercises body-less index objects.
            resp = Response(status='204 No Content')
            resp.app_iter = iter([])
            return resp(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdir':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdir/':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdir/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdir3/subsubdir':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdir3/subsubdir/':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdir3/subsubdir/index.html':
            return Response(status='200 Ok', body='index file')(env,
                                                                start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdirx/':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdirx/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdiry/':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdiry/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdirz':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/subdirz/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/unknown':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c3/unknown/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c4':
            self.get_c4_called = True
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c4/one.txt':
            return Response(status='200 Ok',
                            headers={'x-object-meta-test': 'value'},
                            body='1')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c4/two.txt':
            return Response(status='503 Service Unavailable')(env,
                                                              start_response)
        elif env['PATH_INFO'] == '/v1/a/c4/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c4/subdir/':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c4/subdir/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c4/unknown':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c4/unknown/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c4/404error.html':
            return Response(status='200 Ok', body='''
<html>
<body style="background: #000000; color: #ffaaaa">
<p>Chrome's 404 fancy-page sucks.</p>
</body>
</html>
'''.strip())(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c5':
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c5/index.html':
            return Response(status='503 Service Unavailable')(env,
                                                              start_response)
        elif env['PATH_INFO'] == '/v1/a/c5/503error.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c5/unknown':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c5/unknown/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c5/404error.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c6':
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c6/subdir':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] in ('/v1/a/c7', '/v1/a/c7/'):
            return self.listing(env, start_response)
        elif env['PATH_INFO'] in ('/v1/a/c8', '/v1/a/c8/'):
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c8/subdir/':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] in ('/v1/a/c9', '/v1/a/c9/'):
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c9/subdir/':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] in ('/v1/a/c10', '/v1/a/c10/'):
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c10/\xe2\x98\x83/':
            # UTF-8 encoded snowman (u2603) path segments.
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c10/\xe2\x98\x83/\xe2\x98\x83/':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] in ('/v1/a/c11', '/v1/a/c11/'):
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c11/subdir/':
            return Response(status='200 Ok', headers={'Content-Type':\
                            'application/directory'})(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c11/subdir/index.html':
            return Response(status='200 Ok', body='''
<html>
<body>
<h2>c11 subdir index</h2>
</body>
</html>
'''.strip())(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c11/subdir2/':
            return Response(status='200 Ok', headers={'Content-Type':\
                            'application/directory'})(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c11/subdir2/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] in ('/v1/a/c11a', '/v1/a/c11a/'):
            return self.listing(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c11a/subdir/':
            return Response(status='200 Ok', headers={'Content-Type':\
                            'text/directory'})(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c11a/subdir/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c11a/subdir2/':
            return Response(status='200 Ok', headers={'Content-Type':\
                            'application/directory'})(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c11a/subdir2/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c11a/subdir3/':
            return Response(status='200 Ok', headers={'Content-Type':\
                            'not_a/directory'})(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c11a/subdir3/index.html':
            return Response(status='404 Not Found')(env, start_response)
        elif env['PATH_INFO'] == '/v1/a/c12/index.html':
            return Response(status='200 Ok', body='index file')(env,
                                                                start_response)
        elif env['PATH_INFO'] == '/v1/a/c12/200error.html':
            return Response(status='200 Ok', body='error file')(env,
                                                                start_response)
        else:
            raise Exception('Unknown path %r' % env['PATH_INFO'])
    def listing(self, env, start_response):
        """Fabricate a container GET response.

        Specific (PATH_INFO, QUERY_STRING) combinations get hand-crafted
        JSON sub-listings; any other ``prefix=`` query gets a 204; plain
        ``format=json`` gets the full 12-object JSON listing; everything
        else gets a plain-text listing of object names.
        """
        headers = {'x-container-read': '.r:*'}
        if env['PATH_INFO'] in ('/v1/a/c3', '/v1/a/c4', '/v1/a/c8', \
                '/v1/a/c9') and \
                env['QUERY_STRING'] == 'delimiter=/&format=json&prefix=subdir/':
            headers.update({'X-Container-Object-Count': '12',
                            'X-Container-Bytes-Used': '73763',
                            'X-Container-Read': '.r:*',
                            'Content-Type': 'application/json; charset=utf-8'})
            body = '''
[{"name":"subdir/1.txt",
"hash":"5f595114a4b3077edfac792c61ca4fe4", "bytes":20,
"content_type":"text/plain",
"last_modified":"2011-03-24T04:27:52.709100"},
{"name":"subdir/2.txt",
"hash":"c85c1dcd19cf5cbac84e6043c31bb63e", "bytes":20,
"content_type":"text/plain",
"last_modified":"2011-03-24T04:27:52.734140"},
{"subdir":"subdir3/subsubdir/"}]
'''.strip()
        elif env['PATH_INFO'] == '/v1/a/c3' and env['QUERY_STRING'] == \
                'delimiter=/&format=json&prefix=subdiry/':
            headers.update({'X-Container-Object-Count': '12',
                            'X-Container-Bytes-Used': '73763',
                            'X-Container-Read': '.r:*',
                            'Content-Type': 'application/json; charset=utf-8'})
            # Empty listing: subdiry/ has no objects.
            body = '[]'
        elif env['PATH_INFO'] == '/v1/a/c3' and env['QUERY_STRING'] == \
                'limit=1&format=json&delimiter=/&limit=1&prefix=subdirz/':
            headers.update({'X-Container-Object-Count': '12',
                            'X-Container-Bytes-Used': '73763',
                            'X-Container-Read': '.r:*',
                            'Content-Type': 'application/json; charset=utf-8'})
            body = '''
[{"name":"subdirz/1.txt",
"hash":"5f595114a4b3077edfac792c61ca4fe4", "bytes":20,
"content_type":"text/plain",
"last_modified":"2011-03-24T04:27:52.709100"}]
'''.strip()
        elif env['PATH_INFO'] == '/v1/a/c6' and env['QUERY_STRING'] == \
                'limit=1&format=json&delimiter=/&limit=1&prefix=subdir/':
            headers.update({'X-Container-Object-Count': '12',
                            'X-Container-Bytes-Used': '73763',
                            'X-Container-Read': '.r:*',
                            'X-Container-Web-Listings': 't',
                            'Content-Type': 'application/json; charset=utf-8'})
            body = '''
[{"name":"subdir/1.txt",
"hash":"5f595114a4b3077edfac792c61ca4fe4", "bytes":20,
"content_type":"text/plain",
"last_modified":"2011-03-24T04:27:52.709100"}]
'''.strip()
        elif env['PATH_INFO'] == '/v1/a/c10' and (env['QUERY_STRING'] == \
                'delimiter=/&format=json&prefix=%E2%98%83/' or
                env['QUERY_STRING'] == \
                'delimiter=/&format=json&prefix=%E2%98%83/%E2%98%83/'):
            headers.update({'X-Container-Object-Count': '12',
                            'X-Container-Bytes-Used': '73763',
                            'X-Container-Read': '.r:*',
                            'X-Container-Web-Listings': 't',
                            'Content-Type': 'application/json; charset=utf-8'})
            body = '''
[{"name":"\u2603/\u2603/one.txt",
"hash":"73f1dd69bacbf0847cc9cffa3c6b23a1", "bytes":22,
"content_type":"text/plain",
"last_modified":"2011-03-24T04:27:52.709100"},
{"subdir":"\u2603/\u2603/"}]
'''.strip()
        elif 'prefix=' in env['QUERY_STRING']:
            return Response(status='204 No Content')(env, start_response)
        elif 'format=json' in env['QUERY_STRING']:
            headers.update({'X-Container-Object-Count': '12',
                            'X-Container-Bytes-Used': '73763',
                            'Content-Type': 'application/json; charset=utf-8'})
            body = '''
[{"name":"401error.html",
"hash":"893f8d80692a4d3875b45be8f152ad18", "bytes":110,
"content_type":"text/html",
"last_modified":"2011-03-24T04:27:52.713710"},
{"name":"404error.html",
"hash":"62dcec9c34ed2b347d94e6ca707aff8c", "bytes":130,
"content_type":"text/html",
"last_modified":"2011-03-24T04:27:52.720850"},
{"name":"index.html",
"hash":"8b469f2ca117668a5131fe9ee0815421", "bytes":347,
"content_type":"text/html",
"last_modified":"2011-03-24T04:27:52.683590"},
{"name":"listing.css",
"hash":"7eab5d169f3fcd06a08c130fa10c5236", "bytes":17,
"content_type":"text/css",
"last_modified":"2011-03-24T04:27:52.721610"},
{"name":"one.txt", "hash":"73f1dd69bacbf0847cc9cffa3c6b23a1",
"bytes":22, "content_type":"text/plain",
"last_modified":"2011-03-24T04:27:52.722270"},
{"name":"subdir/1.txt",
"hash":"5f595114a4b3077edfac792c61ca4fe4", "bytes":20,
"content_type":"text/plain",
"last_modified":"2011-03-24T04:27:52.709100"},
{"name":"subdir/2.txt",
"hash":"c85c1dcd19cf5cbac84e6043c31bb63e", "bytes":20,
"content_type":"text/plain",
"last_modified":"2011-03-24T04:27:52.734140"},
{"name":"subdir/\u2603.txt",
"hash":"7337d028c093130898d937c319cc9865", "bytes":72981,
"content_type":"text/plain",
"last_modified":"2011-03-24T04:27:52.735460"},
{"name":"subdir2", "hash":"d41d8cd98f00b204e9800998ecf8427e",
"bytes":0, "content_type":"text/directory",
"last_modified":"2011-03-24T04:27:52.676690"},
{"name":"subdir3/subsubdir/index.html",
"hash":"04eea67110f883b1a5c97eb44ccad08c", "bytes":72,
"content_type":"text/html",
"last_modified":"2011-03-24T04:27:52.751260"},
{"name":"two.txt", "hash":"10abb84c63a5cff379fdfd6385918833",
"bytes":22, "content_type":"text/plain",
"last_modified":"2011-03-24T04:27:52.825110"},
{"name":"\u2603/\u2603/one.txt",
"hash":"73f1dd69bacbf0847cc9cffa3c6b23a1", "bytes":22,
"content_type":"text/plain",
"last_modified":"2011-03-24T04:27:52.935560"}]
'''.strip()
        else:
            headers.update({'X-Container-Object-Count': '12',
                            'X-Container-Bytes-Used': '73763',
                            'Content-Type': 'text/plain; charset=utf-8'})
            body = '\n'.join(['401error.html', '404error.html', 'index.html',
                              'listing.css', 'one.txt', 'subdir/1.txt',
                              'subdir/2.txt', u'subdir/\u2603.txt', 'subdir2',
                              'subdir3/subsubdir/index.html', 'two.txt',
                              u'\u2603/\u2603/one.txt'])
        return Response(status='200 Ok', headers=headers,
                        body=body)(env, start_response)
class TestStaticWeb(unittest.TestCase):
    """Exercise the staticweb middleware against the FakeApp fixtures.

    Each ``cN`` container in ``meta_map`` carries a different combination
    of web-index / web-error / web-listings metadata; the tests verify the
    middleware's redirect, index-file, listing, CSS and error-page
    behavior for each.

    Note: the deprecated ``assertEquals``/``assert_`` aliases (removed in
    Python 3.12) have been replaced with ``assertEqual``/``assertTrue``.
    """

    def setUp(self):
        self.app = FakeApp()
        self.test_staticweb = staticweb.filter_factory({})(self.app)
        # Monkey-patch the container-info lookup so no real backend
        # requests happen; restored in tearDown().
        self._orig_get_container_info = staticweb.get_container_info
        staticweb.get_container_info = mock_get_container_info

    def tearDown(self):
        staticweb.get_container_info = self._orig_get_container_info

    def test_app_set(self):
        app = FakeApp()
        sw = staticweb.filter_factory({})(app)
        self.assertEqual(sw.app, app)

    def test_conf_set(self):
        conf = {'blah': 1}
        sw = staticweb.filter_factory(conf)(FakeApp())
        self.assertEqual(sw.conf, conf)

    def test_root(self):
        resp = Request.blank('/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 404)

    def test_version(self):
        resp = Request.blank('/v1').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 412)

    def test_account(self):
        resp = Request.blank('/v1/a').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 401)

    def test_container1(self):
        resp = Request.blank('/v1/a/c1').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 401)

    def test_container1_web_mode_explicitly_off(self):
        resp = Request.blank('/v1/a/c1',
            headers={'x-web-mode': 'false'}).get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 401)

    def test_container1_web_mode_explicitly_on(self):
        resp = Request.blank('/v1/a/c1',
            headers={'x-web-mode': 'true'}).get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 404)

    def test_container2(self):
        resp = Request.blank('/v1/a/c2').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.content_type, 'text/plain')
        self.assertEqual(len(resp.body.split('\n')),
                         int(resp.headers['x-container-object-count']))

    def test_container2_web_mode_explicitly_off(self):
        resp = Request.blank('/v1/a/c2',
            headers={'x-web-mode': 'false'}).get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.content_type, 'text/plain')
        self.assertEqual(len(resp.body.split('\n')),
                         int(resp.headers['x-container-object-count']))

    def test_container2_web_mode_explicitly_on(self):
        resp = Request.blank('/v1/a/c2',
            headers={'x-web-mode': 'true'}).get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 404)

    def test_container2onetxt(self):
        resp = Request.blank(
            '/v1/a/c2/one.txt').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 404)

    def test_container2json(self):
        resp = Request.blank(
            '/v1/a/c2?format=json').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.content_type, 'application/json')
        self.assertEqual(len(json.loads(resp.body)),
                         int(resp.headers['x-container-object-count']))

    def test_container2json_web_mode_explicitly_off(self):
        resp = Request.blank('/v1/a/c2?format=json',
            headers={'x-web-mode': 'false'}).get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.content_type, 'application/json')
        self.assertEqual(len(json.loads(resp.body)),
                         int(resp.headers['x-container-object-count']))

    def test_container2json_web_mode_explicitly_on(self):
        resp = Request.blank('/v1/a/c2?format=json',
            headers={'x-web-mode': 'true'}).get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 404)

    def test_container3(self):
        # No trailing slash: middleware should redirect to the slashed form.
        resp = Request.blank('/v1/a/c3').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 301)
        self.assertEqual(resp.headers['location'],
                         'http://localhost/v1/a/c3/')

    def test_container3indexhtml(self):
        resp = Request.blank('/v1/a/c3/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertTrue('Test main index.html file.' in resp.body)

    def test_container3subsubdir(self):
        resp = Request.blank(
            '/v1/a/c3/subdir3/subsubdir').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 301)

    def test_container3subsubdircontents(self):
        resp = Request.blank(
            '/v1/a/c3/subdir3/subsubdir/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.body, 'index file')

    def test_container3subdir(self):
        # No web-listings-css on c3: listing uses an inline <style> block.
        resp = Request.blank(
            '/v1/a/c3/subdir/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertTrue('Listing of /v1/a/c3/subdir/' in resp.body)
        self.assertTrue('</style>' in resp.body)
        self.assertTrue('<link' not in resp.body)
        self.assertTrue('listing.css' not in resp.body)

    def test_container3subdirx(self):
        resp = Request.blank(
            '/v1/a/c3/subdirx/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 404)

    def test_container3subdiry(self):
        resp = Request.blank(
            '/v1/a/c3/subdiry/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 404)

    def test_container3subdirz(self):
        resp = Request.blank(
            '/v1/a/c3/subdirz').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 301)

    def test_container3unknown(self):
        # c3 has no web-error page, so no custom 404 body.
        resp = Request.blank(
            '/v1/a/c3/unknown').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 404)
        self.assertTrue("Chrome's 404 fancy-page sucks." not in resp.body)

    def test_container3bindexhtml(self):
        resp = Request.blank('/v1/a/c3b/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 204)
        self.assertEqual(resp.body, '')

    def test_container4indexhtml(self):
        resp = Request.blank('/v1/a/c4/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertTrue('Listing of /v1/a/c4/' in resp.body)
        self.assertTrue('href="listing.css"' in resp.body)

    def test_container4indexhtmlauthed(self):
        resp = Request.blank('/v1/a/c4').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 301)
        # Authed requests bypass staticweb unless x-web-mode forces it on.
        resp = Request.blank('/v1/a/c4',
            environ={'REMOTE_USER': 'authed'}).get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        resp = Request.blank('/v1/a/c4', headers={'x-web-mode': 't'},
            environ={'REMOTE_USER': 'authed'}).get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 301)

    def test_container4unknown(self):
        # c4 has a web-error page, so the custom 404 body is served.
        resp = Request.blank(
            '/v1/a/c4/unknown').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 404)
        self.assertTrue("Chrome's 404 fancy-page sucks." in resp.body)

    def test_container4subdir(self):
        resp = Request.blank(
            '/v1/a/c4/subdir/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertTrue('Listing of /v1/a/c4/subdir/' in resp.body)
        self.assertTrue('</style>' not in resp.body)
        self.assertTrue('<link' in resp.body)
        self.assertTrue('href="../listing.css"' in resp.body)
        self.assertEqual(resp.headers['content-type'],
                         'text/html; charset=UTF-8')

    def test_container4onetxt(self):
        resp = Request.blank(
            '/v1/a/c4/one.txt').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)

    def test_container4twotxt(self):
        resp = Request.blank(
            '/v1/a/c4/two.txt').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 503)

    def test_container5indexhtml(self):
        resp = Request.blank('/v1/a/c5/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 503)

    def test_container5unknown(self):
        resp = Request.blank(
            '/v1/a/c5/unknown').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 404)
        self.assertTrue("Chrome's 404 fancy-page sucks." not in resp.body)

    def test_container6subdir(self):
        resp = Request.blank(
            '/v1/a/c6/subdir').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 301)

    def test_container7listing(self):
        # web-listings explicitly 'f': listings are disabled.
        resp = Request.blank('/v1/a/c7/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 404)

    def test_container8listingcss(self):
        resp = Request.blank(
            '/v1/a/c8/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertTrue('Listing of /v1/a/c8/' in resp.body)
        self.assertTrue('<link' in resp.body)
        self.assertTrue(
            'href="http://localhost/stylesheets/listing.css"' in resp.body)

    def test_container8subdirlistingcss(self):
        resp = Request.blank(
            '/v1/a/c8/subdir/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertTrue('Listing of /v1/a/c8/subdir/' in resp.body)
        self.assertTrue('<link' in resp.body)
        self.assertTrue(
            'href="http://localhost/stylesheets/listing.css"' in resp.body)

    def test_container9listingcss(self):
        resp = Request.blank(
            '/v1/a/c9/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertTrue('Listing of /v1/a/c9/' in resp.body)
        self.assertTrue('<link' in resp.body)
        self.assertTrue('href="/absolute/listing.css"' in resp.body)

    def test_container9subdirlistingcss(self):
        resp = Request.blank(
            '/v1/a/c9/subdir/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertTrue('Listing of /v1/a/c9/subdir/' in resp.body)
        self.assertTrue('<link' in resp.body)
        self.assertTrue('href="/absolute/listing.css"' in resp.body)

    def test_container10unicodesubdirlisting(self):
        resp = Request.blank(
            '/v1/a/c10/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertTrue('Listing of /v1/a/c10/' in resp.body)
        resp = Request.blank(
            '/v1/a/c10/\xe2\x98\x83/').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertTrue('Listing of /v1/a/c10/\xe2\x98\x83/' in resp.body)
        resp = Request.blank(
            '/v1/a/c10/\xe2\x98\x83/\xe2\x98\x83/'
        ).get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertTrue(
            'Listing of /v1/a/c10/\xe2\x98\x83/\xe2\x98\x83/' in resp.body)

    def test_container11subdirmarkerobjectindex(self):
        resp = Request.blank('/v1/a/c11/subdir/').get_response(
            self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertTrue('<h2>c11 subdir index</h2>' in resp.body)

    def test_container11subdirmarkermatchdirtype(self):
        resp = Request.blank('/v1/a/c11a/subdir/').get_response(
            self.test_staticweb)
        self.assertEqual(resp.status_int, 404)

    def test_container11subdirmarkeraltdirtype(self):
        resp = Request.blank('/v1/a/c11a/subdir2/').get_response(
            self.test_staticweb)
        self.assertEqual(resp.status_int, 200)

    def test_container11subdirmarkerinvaliddirtype(self):
        resp = Request.blank('/v1/a/c11a/subdir3/').get_response(
            self.test_staticweb)
        self.assertEqual(resp.status_int, 200)

    def test_container12unredirectedrequest(self):
        resp = Request.blank('/v1/a/c12/').get_response(
            self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertTrue('index file' in resp.body)

    def test_subrequest_once_if_possible(self):
        # The middleware should pass the object GET through untouched,
        # making exactly one backend request.
        resp = Request.blank(
            '/v1/a/c4/one.txt').get_response(self.test_staticweb)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.headers['x-object-meta-test'], 'value')
        self.assertEqual(resp.body, '1')
        self.assertEqual(self.app.calls, 1)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
|
import unittest
import copy
from xformmanager.xformdef import FormDef, ElementDef, Differences
class CompatibilityTestCase(unittest.TestCase):
    """Tests for FormDef/ElementDef schema-compatibility checking.

    Compatibility is expected to be symmetric, so every check is run both
    ways via _do_two_way_compatibility_check().

    Fix: the fixture factories previously used mutable default arguments
    (``child_elements=[]``, ``allowable_values=[]``, ``types={}``), which
    Python evaluates once at definition time -- every fixture built with
    the defaults shared (and could cross-pollute) the same list/dict.
    They now use ``None`` sentinels and create fresh containers per call.
    """

    def testEmptyWithEmpty(self):
        self._do_two_way_compatibility_check(FormDef(), FormDef(), True)

    def testEmptyWithFilled(self):
        filled = self._get_basic_formdef()
        self._do_two_way_compatibility_check(filled, FormDef(), False)

    def testBasicFilledTwoWay(self):
        filled = self._get_basic_formdef()
        self._do_two_way_compatibility_check(filled, copy.deepcopy(filled), True)

    def testBasicAttributes(self):
        filled = self._get_basic_formdef()
        # todo: verify/modify these lists
        version_changing_attrs = ["xpath", "name", "type"]
        nonversion_changing_attrs = ["target_namespace", "version", "uiversion", "tag"]
        for attr in version_changing_attrs:
            fcopy = copy.deepcopy(filled)
            # we use an string in case there are parsing exceptions
            # we may need to special case this better
            setattr(fcopy, attr, "99")
            self._do_two_way_compatibility_check(filled, fcopy, False)
        for attr in nonversion_changing_attrs:
            fcopy = copy.deepcopy(filled)
            # ditto above.
            setattr(fcopy, attr, "99")
            self._do_two_way_compatibility_check(filled, fcopy, True)

    def testChildAttributes(self):
        child = self._get_basic_elementdef()
        ccopy = copy.deepcopy(child)
        filled = self._get_basic_formdef(child_elements=[child])
        fcopy = copy.deepcopy(filled)
        fcopy.child_elements = [ccopy]
        self._do_two_way_compatibility_check(filled, fcopy, True)
        self._do_child_attribute_check(filled, fcopy, ccopy)

    def testChildOrdering(self):
        child1 = self._get_basic_elementdef(name="name1", xpath="xpath1")
        child2 = self._get_basic_elementdef(name="name2", xpath="xpath2")
        filled = self._get_basic_formdef(child_elements=[child1, child2])
        # first make sure this checks out fine with multiple children
        fcopy = copy.deepcopy(filled)
        self._do_two_way_compatibility_check(filled, fcopy, True)
        # now reorder the children, it should still pass
        fcopy.child_elements = [child2, child1]
        self._do_two_way_compatibility_check(filled, fcopy, True)

    def testChildAdditions(self):
        child1 = self._get_basic_elementdef(name="name1", xpath="xpath1")
        child2 = self._get_basic_elementdef(name="name2", xpath="xpath2")
        child3 = self._get_basic_elementdef(name="name3", xpath="xpath3")
        filled = self._get_basic_formdef(child_elements=[child1, child2])
        # first make sure this checks out fine with multiple children
        fcopy = copy.deepcopy(filled)
        fcopy.child_elements = [child1]
        self._do_two_way_compatibility_check(filled, fcopy, False)
        fcopy.child_elements = [child1, child2, child3]
        self._do_two_way_compatibility_check(filled, fcopy, False)
        # do not check duplicates, since valid xsd schema will not have duplicates
        # fcopy.child_elements = [child1, child2, child1]
        # self._do_two_way_compatibility_check(filled, fcopy, False)
        # make sure it was the inconsistent child elements that were failing
        fcopy.child_elements = [child1, child2]
        self._do_two_way_compatibility_check(filled, fcopy, True)

    def testSubChildren(self):
        child1 = self._get_basic_elementdef(name="name1", xpath="xpath1")
        subchild1 = self._get_basic_elementdef(name="subname1", xpath="subpath1")
        subchild2 = self._get_basic_elementdef(name="subname2", xpath="subpath2")
        child1.child_elements = [subchild1, subchild2]
        child2 = self._get_basic_elementdef(name="name2", xpath="xpath2")
        subchild3 = self._get_basic_elementdef(name="subname3", xpath="subpath3")
        subchild4 = self._get_basic_elementdef(name="subname4", xpath="subpath4")
        child2.child_elements = [subchild3, subchild4]
        c1copy = copy.deepcopy(child1)
        c2copy = copy.deepcopy(child2)
        filled = self._get_basic_formdef(child_elements=[child1, child2])
        fcopy = copy.deepcopy(filled)
        # all is the same, should pass
        self._do_two_way_compatibility_check(filled, fcopy, True)
        # run through the standard compatibility checks on all children,
        # all subchildren
        for child in [child1, child2, subchild1, subchild2, subchild3, subchild4]:
            self._do_child_attribute_check(filled, fcopy, child)
        # test ordering of super children, and then of sub children
        fcopy.child_elements = [c2copy, c1copy]
        self._do_two_way_compatibility_check(filled, fcopy, True)
        # ordering should pass
        c1copy.child_elements = [subchild2, subchild1]
        self._do_two_way_compatibility_check(filled, fcopy, True)
        # removal should not
        c1copy.child_elements = [subchild1]
        self._do_two_way_compatibility_check(filled, fcopy, False)
        # additions should not
        c1copy.child_elements = [subchild2, subchild1, subchild3]
        self._do_two_way_compatibility_check(filled, fcopy, False)
        # Forget about duplicates
        # c1copy.child_elements = [subchild2, subchild1, subchild1]
        # self._do_two_way_compatibility_check(filled, fcopy, False)
        # swapping should not
        c1copy.child_elements = [subchild1, subchild3]
        self._do_two_way_compatibility_check(filled, fcopy, False)
        # moving children to different elements should not
        c2copy.child_elements = [subchild2, subchild4]
        self._do_two_way_compatibility_check(filled, fcopy, False)
        # finally run through basic tests with a subsubchild
        # let's get our copies back in our expected state:
        c1copy.child_elements = [subchild1, subchild2]
        c2copy.child_elements = [subchild3, subchild4]
        self._do_two_way_compatibility_check(filled, fcopy, True)
        subsubchild = self._get_basic_elementdef(name="subsubname1", xpath="subsubpath1")
        subchild1.child_elements = [subsubchild]
        # fcopy actually points to subchild1. so we need to make a new copy.
        subchild1_copy = copy.deepcopy(subchild1)
        subsubchild_copy = copy.deepcopy(subsubchild)
        subchild1_copy.child_elements = [subsubchild_copy]
        c1copy.child_elements = [subchild1_copy, subchild2]
        self._do_child_attribute_check(filled, fcopy, subsubchild)

    def _do_child_attribute_check(self, filled, fcopy, ccopy):
        """Checks all the version attributes of an element against a form.
           Any version changing attribute should result in a compatibility
           error, while any non-version changing attribute should not.
           Assumes the element passed in is referenced inside one of the forms,
           but it could be arbitrarily nested"""
        # todo: verify/modify these lists
        version_changing_attrs = ["xpath", "name", "type", "is_repeatable"]
        nonversion_changing_attrs = ["tag"]
        for attr in version_changing_attrs:
            prev_val = getattr(ccopy, attr)
            # same caveat as above applies
            setattr(ccopy, attr, "99")
            self._do_two_way_compatibility_check(filled, fcopy, False)
            # make sure to set it back after we check
            setattr(ccopy, attr, prev_val)
        for attr in nonversion_changing_attrs:
            prev_val = getattr(ccopy, attr)
            # same caveat as above applies
            setattr(ccopy, attr, "99")
            self._do_two_way_compatibility_check(filled, fcopy, True)
            # make sure to set it back after we check
            setattr(ccopy, attr, prev_val)

    def _get_basic_elementdef(self, name="a name", xpath="xpath",
                              child_elements=None, allowable_values=None,
                              type="type", is_repeatable=False,
                              min_occurs=0, tag="tag"):
        """Make an elementdef, with as many or as few custom parameters as you want.

           child_elements/allowable_values default to fresh empty lists
           (None sentinel) so fixtures never share one mutable default."""
        to_return = ElementDef()
        to_return.name = name
        to_return.xpath = xpath
        to_return.child_elements = [] if child_elements is None else child_elements
        to_return.allowable_values = [] if allowable_values is None else allowable_values
        to_return.type = type
        to_return.is_repeatable = is_repeatable
        to_return.min_occurs = min_occurs
        to_return.tag = tag
        return to_return

    def _get_basic_formdef(self, name="a name", xpath="xpath",
                           child_elements=None, allowable_values=None,
                           type="type", is_repeatable=False, types=None,
                           version=1, uiversion=1,
                           target_namespace="uri://my_xmlns"):
        """Make a formdef, with as many or as few custom parameters as you want.

           child_elements/allowable_values/types default to fresh empty
           containers (None sentinel) so fixtures never share one mutable
           default."""
        to_return = FormDef()
        to_return.name = name
        to_return.xpath = xpath
        to_return.child_elements = [] if child_elements is None else child_elements
        to_return.allowable_values = [] if allowable_values is None else allowable_values
        to_return.type = type
        to_return.is_repeatable = is_repeatable
        to_return.types = {} if types is None else types
        to_return.version = version
        to_return.uiversion = uiversion
        to_return.target_namespace = target_namespace
        return to_return

    def _do_two_way_compatibility_check(self, fd1, fd2, are_compatible):
        # all compatibility checks should be two-way, this is a symmetric
        # operation.
        should_they = "should" if are_compatible else "should NOT"
        self.assertEqual(are_compatible, fd1.is_compatible_with(fd2),
                         "%s and %s %s be compatible" % (fd1, fd2, should_they))
        self.assertEqual(are_compatible, fd2.is_compatible_with(fd1),
                         "%s and %s %s be compatible" % (fd2, fd1, should_they))
|
|
#!/usr/bin/env python
import sys
import logging
import multiprocessing
from itertools import imap, chain, islice
import argparse
import numpy as np
import h5py
import gomill
import gomill.sgf, gomill.sgf_moves
from gomill.gtp_states import History_move
from deepgo import cubes, state, rank
"""
This reads sgf's from stdin, processes them in a parallel manner to extract
pairs (cube_encoding_position, move_to_play) and writes the data into a file.
Some comments about speed:
Most time in workers is currently spent in routines for analysing the goban
in the cubes submodule, where we analyse each position independently of
the previous ones, while we could build strings/liberties data structures
incrementally and thus save resources.
The workers do however scale up linearly with number of cores. What does not
and what is the actual bottleneck on multicore machine (with slower & bigger
cubes, such as the tian_zhu_2015 cube) is the serial HDF file io and compression
in the master process. Truly parallel implementation using MPI is planned.
Currently, you can easily process 200 000 games in under 24 hours on a 4-core
commodity laptop. The dataset is created (almost) only once and you will
probably be spending much more time training the CNN anyway.
"""
def flatten(list_of_lists):
    """Lazily yield every item of every sub-iterable, in order."""
    for sublist in list_of_lists:
        for item in sublist:
            yield item
def init_subprocess(plane, label, allowed_boardsizes, allowed_ranks):
    """Worker initializer: install the per-process globals used by process_game.

    Run once in each pool worker (and once in the master for the
    single-process path), so that the heavy registry lookups happen
    a single time per process.
    """
    global get_cube, get_label, board_filter, ranks_filter
    # resolve the requested cube/label encoders from the registries
    get_cube = cubes.reg_cube[plane]
    get_label = cubes.reg_label[label]

    def board_filter(board):
        return board.side in allowed_boardsizes

    def _rank_allowed(one_rank):
        if allowed_ranks is None:
            # no restriction configured
            return True
        if not one_rank:
            # missing rank info is allowed iff None is in the allowed set
            return None in allowed_ranks
        return one_rank.key() in allowed_ranks

    def ranks_filter(brwr):
        # both black's and white's rank must pass
        return all(_rank_allowed(one) for one in brwr)
def get_rank(root_node, key):
    """Return the rank stored under SGF property `key` ('BR'/'WR'), or None.

    Returns None when the root node has no such property.
    """
    try:
        prop = root_node.get(key)
    except (KeyError, ValueError):
        # KeyError: the property is absent; ValueError: its value is
        # malformed. The previous bare ``except:`` also swallowed
        # KeyboardInterrupt/SystemExit, which would make workers unkillable.
        return None
    return rank.Rank.from_string(prop, True)
def process_game(sgf_fn):
    """Read one SGF file and turn it into training data.

    Returns (Xs, ys) — parallel lists of cube encodings and labels, one pair
    per (non-skipped) move — or None when the game is filtered out or broken.
    Relies on the module globals installed by init_subprocess():
    get_cube, get_label, board_filter, ranks_filter.
    """
    sgf_fn = sgf_fn.strip()
    try :
        with open(sgf_fn, 'r') as fin:
            game = gomill.sgf.Sgf_game.from_string(fin.read())
        logging.info("Processing '%s'"%sgf_fn)
        board, moves = gomill.sgf_moves.get_setup_and_moves(game)
    except Exception as e:
        # unparsable / unreadable SGF: skip the whole game
        logging.warn("Error processing '%s': %s"%(sgf_fn, str(e)))
        return None
    if not board_filter(board) or not moves:
        logging.info("Skipping game '%s': boardsize not allowed"%(sgf_fn))
        return None
    root = game.get_root()
    # black's and white's rank, read from the SGF root properties
    ranks = rank.BrWr(get_rank(root, 'BR'),
                      get_rank(root, 'WR'))
    if not ranks_filter(ranks):
        logging.info("Skipping game '%s': rank not allowed"%(sgf_fn))
        return None
    Xs = []
    ys = []
    ko_move = None
    history = []
    for num, (player, move) in enumerate(moves):
        # pass
        if not move:
            break
        try:
            # encode current position
            # moves[num:] gives the encoder access to the future moves
            s = state.State(board, ko_move, history, moves[num:len(moves)], ranks)
            x = get_cube(s, player)
            # get y data from future moves
            # (usually only first element will be taken in account)
            y = get_label(s, player)
        except cubes.SkipGame as e:
            # the encoder itself can veto the whole game
            logging.info("Skipping game '%s': %s"%(sgf_fn, str(e)))
            return None
        except Exception as e:
            logging.exception("Error encoding '%s' - move %d"%(sgf_fn, num + 1))
            # TODO Should we use the data we have already?
            return None
        # None skips
        if x is not None and y is not None:
            Xs.append(x)
            ys.append(y)
        # replay the move on the board so the next iteration sees it
        row, col = move
        try:
            ko_move = board.play(row, col, player)
        except Exception as e:
            logging.warn("Error re-playing '%s' - move %d : '%s'"%(sgf_fn, num + 1, str(e)))
            # this basically means that the game has illegal moves
            # lets skip it altogether in case it is garbled
            return None
        history.append(History_move(player, move))
    return Xs, ys
def parse_rank_specification(s):
    """
    Parses info about rank specification, used to filter games by player's ranks.
    Returns None (all ranks allowed),
    or a set of possible values
    (None as a possible value in the set means that we should include games without rank info)

    # returns None, all ranks possible
    parse_rank_specification('')
    # returns set([1, 2, 3, None])), 1, 2, 3 allowed, as well as missing rank info
    parse_rank_specification('1..3,')
    # returns set([None])), only games WITHOUT rank info are allowed
    parse_rank_specification(',')

    See test for more examples.
    """
    if not s:
        # empty spec: no restriction at all
        return None
    allowed = set()
    # whitespace is insignificant anywhere in the spec
    for token in s.replace(' ', '').split(','):
        bounds = token.split('..')
        try:
            if len(bounds) == 1:
                # a single value; an empty token stands for "no rank info"
                allowed.add(int(bounds[0]) if bounds[0] else None)
            elif len(bounds) == 2:
                lo, hi = int(bounds[0]), int(bounds[1])
                if hi < lo:
                    # deliberately not a ValueError: must propagate as-is
                    raise RuntimeError('Empty range %s'%(token))
                allowed.update(range(lo, hi + 1))
            else:
                # more than one '..' in a token is malformed
                raise ValueError()
        except ValueError:
            raise RuntimeError('Could not parse rank info on token "%s"'%(token))
    return allowed
class RankSpecAction(argparse.Action):
    """argparse Action that parses a rank-spec string into a set (or None).

    Stores the result of parse_rank_specification() instead of the raw
    string, so downstream code always sees an already-parsed value.
    """
    def __init__(self, option_strings, dest, nargs=None, **kwargs):
        # a rank specification is always a single string argument
        if nargs is not None:
            raise ValueError("nargs not allowed")
        super(RankSpecAction, self).__init__(option_strings, dest, **kwargs)
    def __call__(self, parser, namespace, values, option_string=None):
        # parse eagerly; raises RuntimeError on a malformed spec
        setattr(namespace, self.dest, parse_rank_specification(values))
def parse_args():
    """Build and run the command-line parser; return the parsed namespace."""
    parser = argparse.ArgumentParser(
        description='Processes sgf to create datasets for teaching Deep'
        ' Neural Networks to play the game of Go.'
        ' Each sgf file is read from STDIN, analysed and an'
        ' (X, y) pair is created from each position, where'
        ' X is the cube encoding position and y the desired'
        ' move. The results are written to HDF5 file.')
    parser.add_argument('filename', metavar='FILENAME',
                        help='HDF5 FILENAME to store the dataset to')
    parser.add_argument('-x', '--x-name', dest='xname',
                        help='HDF5 dataset name to store the xs to',
                        default='xs')
    parser.add_argument('-y', '--y-name', dest='yname',
                        help='HDF5 dataset name to store the ys to',
                        default='ys')
    # choices come from the encoder registries in the cubes submodule
    parser.add_argument('-p', '--plane', type=str, choices=cubes.reg_cube.keys(),
                        default='clark_storkey_2014',
                        help='specify which method should be used to create the planes')
    parser.add_argument('-l', '--label', type=str, choices=cubes.reg_label.keys(),
                        default='simple_label',
                        help='specify which method should be used to create the labels')
    parser.add_argument('-q', '--quiet', dest='quiet', action='store_true',
                        default=False,
                        help='turn off the (stderr) debug logs')
    parser.add_argument('-s', dest='boardsize', type=int,
                        help='specify boardsize', default=19)
    # RankSpecAction parses the spec string into a set; the default ''
    # parses to None, i.e. no rank restriction
    parser.add_argument('-r', '--rank', dest='rankspec', action=RankSpecAction,
                        help='Specify rank to be limited. 1kyu=1, 30kyu=30, 1dan=0,'
                        ' 10dan=-9. Example values "1,2,3", "1..30", "-10..30",'
                        ' etc. Empty string marks no limit on rank.',
                        default=parse_rank_specification(''))
    parser.add_argument('--flatten', dest='flatten', action='store_true',
                        help='Flatten out the examples. (19, 19, 4) shape becomes ( 19 * 19 * 4,)', default=False)
    parser.add_argument('--shrink-units', dest='shrink_units', action='store_true',
                        help='Shrinks unit dimension label (or, unlikely, feature) arrays.'
                        ' Only if the unit dimension is the only one in the example,'
                        ' so (19,19,1) is not shrinked, but (1,) is.', default=False)
    parser.add_argument('--dtype', dest='dtype',
                        help='convert dtype of stored data to given numpy dtype (instead the default value defined by plane/label)', default=None)
    parser.add_argument('--compression', dest='compression',
                        help='Possible values: "lzf", "gzip10", "gzip9", ...', default='lzf')
    parser.add_argument('--proc', type=int,
                        default=multiprocessing.cpu_count(),
                        help='specify number of processes for parallelization')
    return parser.parse_args()
def batched_imap(function, input_iterator, batch_size=100, imap=imap):
    """
    Apply `function` to the items of `input_iterator` via `imap`, submitting
    at most `batch_size` items at a time; yield the results one by one.

    A new batch is only submitted once all results from the previous batch
    have been consumed. This keeps memory bounded when the pool workers are
    faster than the consumer of the results.
    """
    while True:
        # list() is necessary s.t. we can test for emptiness
        chunk = list(islice(input_iterator, batch_size))
        if not chunk:
            break
        logging.debug('Starting next batch.')
        for result in imap(function, chunk):
            yield result
def main():
    """Read SGF paths from stdin, encode positions in parallel, write HDF5."""
    ## ARGS
    args = parse_args()
    ## INIT LOGGING
    logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
                        level=logging.DEBUG) # if not args.quiet else logging.WARN)
    logging.info("args: %s"%args)
    ## INIT pool of workers
    # each worker gets the encoder/filter globals via init_subprocess
    initargs=(args.plane, args.label, (args.boardsize, ), args.rankspec)
    p = multiprocessing.Pool(args.proc, initializer=init_subprocess, initargs=initargs)
    ## INIT shapes and transformations
    # the basic pathway is:
    # imap job returns two lists [x1, x2, x3, ..], [y1, y2, y3, ..]
    # of numpy arrays which we want to transform to be able to store in a dataset
    # in a proper format
    # first determine example shapes by encoding one synthetic position
    # NOTE(review): gomill.boards is reached as an attribute of gomill here,
    # relying on gomill.sgf having imported it — confirm, or import it directly
    b = gomill.boards.Board(args.boardsize)
    init_subprocess(*initargs)
    s = state.State(b, None, [], [('b',(3,3))], rank.BrWr(rank.Rank.from_key(1), # 1k
                                                          rank.Rank.from_key(2) # 2k
                                                          ))
    sample_x = get_cube(s, 'b')
    sample_y = get_label(s, 'b')
    # shape in dataset
    dshape_x = sample_x.shape
    dshape_y = sample_y.shape
    # transformation of the returned lists (identity by default)
    mapxs = lambda xs : xs
    mapys = lambda ys : ys
    ## shrink unit dimension
    # one dimensional values can be stored flattened
    # s.t.
    # 1000 examples of dimensions 1 have shape (1000,)
    # instead of (1000, 1)
    # this is probably the case only for the labels
    # but support xs anyways
    if args.shrink_units and sample_x.shape == (1, ):
        mapxs = lambda xs : np.ndarray.flatten(np.array(xs))
        dshape_x = tuple()
    if args.shrink_units and sample_y.shape == (1, ):
        mapys = lambda ys : np.ndarray.flatten(np.array(ys))
        dshape_y = tuple()
    transform_example_x = lambda x : x
    transform_example_y = lambda y : y
    ## flatten
    # do not flatten units
    # NOTE(review): `reduce` is the Python 2 builtin; under Python 3 this
    # would need functools.reduce
    if args.flatten and dshape_x:
        transform_example_x = np.ndarray.flatten
        dshape_x = (reduce((lambda x,y : x*y), dshape_x), )
    if args.flatten and dshape_y:
        transform_example_y = np.ndarray.flatten
        dshape_y = (reduce((lambda x,y : x*y), dshape_y), )
    ## dtype
    dtype_x = sample_x.dtype
    dtype_y = sample_y.dtype
    recast_dtype = lambda a : a
    if args.dtype:
        recast_dtype = lambda a : np.array(a, dtype=args.dtype)
        dtype_x = args.dtype
        dtype_y = args.dtype
    ## compression
    compression_kwargs = {}
    if args.compression == 'lzf':
        compression_kwargs['compression'] = 'lzf'
    elif args.compression.startswith('gzip'):
        compression_kwargs['compression'] = 'gzip'
        level = int(args.compression[4:])
        # NOTE(review): h5py documents gzip levels 0..9; confirm whether 10
        # is really intended here
        assert 0<=level<=10
        compression_kwargs['compression_opts'] = level
    else:
        raise RuntimeError("Invalid compression arg.")
    ## INIT dataset
    with h5py.File(args.filename) as f:
        logging.debug("what: raw -> in dataset")
        logging.debug("x.shape: %s -> %s"%(repr(sample_x.shape), repr(dshape_x) if dshape_x else 'flat'))
        logging.debug("x.dtype: %s -> %s"%(sample_x.dtype, dtype_x))
        logging.debug("y.shape: %s -> %s"%(repr(sample_y.shape), repr(dshape_y) if dshape_y else 'flat'))
        logging.debug("y.dtype: %s -> %s"%(sample_y.dtype, dtype_y))
        try:
            kwargsx = {
                # infinite number of examples
                'maxshape' :(None,) + dshape_x,
                'dtype' : dtype_x,
            }
            kwargsx.update(compression_kwargs)
            dset_x = f.create_dataset(args.xname,
                                      (0,) + dshape_x,
                                      **kwargsx)
            kwargsy = {
                # infinite number of examples
                'maxshape' :(None,) + dshape_y,
                'dtype' : dtype_y,
            }
            kwargsy.update(compression_kwargs)
            dset_y = f.create_dataset(args.yname,
                                      (0,) + dshape_y,
                                      **kwargsy)
        except Exception as e:
            logging.error("Cannot create dataset. File exists? (%s)"%(str(e)))
            sys.exit(1)
        # record provenance so the dataset is self-describing
        dset_x.attrs['name'] = args.plane
        dset_y.attrs['name'] = args.label
        dset_x.attrs['boardsize'] = args.boardsize
        dset_y.attrs['boardsize'] = args.boardsize
        dset_x.attrs['original_dtype'] = repr(sample_x.dtype)
        dset_y.attrs['original_dtype'] = repr(sample_y.dtype)
        dset_x.attrs['original_example_shape'] = repr(sample_x.shape)
        dset_y.attrs['original_example_shape'] = repr(sample_y.shape)
        ## map the job
        if args.proc > 1:
            def job_imap(*args):
                return p.imap_unordered(*args)
        else:
            # do not use pool if only one proc
            init_subprocess(*initargs)
            def job_imap(*args):
                return imap(*args)
        it = batched_imap(process_game, sys.stdin, batch_size=1000, imap=job_imap)
        size = 0
        for num, ret in enumerate(it):
            # process_game returns None for skipped/broken games
            if not ret:
                continue
            xs, ys = ret
            assert len(xs) == len(ys)
            assert all(x.shape == sample_x.shape for x in xs)
            assert all(y.shape == sample_y.shape for y in ys)
            if xs:
                add = len(xs)
                logging.info("Storing %d examples."%add)
                # grow the datasets and append this game's examples
                dset_x.resize((size+add,) + dshape_x)
                dset_y.resize((size+add,) + dshape_y)
                dset_x[-add:] = mapxs([transform_example_x(recast_dtype(x)) for x in xs])
                dset_y[-add:] = mapys([transform_example_y(recast_dtype(y)) for y in ys])
                size += add
        logging.info("Finished.")
        for dset in [dset_x, dset_y]:
            logging.info("Dataset '%s': shape=%s, size=%s, dtype=%s"%(dset.name,
                                                                      repr(dset.shape),
                                                                      repr(dset.size),
                                                                      repr(dset.dtype)))
if __name__ == "__main__":
    main()
|
|
"""
Base and utility classes for tseries type pandas objects.
"""
import operator
from typing import Set
import warnings
import numpy as np
from pandas._libs import NaT, iNaT, lib
from pandas._libs.algos import unique_deltas
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, cache_readonly, deprecate_kwarg
from pandas.core.dtypes.common import (
ensure_int64, is_dtype_equal, is_float, is_integer, is_list_like,
is_period_dtype, is_scalar)
from pandas.core.dtypes.generic import ABCIndex, ABCIndexClass, ABCSeries
from pandas.core import algorithms, ops
from pandas.core.accessor import PandasDelegate
from pandas.core.arrays import ExtensionOpsMixin
from pandas.core.arrays.datetimelike import (
DatetimeLikeArrayMixin, _ensure_datetimelike_to_i8)
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import Index, _index_shared_docs
from pandas.core.tools.timedeltas import to_timedelta
import pandas.io.formats.printing as printing
from pandas.tseries.frequencies import to_offset
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
def ea_passthrough(array_method):
    """
    Build an Index-level alias for a method of the underlying ExtensionArray.

    Parameters
    ----------
    array_method : method on an Array class

    Returns
    -------
    method
        Calls ``array_method`` on ``self._data``, forwarding all arguments.
    """
    def _delegated(self, *args, **kwargs):
        # dispatch to the backing array, which holds the real implementation
        return array_method(self._data, *args, **kwargs)
    # masquerade as the wrapped method for introspection and docs
    _delegated.__name__ = array_method.__name__
    _delegated.__doc__ = array_method.__doc__
    return _delegated
class DatetimeIndexOpsMixin(ExtensionOpsMixin):
    """
    Common ops mixin to support a unified interface datetimelike Index.

    Subclasses (DatetimeIndex, TimedeltaIndex, PeriodIndex) store their
    values in ``self._data`` (a DatetimeLikeArrayMixin subclass) and this
    mixin forwards most behavior to that array.
    """
    # the backing ExtensionArray; set by subclass constructors
    _data = None
    # DatetimeLikeArrayMixin assumes subclasses are mutable, so these are
    # properties there. They can be made into cache_readonly for Index
    # subclasses bc they are immutable
    inferred_freq = cache_readonly(
        DatetimeLikeArrayMixin.inferred_freq.fget) # type: ignore
    _isnan = cache_readonly(DatetimeLikeArrayMixin._isnan.fget) # type: ignore
    hasnans = cache_readonly(
        DatetimeLikeArrayMixin._hasnans.fget) # type: ignore
    _hasnans = hasnans # for index / array -agnostic code
    _resolution = cache_readonly(
        DatetimeLikeArrayMixin._resolution.fget) # type: ignore
    resolution = cache_readonly(
        DatetimeLikeArrayMixin.resolution.fget) # type: ignore
    # methods forwarded verbatim to the backing array
    _maybe_mask_results = ea_passthrough(
        DatetimeLikeArrayMixin._maybe_mask_results)
    __iter__ = ea_passthrough(DatetimeLikeArrayMixin.__iter__)
    mean = ea_passthrough(DatetimeLikeArrayMixin.mean)
    @property
    def freq(self):
        """
        Return the frequency object if it is set, otherwise None.
        """
        return self._data.freq
    @freq.setter
    def freq(self, value):
        # validation is handled by _data setter
        self._data.freq = value
    @property
    def freqstr(self):
        """
        Return the frequency object as a string if it is set, otherwise None.
        """
        return self._data.freqstr
    def unique(self, level=None):
        # Return an Index of the unique values (delegated to the array).
        if level is not None:
            self._validate_index_level(level)
        result = self._data.unique()
        # Note: if `self` is already unique, then self.unique() should share
        # a `freq` with self. If not already unique, then self.freq must be
        # None, so again sharing freq is correct.
        return self._shallow_copy(result._data)
    @classmethod
    def _create_comparison_method(cls, op):
        """
        Create a comparison method that dispatches to ``cls.values``.
        """
        def wrapper(self, other):
            if isinstance(other, ABCSeries):
                # the arrays defer to Series for comparison ops but the indexes
                # don't, so we have to unwrap here.
                other = other._values
            result = op(self._data, maybe_unwrap_index(other))
            return result
        wrapper.__doc__ = op.__doc__
        wrapper.__name__ = '__{}__'.format(op.__name__)
        return wrapper
    @property
    def _ndarray_values(self):
        # raw integer (i8-backed) representation provided by the array
        return self._data._ndarray_values
    # ------------------------------------------------------------------------
    # Abstract data attributes
    @property
    def values(self):
        # Note: PeriodArray overrides this to return an ndarray of objects.
        return self._data._data
    @property # type: ignore # https://github.com/python/mypy/issues/1362
    @Appender(DatetimeLikeArrayMixin.asi8.__doc__)
    def asi8(self):
        return self._data.asi8
    # ------------------------------------------------------------------------
    def equals(self, other):
        """
        Determines if two Index objects contain the same elements.
        """
        if self.is_(other):
            return True
        if not isinstance(other, ABCIndexClass):
            return False
        elif not isinstance(other, type(self)):
            # try coercing `other` to our own type before comparing
            try:
                other = type(self)(other)
            except Exception:
                return False
        if not is_dtype_equal(self.dtype, other.dtype):
            # have different timezone
            return False
        elif is_period_dtype(self):
            if not is_period_dtype(other):
                return False
            if self.freq != other.freq:
                return False
        # element-wise comparison on the i8 representation
        return np.array_equal(self.asi8, other.asi8)
    @staticmethod
    def _join_i8_wrapper(joinf, dtype, with_indexers=True):
        """
        Create the join wrapper methods.

        The wrapper views datetimelike operands as i8 before calling the
        low-level join function, then views the joined index back as `dtype`.
        """
        from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
        @staticmethod
        def wrapper(left, right):
            if isinstance(left, (np.ndarray, ABCIndex, ABCSeries,
                                 DatetimeLikeArrayMixin)):
                left = left.view('i8')
            if isinstance(right, (np.ndarray, ABCIndex, ABCSeries,
                                  DatetimeLikeArrayMixin)):
                right = right.view('i8')
            results = joinf(left, right)
            if with_indexers:
                join_index, left_indexer, right_indexer = results
                join_index = join_index.view(dtype)
                return join_index, left_indexer, right_indexer
            return results
        return wrapper
    def _ensure_localized(self, arg, ambiguous='raise', nonexistent='raise',
                          from_utc=False):
        # See DatetimeLikeArrayMixin._ensure_localized.__doc__
        if getattr(self, 'tz', None):
            # ensure_localized is only relevant for tz-aware DTI
            result = self._data._ensure_localized(arg,
                                                  ambiguous=ambiguous,
                                                  nonexistent=nonexistent,
                                                  from_utc=from_utc)
            return type(self)._simple_new(result, name=self.name)
        return arg
    def _box_values(self, values):
        # box raw values into scalar objects (Timestamp/Timedelta/Period)
        return self._data._box_values(values)
    @Appender(_index_shared_docs['contains'] % _index_doc_kwargs)
    def __contains__(self, key):
        try:
            res = self.get_loc(key)
            # get_loc may return a scalar, slice, or boolean/int array
            return (is_scalar(res) or isinstance(res, slice) or
                    (is_list_like(res) and len(res)))
        except (KeyError, TypeError, ValueError):
            return False
    contains = __contains__
    # Try to run function on index first, and then on elements of index
    # Especially important for group-by functionality
    def map(self, mapper, na_action=None):
        try:
            result = mapper(self)
            # Try to use this result if we can
            if isinstance(result, np.ndarray):
                result = Index(result)
            if not isinstance(result, Index):
                raise TypeError('The map function must return an Index object')
            return result
        except Exception:
            # fall back to mapping element-wise over an object Index
            return self.astype(object).map(mapper)
    def sort_values(self, return_indexer=False, ascending=True):
        """
        Return sorted copy of Index.
        """
        if return_indexer:
            _as = self.argsort()
            if not ascending:
                _as = _as[::-1]
            sorted_index = self.take(_as)
            return sorted_index, _as
        else:
            sorted_values = np.sort(self._ndarray_values)
            attribs = self._get_attributes_dict()
            freq = attribs['freq']
            # sorting may flip the sign of a (non-Period) freq
            if freq is not None and not is_period_dtype(self):
                if freq.n > 0 and not ascending:
                    freq = freq * -1
                elif freq.n < 0 and ascending:
                    freq = freq * -1
            attribs['freq'] = freq
            if not ascending:
                sorted_values = sorted_values[::-1]
            return self._simple_new(sorted_values, **attribs)
    @Appender(_index_shared_docs['take'] % _index_doc_kwargs)
    def take(self, indices, axis=0, allow_fill=True,
             fill_value=None, **kwargs):
        nv.validate_take(tuple(), kwargs)
        indices = ensure_int64(indices)
        # a contiguous run of indices can be served as a cheap slice
        maybe_slice = lib.maybe_indices_to_slice(indices, len(self))
        if isinstance(maybe_slice, slice):
            return self[maybe_slice]
        taken = self._assert_take_fillable(self.asi8, indices,
                                           allow_fill=allow_fill,
                                           fill_value=fill_value,
                                           na_value=iNaT)
        # keep freq in PeriodArray/Index, reset otherwise
        freq = self.freq if is_period_dtype(self) else None
        return self._shallow_copy(taken, freq=freq)
    _can_hold_na = True
    _na_value = NaT
    """The expected NA value to use with this index."""
    @property
    def asobject(self):
        """
        Return object Index which contains boxed values.

        .. deprecated:: 0.23.0
            Use ``astype(object)`` instead.

        *this is an internal non-public method*
        """
        warnings.warn("'asobject' is deprecated. Use 'astype(object)'"
                      " instead", FutureWarning, stacklevel=2)
        return self.astype(object)
    def _convert_tolerance(self, tolerance, target):
        # coerce tolerance to a timedelta64 ndarray and validate its length
        tolerance = np.asarray(to_timedelta(tolerance).to_numpy())
        if target.size != tolerance.size and tolerance.size > 1:
            raise ValueError('list-like tolerance size must match '
                             'target index size')
        return tolerance
    def tolist(self):
        """
        Return a list of the underlying data.
        """
        return list(self.astype(object))
    def min(self, axis=None, skipna=True, *args, **kwargs):
        """
        Return the minimum value of the Index or minimum along
        an axis.

        See Also
        --------
        numpy.ndarray.min
        Series.min : Return the minimum value in a Series.
        """
        nv.validate_min(args, kwargs)
        nv.validate_minmax_axis(axis)
        if not len(self):
            return self._na_value
        i8 = self.asi8
        try:
            # quick check: a monotonic index's first non-NaT value is the min
            if len(i8) and self.is_monotonic:
                if i8[0] != iNaT:
                    return self._box_func(i8[0])
            if self.hasnans:
                if skipna:
                    min_stamp = self[~self._isnan].asi8.min()
                else:
                    return self._na_value
            else:
                min_stamp = i8.min()
            return self._box_func(min_stamp)
        except ValueError:
            return self._na_value
    def argmin(self, axis=None, skipna=True, *args, **kwargs):
        """
        Returns the indices of the minimum values along an axis.

        See `numpy.ndarray.argmin` for more information on the
        `axis` parameter.

        See Also
        --------
        numpy.ndarray.argmin
        """
        nv.validate_argmin(args, kwargs)
        nv.validate_minmax_axis(axis)
        i8 = self.asi8
        if self.hasnans:
            mask = self._isnan
            if mask.all() or not skipna:
                return -1
            # mask NaT slots with int64-max so they never win the argmin
            i8 = i8.copy()
            i8[mask] = np.iinfo('int64').max
        return i8.argmin()
    def max(self, axis=None, skipna=True, *args, **kwargs):
        """
        Return the maximum value of the Index or maximum along
        an axis.

        See Also
        --------
        numpy.ndarray.max
        Series.max : Return the maximum value in a Series.
        """
        nv.validate_max(args, kwargs)
        nv.validate_minmax_axis(axis)
        if not len(self):
            return self._na_value
        i8 = self.asi8
        try:
            # quick check: a monotonic index's last non-NaT value is the max
            if len(i8) and self.is_monotonic:
                if i8[-1] != iNaT:
                    return self._box_func(i8[-1])
            if self.hasnans:
                if skipna:
                    max_stamp = self[~self._isnan].asi8.max()
                else:
                    return self._na_value
            else:
                max_stamp = i8.max()
            return self._box_func(max_stamp)
        except ValueError:
            return self._na_value
    def argmax(self, axis=None, skipna=True, *args, **kwargs):
        """
        Returns the indices of the maximum values along an axis.

        See `numpy.ndarray.argmax` for more information on the
        `axis` parameter.

        See Also
        --------
        numpy.ndarray.argmax
        """
        nv.validate_argmax(args, kwargs)
        nv.validate_minmax_axis(axis)
        i8 = self.asi8
        if self.hasnans:
            mask = self._isnan
            if mask.all() or not skipna:
                return -1
            # mask NaT slots with 0 (== epoch) so they never win the argmax;
            # NOTE(review): assumes no genuine i8 value below 0 competes here
            i8 = i8.copy()
            i8[mask] = 0
        return i8.argmax()
    # --------------------------------------------------------------------
    # Rendering Methods
    def _format_with_header(self, header, na_rep='NaT', **kwargs):
        # prepend the header rows to the formatted values
        return header + list(self._format_native_types(na_rep, **kwargs))
    @property
    def _formatter_func(self):
        # subclasses must supply the scalar formatter used in summaries
        raise AbstractMethodError(self)
    def _format_attrs(self):
        """
        Return a list of tuples of the (attr,formatted_value).
        """
        attrs = super()._format_attrs()
        for attrib in self._attributes:
            if attrib == 'freq':
                freq = self.freqstr
                if freq is not None:
                    freq = "'%s'" % freq
                attrs.append(('freq', freq))
        return attrs
    # --------------------------------------------------------------------
    def _convert_scalar_indexer(self, key, kind=None):
        """
        We don't allow integer or float indexing on datetime-like when using
        loc.

        Parameters
        ----------
        key : label of the slice bound
        kind : {'ix', 'loc', 'getitem', 'iloc'} or None
        """
        assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
        # we don't allow integer/float indexing for loc
        # we don't allow float indexing for ix/getitem
        if is_scalar(key):
            is_int = is_integer(key)
            is_flt = is_float(key)
            if kind in ['loc'] and (is_int or is_flt):
                self._invalid_indexer('index', key)
            elif kind in ['ix', 'getitem'] and is_flt:
                self._invalid_indexer('index', key)
        return super()._convert_scalar_indexer(key, kind=kind)
    @classmethod
    def _add_datetimelike_methods(cls):
        """
        Add in the datetimelike methods (as we may have to override the
        superclass).
        """
        def __add__(self, other):
            # dispatch to ExtensionArray implementation
            result = self._data.__add__(maybe_unwrap_index(other))
            return wrap_arithmetic_op(self, other, result)
        cls.__add__ = __add__
        def __radd__(self, other):
            # alias for __add__
            return self.__add__(other)
        cls.__radd__ = __radd__
        def __sub__(self, other):
            # dispatch to ExtensionArray implementation
            result = self._data.__sub__(maybe_unwrap_index(other))
            return wrap_arithmetic_op(self, other, result)
        cls.__sub__ = __sub__
        def __rsub__(self, other):
            result = self._data.__rsub__(maybe_unwrap_index(other))
            return wrap_arithmetic_op(self, other, result)
        cls.__rsub__ = __rsub__
    def isin(self, values, level=None):
        """
        Compute boolean array of whether each index value is found in the
        passed set of values.

        Parameters
        ----------
        values : set or sequence of values

        Returns
        -------
        is_contained : ndarray (boolean dtype)
        """
        if level is not None:
            self._validate_index_level(level)
        if not isinstance(values, type(self)):
            try:
                values = type(self)(values)
            except ValueError:
                # values not coercible to our type: compare as objects
                return self.astype(object).isin(values)
        return algorithms.isin(self.asi8, values.asi8)
    def intersection(self, other, sort=False):
        # Set intersection; uses a fast slicing path when both sides share
        # the same anchored freq and are monotonic, else falls back to
        # the generic Index.intersection.
        self._validate_sort_keyword(sort)
        self._assert_can_do_setop(other)
        if self.equals(other):
            return self._get_reconciled_name_object(other)
        if len(self) == 0:
            return self.copy()
        if len(other) == 0:
            return other.copy()
        if not isinstance(other, type(self)):
            result = Index.intersection(self, other, sort=sort)
            if isinstance(result, type(self)):
                if result.freq is None:
                    result.freq = to_offset(result.inferred_freq)
            return result
        elif (other.freq is None or self.freq is None or
              other.freq != self.freq or
              not other.freq.isAnchored() or
              (not self.is_monotonic or not other.is_monotonic)):
            result = Index.intersection(self, other, sort=sort)
            # Invalidate the freq of `result`, which may not be correct at
            # this point, depending on the values.
            result.freq = None
            if hasattr(self, 'tz'):
                result = self._shallow_copy(result._values, name=result.name,
                                            tz=result.tz, freq=None)
            else:
                result = self._shallow_copy(result._values, name=result.name,
                                            freq=None)
            if result.freq is None:
                result.freq = to_offset(result.inferred_freq)
            return result
        # to make our life easier, "sort" the two ranges
        if self[0] <= other[0]:
            left, right = self, other
        else:
            left, right = other, self
        # after sorting, the intersection always starts with the right index
        # and ends with the index of which the last elements is smallest
        end = min(left[-1], right[-1])
        start = right[0]
        if end < start:
            # disjoint ranges -> empty intersection
            return type(self)(data=[])
        else:
            lslice = slice(*left.slice_locs(start, end))
            left_chunk = left.values[lslice]
            return self._shallow_copy(left_chunk)
    @Appender(_index_shared_docs['repeat'] % _index_doc_kwargs)
    def repeat(self, repeats, axis=None):
        nv.validate_repeat(tuple(), dict(axis=axis))
        # keep freq in PeriodArray/Index, reset otherwise (repeating breaks it)
        freq = self.freq if is_period_dtype(self) else None
        return self._shallow_copy(self.asi8.repeat(repeats), freq=freq)
    @Appender(_index_shared_docs['where'] % _index_doc_kwargs)
    def where(self, cond, other=None):
        # compare/select in UTC i8 space, then re-localize the result
        other = _ensure_datetimelike_to_i8(other, to_utc=True)
        values = _ensure_datetimelike_to_i8(self, to_utc=True)
        result = np.where(cond, values, other).astype('i8')
        result = self._ensure_localized(result, from_utc=True)
        return self._shallow_copy(result)
    def _summary(self, name=None):
        """
        Return a summarized representation.

        Parameters
        ----------
        name : str
            name to use in the summary representation

        Returns
        -------
        String with a summarized representation of the index
        """
        formatter = self._formatter_func
        if len(self) > 0:
            index_summary = ', %s to %s' % (formatter(self[0]),
                                            formatter(self[-1]))
        else:
            index_summary = ''
        if name is None:
            name = type(self).__name__
        result = '%s: %s entries%s' % (printing.pprint_thing(name),
                                       len(self), index_summary)
        if self.freq:
            result += '\nFreq: %s' % self.freqstr
        # display as values, not quoted
        result = result.replace("'", "")
        return result
    def _concat_same_dtype(self, to_concat, name):
        """
        Concatenate to_concat which has the same class.
        """
        attribs = self._get_attributes_dict()
        attribs['name'] = name
        # do not pass tz to set because tzlocal cannot be hashed
        if len({str(x.dtype) for x in to_concat}) != 1:
            raise ValueError('to_concat must have the same tz')
        new_data = type(self._values)._concat_same_type(to_concat).asi8
        # GH 3232: If the concat result is evenly spaced, we can retain the
        # original frequency
        is_diff_evenly_spaced = len(unique_deltas(new_data)) == 1
        if not is_period_dtype(self) and not is_diff_evenly_spaced:
            # reset freq
            attribs['freq'] = None
        return self._simple_new(new_data, **attribs)
    @Appender(_index_shared_docs['astype'])
    def astype(self, dtype, copy=True):
        if is_dtype_equal(self.dtype, dtype) and copy is False:
            # Ensure that self.astype(self.dtype) is self
            return self
        new_values = self._data.astype(dtype, copy=copy)
        # pass copy=False because any copying will be done in the
        # _data.astype call above
        return Index(new_values,
                     dtype=new_values.dtype, name=self.name, copy=False)
    @deprecate_kwarg(old_arg_name='n', new_arg_name='periods')
    def shift(self, periods, freq=None):
        """
        Shift index by desired number of time frequency increments.

        This method is for shifting the values of datetime-like indexes
        by a specified time increment a given number of times.

        Parameters
        ----------
        periods : int
            Number of periods (or increments) to shift by,
            can be positive or negative.

            .. versionchanged:: 0.24.0

        freq : pandas.DateOffset, pandas.Timedelta or string, optional
            Frequency increment to shift by.
            If None, the index is shifted by its own `freq` attribute.
            Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.

        Returns
        -------
        pandas.DatetimeIndex
            Shifted index.

        See Also
        --------
        Index.shift : Shift values of Index.
        PeriodIndex.shift : Shift values of PeriodIndex.
        """
        result = self._data._time_shift(periods, freq=freq)
        return type(self)(result, name=self.name)
def wrap_arithmetic_op(self, other, result):
    """
    Box the raw result of an arithmetic op back into an Index, propagating
    the appropriate result name. NotImplemented passes through untouched.
    """
    if result is NotImplemented:
        return NotImplemented

    if isinstance(result, tuple):
        # divmod / rdivmod produce a pair; wrap each half independently.
        assert len(result) == 2
        quotient, remainder = result
        return (wrap_arithmetic_op(self, other, quotient),
                wrap_arithmetic_op(self, other, remainder))

    if not isinstance(result, Index):
        # Index.__new__ will choose appropriate subclass for dtype
        result = Index(result)

    result.name = ops.get_op_result_name(self, other)
    return result
def maybe_unwrap_index(obj):
    """
    If operating against another Index object, we need to unwrap the underlying
    data before deferring to the DatetimeArray/TimedeltaArray/PeriodArray
    implementation, otherwise we will incorrectly return NotImplemented.

    Parameters
    ----------
    obj : object

    Returns
    -------
    unwrapped object
    """
    # Only Index instances get unwrapped; everything else is returned as-is.
    return obj._data if isinstance(obj, ABCIndexClass) else obj
class DatetimelikeDelegateMixin(PandasDelegate):
    """
    Delegation mechanism, specific for Datetime, Timedelta, and Period types.

    Functionality is delegated from the Index class to an Array class. A
    few things can be customized

    * _delegate_class : type
        The class being delegated to.
    * _delegated_methods, delegated_properties : List
        The list of property / method names being delagated.
    * raw_methods : Set
        The set of methods whose results should *not* be boxed in an
        index, after being returned from the array
    * raw_properties : Set
        The set of properties whose results should *not* be boxed in an
        index, after being returned from the array
    """
    # Dispatch methods whose array result is handed back unboxed.
    _raw_methods = set()  # type: Set[str]
    # Dispatch properties whose array result is handed back unboxed.
    _raw_properties = set()  # type: Set[str]
    name = None
    _data = None

    @property
    def _delegate_class(self):
        raise AbstractMethodError

    def _delegate_property_get(self, name, *args, **kwargs):
        value = getattr(self._data, name)
        if name in self._raw_properties:
            return value
        return Index(value, name=self.name)

    def _delegate_property_set(self, name, value, *args, **kwargs):
        setattr(self._data, name, value)

    def _delegate_method(self, name, *args, **kwargs):
        # Equivalent to operator.methodcaller(name, ...)(self._data).
        value = getattr(self._data, name)(*args, **kwargs)
        if name in self._raw_methods:
            return value
        return Index(value, name=self.name)
|
|
"""Decide which plugins to use for authentication & installation"""
from __future__ import print_function
import os
import logging
import zope.component
from certbot import errors
from certbot import interfaces
from certbot.display import util as display_util
logger = logging.getLogger(__name__)
z_util = zope.component.getUtility
def pick_configurator(
        config, default, plugins,
        question="How would you like to authenticate and install "
                 "certificates?"):
    """Pick a plugin that can both authenticate and install certificates."""
    required = (interfaces.IAuthenticator, interfaces.IInstaller)
    return pick_plugin(config, default, plugins, question, required)
def pick_installer(config, default, plugins,
                   question="How would you like to install certificates?"):
    """Pick a plugin that can install certificates."""
    required = (interfaces.IInstaller,)
    return pick_plugin(config, default, plugins, question, required)
def pick_authenticator(
        config, default, plugins, question="How would you "
        "like to authenticate with the ACME CA?"):
    """Pick a plugin that can authenticate with the ACME CA."""
    required = (interfaces.IAuthenticator,)
    return pick_plugin(config, default, plugins, question, required)
def pick_plugin(config, default, plugins, question, ifaces):
    """Pick plugin.

    :param certbot.interfaces.IConfig config: Configuration
    :param str default: Plugin name supplied by user or ``None``.
    :param certbot.plugins.disco.PluginsRegistry plugins:
        All plugins registered as entry points.
    :param str question: Question to be presented to the user in case
        multiple candidates are found.
    :param list ifaces: Interfaces that plugins must provide.

    :returns: Initialized plugin.
    :rtype: IPlugin
    """
    if default is not None:
        # throw more UX-friendly error if default not in plugins
        filtered = plugins.filter(lambda p_ep: p_ep.name == default)
    else:
        if config.noninteractive_mode:
            # it's really bad to auto-select the single available plugin in
            # non-interactive mode, because an update could later add a second
            # available plugin
            raise errors.MissingCommandlineFlag(
                "Missing command line flags. For non-interactive execution, "
                "you will need to specify a plugin on the command line. Run "
                "with '--help plugins' to see a list of options, and see "
                "https://eff.org/letsencrypt-plugins for more detail on what "
                "the plugins do and how to use them.")
        filtered = plugins.visible().ifaces(ifaces)
    filtered.init(config)
    verified = filtered.verify(ifaces)
    verified.prepare()
    prepared = verified.available()
    if len(prepared) > 1:
        logger.debug("Multiple candidate plugins: %s", prepared)
        # Wrap in list(): on Python 3, dict.values() is a view that
        # choose_plugin cannot index.
        plugin_ep = choose_plugin(list(prepared.values()), question)
        if plugin_ep is None:
            return None
        return plugin_ep.init()
    elif len(prepared) == 1:
        # list() needed: dict views are not subscriptable on Python 3
        # (the original ``prepared.values()[0]`` raised TypeError there).
        plugin_ep = list(prepared.values())[0]
        logger.debug("Single candidate plugin: %s", plugin_ep)
        if plugin_ep.misconfigured:
            return None
        return plugin_ep.init()
    else:
        logger.debug("No candidate plugin")
        return None
def choose_plugin(prepared, question):
    """Allow the user to choose their plugin.

    :param list prepared: List of `~.PluginEntryPoint`.
    :param str question: Question to be presented to the user.

    :returns: Plugin entry point chosen by the user, or ``None`` if the
        user backed out of the menu.
    :rtype: `~.PluginEntryPoint`
    """
    labels = []
    for plugin_ep in prepared:
        suffix = " [Misconfigured]" if plugin_ep.misconfigured else ""
        labels.append(plugin_ep.description_with_name + suffix)

    # Keep re-displaying the menu until the user picks a usable plugin
    # or cancels.
    while True:
        display = z_util(interfaces.IDisplay)
        code, index = display.menu(question, labels, help_label="More Info")

        if code == display_util.OK:
            selection = prepared[index]
            if not selection.misconfigured:
                return selection
            z_util(interfaces.IDisplay).notification(
                "The selected plugin encountered an error while parsing "
                "your server configuration and cannot be used. The error "
                "was:\n\n{0}".format(selection.prepare()),
                height=display_util.HEIGHT, pause=False)
        elif code == display_util.HELP:
            chosen = prepared[index]
            if chosen.misconfigured:
                msg = "Reported Error: %s" % chosen.prepare()
            else:
                msg = chosen.init().more_info()
            z_util(interfaces.IDisplay).notification(
                msg, height=display_util.HEIGHT)
        else:
            return None
# Plugins that authenticate only and cannot install certificates.
noninstaller_plugins = ["webroot", "manual", "standalone"]


def record_chosen_plugins(config, plugins, auth, inst):
    """Record the selected authenticator/installer on the config namespace."""
    cn = config.namespace
    if auth:
        cn.authenticator = plugins.find_init(auth).name
    else:
        cn.authenticator = "None"
    if inst:
        cn.installer = plugins.find_init(inst).name
    else:
        cn.installer = "None"
def choose_configurator_plugins(config, plugins, verb):
    """
    Figure out which configurator we're going to use, modifies
    config.authenticator and config.installer strings to reflect that choice if
    necessary.

    :param certbot.interfaces.IConfig config: Configuration
    :param certbot.plugins.disco.PluginsRegistry plugins: available plugins
    :param str verb: the subcommand being run ("run", "certonly", "install")

    :raises errors.PluginSelectionError: if there was a problem

    :returns: (an `IAuthenticator` or None, an `IInstaller` or None)
    :rtype: tuple
    """
    req_auth, req_inst = cli_plugin_requests(config)

    # Which plugins do we need?
    if verb == "run":
        need_inst = need_auth = True
        from certbot.cli import cli_command
        if req_auth in noninstaller_plugins and not req_inst:
            msg = ('With the {0} plugin, you probably want to use the "certonly" command, eg:{1}'
                   '{1} {2} certonly --{0}{1}{1}'
                   '(Alternatively, add a --installer flag. See https://eff.org/letsencrypt-plugins'
                   '{1} and "--help plugins" for more information.)'.format(
                       req_auth, os.linesep, cli_command))
            raise errors.MissingCommandlineFlag(msg)
    else:
        need_inst = need_auth = False
    if verb == "certonly":
        need_auth = True
    if verb == "install":
        need_inst = True
        if config.authenticator:
            # Logger.warn is a deprecated alias of Logger.warning; use the
            # supported name.
            logger.warning("Specifying an authenticator doesn't make sense in install mode")

    # Try to meet the user's request and/or ask them to pick plugins
    authenticator = installer = None
    if verb == "run" and req_auth == req_inst:
        # Unless the user has explicitly asked for different auth/install,
        # only consider offering a single choice
        authenticator = installer = pick_configurator(config, req_inst, plugins)
    else:
        if need_inst or req_inst:
            installer = pick_installer(config, req_inst, plugins)
        if need_auth:
            authenticator = pick_authenticator(config, req_auth, plugins)
    logger.debug("Selected authenticator %s and installer %s", authenticator, installer)

    # Report on any failures
    if need_inst and not installer:
        diagnose_configurator_problem("installer", req_inst, plugins)
    if need_auth and not authenticator:
        diagnose_configurator_problem("authenticator", req_auth, plugins)

    record_chosen_plugins(config, plugins, authenticator, installer)
    return installer, authenticator
def set_configurator(previously, now):
    """
    Setting configurators multiple ways is okay, as long as they all agree.

    :param str previously: previously identified request for the
        installer/authenticator
    :param str now: the request currently being processed
        (the original docstring misnamed this parameter ``requested``)

    :returns: the request to honor; ``now`` when set, else ``previously``
    :rtype: str

    :raises errors.PluginSelectionError: if ``previously`` and ``now`` are
        both set but disagree
    """
    if not now:
        # we're not actually setting anything
        return previously
    if previously:
        if previously != now:
            msg = "Too many flags setting configurators/installers/authenticators {0} -> {1}"
            raise errors.PluginSelectionError(msg.format(repr(previously), repr(now)))
    return now
def cli_plugin_requests(config):
    """
    Figure out which plugins the user requested with CLI and config options

    :returns: (requested authenticator string or None, requested installer string or None)
    :rtype: tuple
    """
    # --configurator seeds both requests; more specific flags override it.
    req_inst = req_auth = config.configurator
    req_inst = set_configurator(req_inst, config.installer)
    req_auth = set_configurator(req_auth, config.authenticator)

    # These plugins can both authenticate and install.
    for name in ("nginx", "apache"):
        if getattr(config, name):
            req_inst = set_configurator(req_inst, name)
            req_auth = set_configurator(req_auth, name)

    # These plugins only authenticate.
    for name in ("standalone", "webroot", "manual"):
        if getattr(config, name):
            req_auth = set_configurator(req_auth, name)

    logger.debug("Requested authenticator %s and installer %s", req_auth, req_inst)
    return req_auth, req_inst
def diagnose_configurator_problem(cfg_type, requested, plugins):
    """
    Raise the most helpful error message about a plugin being unavailable

    :param str cfg_type: either "installer" or "authenticator"
    :param str requested: the plugin that was requested
    :param .PluginsRegistry plugins: available plugins

    :raises error.PluginSelectionError: always (this function's only job is
        to pick the most informative message)
    """
    if requested:
        if requested in plugins:
            # Installed but failed to prepare.
            msg = ("The {0} plugin is not working; there may be problems with "
                   "your existing configuration.\nThe error was: {1!r}"
                   .format(requested, plugins[requested].problem))
        else:
            msg = "The requested {0} plugin does not appear to be installed".format(requested)
    elif cfg_type == "installer":
        if os.path.exists("/etc/debian_version"):
            # Debian... installers are at least possible
            msg = ('No installers seem to be present and working on your system; '
                   'fix that or try running certbot with the "certonly" command')
        else:
            # XXX update this logic as we make progress on #788 and nginx support
            msg = ('No installers are available on your OS yet; try running '
                   '"letsencrypt-auto certonly" to get a cert you can install manually')
    else:
        msg = "{0} could not be determined or is not installed".format(cfg_type)
    raise errors.PluginSelectionError(msg)
|
|
# This file is part of gorm, an object relational mapper for versioned graphs.
# Copyright (C) 2014 Zachary Spector.
from collections import defaultdict, deque
from .graph import (
Graph,
DiGraph,
MultiGraph,
MultiDiGraph,
)
from .query import QueryEngine
from .cache import Cache, NodesCache, EdgesCache
class GraphNameError(KeyError):
    """Raised when a graph name is already taken or refers to no known graph."""
class ORM(object):
    """Instantiate this with the same string argument you'd use for a
    SQLAlchemy ``create_engine`` call. This will be your interface to
    gorm.

    The instance wraps a :class:`QueryEngine` (``self.db``) and, when
    ``caching`` is on, mirrors branch ancestry, graph objects, and all
    per-(branch, rev) values in memory.
    """
    def __init__(
        self,
        dbstring,
        alchemy=True,
        connect_args={},
        query_engine_class=QueryEngine,
        json_dump=None,
        json_load=None,
        caching=True
    ):
        """Make a SQLAlchemy engine if possible, else a sqlite3 connection. In
        either case, begin a transaction.

        :param dbstring: database URL/path passed to the query engine
        :param alchemy: whether to attempt SQLAlchemy before falling back
        :param connect_args: extra connection arguments for the engine
            (NOTE(review): mutable default ``{}`` is shared across calls;
            harmless only while never mutated here)
        :param query_engine_class: factory producing ``self.db``
        :param json_dump: optional JSON serializer override
        :param json_load: optional JSON deserializer override
        :param caching: mirror branch/rev/graph state in memory
        """
        self.db = query_engine_class(dbstring, connect_args, alchemy, json_dump, json_load)
        self._obranch = None  # in-memory copy of the 'branch' global
        self._orev = None  # in-memory copy of the 'rev' global
        self.db.initdb()
        # I will be recursing a lot so just cache all the branch info
        if caching:
            self.caching = True
            # Mirror of the DB's global key/value store, minus branch/rev,
            # which get their own attributes above.
            self._global_cache = self.db._global_cache = {}
            for k, v in self.db.global_items():
                if k == 'branch':
                    self._obranch = v
                elif k == 'rev':
                    self._orev = v
                else:
                    self._global_cache[k] = v
            # parent branch -> set of child branches
            self._childbranch = defaultdict(set)
            # child branch -> (parent branch, revision the child starts at);
            # 'master' is never a key here
            self._parentbranch_rev = {}
            for (branch, parent, parent_rev) in self.db.all_branches():
                if branch != 'master':
                    self._parentbranch_rev[branch] = (parent, parent_rev)
                self._childbranch[parent].add(branch)
            # graph name -> graph object, instantiated per the stored type
            self.graph = {}
            for (graph, typ) in self.db.graphs_types():
                self.graph[graph] = {
                    'Graph': Graph,
                    'DiGraph': DiGraph,
                    'MultiGraph': MultiGraph,
                    'MultiDiGraph': MultiDiGraph
                }[typ](self, graph)
            self._obranch = self.branch
            self._orev = self.rev
            self._active_branches_cache = []
            # Route the query engine's branch walks through the cache.
            self.db.active_branches = self._active_branches
            # Link timestream rows child->parent, requeueing rows whose
            # parent hasn't been seen yet.
            todo = deque(self.db.timestream_data())
            while todo:
                (branch, parent, parent_tick) = working = todo.popleft()
                if branch == 'master':
                    continue
                # NOTE(review): ``self._branches`` is never assigned anywhere
                # in this class -- this membership test raises AttributeError
                # as written, and looks like it should consult
                # ``self._parentbranch_rev`` (or a set of known branches,
                # including 'master'). If the test could stay False for every
                # remaining row, the requeue below would also loop forever.
                # Confirm the intended collection before relying on this.
                if parent in self._branches:
                    self._parentbranch_rev[branch] = (parent, parent_tick)
                    self._childbranch[parent].add(branch)
                else:
                    todo.append(working)
            # Preload every per-(branch, rev) cache from full DB dumps.
            self._graph_val_cache = Cache(self)
            for row in self.db.graph_val_dump():
                self._graph_val_cache.store(*row)
            self._node_val_cache = Cache(self)
            for row in self.db.node_val_dump():
                self._node_val_cache.store(*row)
            self._nodes_cache = NodesCache(self)
            for row in self.db.nodes_dump():
                self._nodes_cache.store(*row)
            self._edge_val_cache = Cache(self)
            for row in self.db.edge_val_dump():
                self._edge_val_cache.store(*row)
            self._edges_cache = EdgesCache(self)
            for row in self.db.edges_dump():
                self._edges_cache.store(*row)

    def __enter__(self):
        """Enable the use of the ``with`` keyword"""
        return self

    def __exit__(self, *args):
        """Alias for ``close``"""
        self.close()

    def _havebranch(self, b):
        """Private use. Checks that the branch is known about."""
        # NOTE(review): ``self.caching`` is only assigned when the
        # constructor ran with caching=True; with caching=False this
        # attribute read fails unless a class attribute exists elsewhere.
        if self.caching and b in self._parentbranch_rev:
            return True
        return self.db.have_branch(b)

    def is_parent_of(self, parent, child):
        """Return whether ``child`` is a branch descended from ``parent`` at
        any remove.
        """
        # 'master' is the root: ancestor of everything, descended from nothing.
        if parent == 'master':
            return True
        if child == 'master':
            return False
        if child not in self._parentbranch_rev:
            raise ValueError("The branch {} seems not to have ever been created".format(child))
        if self._parentbranch_rev[child][0] == parent:
            return True
        # Recurse up the ancestry one step at a time.
        return self.is_parent_of(parent, self._parentbranch_rev[child][0])

    @property
    def branch(self):
        """Return the global value ``branch``, or ``self._obranch`` if it's
        set
        """
        if self._obranch is not None:
            return self._obranch
        return self.db.globl['branch']

    @branch.setter
    def branch(self, v):
        """Set the global value ``branch`` and note that the branch's (parent,
        parent_rev) are the (branch, tick) set previously
        """
        curbranch = self.branch
        currev = self.rev
        if not self._havebranch(v):
            # assumes the present revision in the parent branch has
            # been finalized.
            self.db.new_branch(v, curbranch, currev)
        # make sure I'll end up within the revision range of the
        # destination branch
        if v != 'master':
            if self.caching:
                if v not in self._parentbranch_rev:
                    self._parentbranch_rev[v] = (curbranch, currev)
                parrev = self._parentbranch_rev[v][1]
            else:
                parrev = self.db.parrev(v)
            if currev < parrev:
                raise ValueError(
                    "Tried to jump to branch {br}, which starts at revision {rv}. "
                    "Go to rev {rv} or later to use this branch.".format(
                        br=v,
                        rv=parrev
                    )
                )
        if self.caching:
            # Cached mode: only update the in-memory copy; the DB global is
            # synced on commit/close.
            self._obranch = v
        else:
            self.db.globl['branch'] = v

    @property
    def rev(self):
        """Return the global value ``rev``, or ``self._orev`` if that's set"""
        if self._orev is not None:
            return self._orev
        return self.db.globl['rev']

    @rev.setter
    def rev(self, v):
        """Set the global value ``rev``, first checking that it's not before
        the start of this branch. If it is, also go to the parent
        branch.
        """
        # first make sure the cursor is not before the start of this branch
        branch = self.branch
        if branch != 'master':
            if self.caching:
                (parent, parent_rev) = self._parentbranch_rev[branch]
            else:
                (parent, parent_rev) = self.db.parparrev(branch)
            if v < int(parent_rev):
                raise ValueError(
                    "The revision number {revn} "
                    "occurs before the start of "
                    "the branch {brnch}".format(revn=v, brnch=branch)
                )
        if self.caching:
            # Cached mode: DB global is synced on commit/close.
            self._orev = v
        else:
            self.db.globl['rev'] = v

    def commit(self):
        """Alias of ``self.db.commit``"""
        if self.caching:
            # Flush the cached branch/rev back to the DB before committing.
            self.db.globl['branch'] = self._obranch
            self.db.globl['rev'] = self._orev
        self.db.commit()

    def close(self):
        """Alias of ``self.db.close``"""
        if self.caching:
            # Flush the cached branch/rev back to the DB before closing.
            self.db.globl['branch'] = self._obranch
            self.db.globl['rev'] = self._orev
        self.db.close()

    def initdb(self):
        """Alias of ``self.db.initdb``"""
        self.db.initdb()

    def _init_graph(self, name, type_s='Graph'):
        # Register the graph in the DB; the caller constructs the object.
        if self.db.have_graph(name):
            raise GraphNameError("Already have a graph by that name")
        self.db.new_graph(name, type_s)

    def new_graph(self, name, data=None, **attr):
        """Return a new instance of type Graph, initialized with the given
        data if provided.
        """
        self._init_graph(name, 'Graph')
        g = Graph(self, name, data, **attr)
        if self.caching:
            self.graph[name] = g
        return g

    def new_digraph(self, name, data=None, **attr):
        """Return a new instance of type DiGraph, initialized with the given
        data if provided.
        """
        self._init_graph(name, 'DiGraph')
        dg = DiGraph(self, name, data, **attr)
        if self.caching:
            self.graph[name] = dg
        return dg

    def new_multigraph(self, name, data=None, **attr):
        """Return a new instance of type MultiGraph, initialized with the given
        data if provided.
        """
        self._init_graph(name, 'MultiGraph')
        mg = MultiGraph(self, name, data, **attr)
        if self.caching:
            self.graph[name] = mg
        return mg

    def new_multidigraph(self, name, data=None, **attr):
        """Return a new instance of type MultiDiGraph, initialized with the given
        data if provided.
        """
        self._init_graph(name, 'MultiDiGraph')
        mdg = MultiDiGraph(self, name, data, **attr)
        if self.caching:
            self.graph[name] = mdg
        return mdg

    def get_graph(self, name):
        """Return a graph previously created with ``new_graph``,
        ``new_digraph``, ``new_multigraph``, or
        ``new_multidigraph``
        """
        if self.caching and name in self.graph:
            return self.graph[name]
        graphtypes = {
            'Graph': Graph,
            'DiGraph': DiGraph,
            'MultiGraph': MultiGraph,
            'MultiDiGraph': MultiDiGraph
        }
        type_s = self.db.graph_type(name)
        if type_s not in graphtypes:
            raise GraphNameError("I don't know of a graph named {}".format(name))
        g = graphtypes[type_s](self, name)
        if self.caching:
            self.graph[name] = g
        return g

    def del_graph(self, name):
        """Remove all traces of a graph's existence from the database"""
        # make sure the graph exists before deleting anything
        self.get_graph(name)
        self.db.del_graph(name)
        if self.caching and name in self.graph:
            del self.graph[name]

    def _active_branches(self, branch=None, rev=None):
        """Private use. Iterate over (branch, rev) pairs, where the branch is
        a descendant of the previous (starting with whatever branch is
        presently active and ending at 'master'), and the rev is the
        latest revision in the branch that matters.
        """
        b = branch or self.branch
        r = rev or self.rev
        if self.caching:
            # Walk up the cached ancestry instead of querying the DB.
            yield b, r
            while b in self._parentbranch_rev:
                (b, r) = self._parentbranch_rev[b]
                yield b, r
            return
        for pair in self.db.active_branches(b, r):
            yield pair

    def _branch_descendants(self, branch=None):
        """Iterate over all branches immediately descended from the current
        one (or the given one, if available).
        """
        branch = branch or self.branch
        if not self.caching:
            for desc in self.db.branch_descendants(branch):
                yield desc
            return
        # NOTE(review): ``_parentbranch_rev`` maps child -> (parent, rev)
        # everywhere else in this class, so the unpacking below appears to
        # have the names swapped: the dict key is the child and the first
        # tuple element is the parent. As written this yields the *parent*
        # of ``branch`` rather than its children. Confirm intended mapping.
        for (parent, (child, rev)) in self._parentbranch_rev.items():
            if parent == branch:
                yield child
__all__ = [ORM, 'alchemy', 'graph', 'query', 'window', 'xjson']
|
|
# -*- coding: utf-8 -*-
import json
from tests import TestElasticmock, INDEX_NAME, DOC_TYPE, BODY, DOC_ID
class TestBulk(TestElasticmock):
    """Tests for the mocked Elasticsearch ``bulk`` endpoint.

    Each test builds a newline-delimited action/body payload (the bulk API
    wire format) and checks the per-item responses returned by the mock.
    """

    def test_should_bulk_index_documents_index_creates(self):
        # 'index' actions without an _id should create a fresh document
        # every time (status 201, result 'created').
        action = {'index': {'_index': INDEX_NAME, '_type': DOC_TYPE}}
        action_json = json.dumps(action)
        body_json = json.dumps(BODY, default=str)
        num_of_documents = 10
        lines = []
        for count in range(0, num_of_documents):
            lines.append(action_json)
            lines.append(body_json)
        body = '\n'.join(lines)
        data = self.es.bulk(body=body)
        items = data.get('items')
        self.assertFalse(data.get('errors'))
        self.assertEqual(num_of_documents, len(items))
        for item in items:
            index = item.get('index')
            self.assertEqual(DOC_TYPE, index.get('_type'))
            self.assertEqual(INDEX_NAME, index.get('_index'))
            self.assertEqual('created', index.get('result'))
            self.assertEqual(201, index.get('status'))

    def test_should_bulk_index_documents_create_creates(self):
        # 'create' succeeds for new documents but must conflict (409) when
        # re-creating an existing _id.
        create_action = {'create': {'_index': INDEX_NAME, '_type': DOC_TYPE}}
        create_with_id = {'create': {'_index': INDEX_NAME, '_type': DOC_TYPE, '_id': DOC_ID}}
        actions = [
            json.dumps(create_action),
            json.dumps(BODY, default=str),
            json.dumps(create_action),
            json.dumps(BODY, default=str),
            json.dumps(create_with_id),
            json.dumps(BODY, default=str),
            # Will fail on created document with the same ID
            json.dumps(create_with_id),
            json.dumps(BODY, default=str),
        ]
        body = '\n'.join(actions)
        data = self.es.bulk(body=body)
        items = data.get('items')
        self.assertTrue(data.get('errors'))
        self.assertEqual(4, len(items))
        # Last item is the duplicate create: must carry the conflict error.
        last_item = items.pop()
        self.assertEqual(last_item['create']['error'], 'version_conflict_engine_exception')
        self.assertEqual(last_item['create']['status'], 409)
        for item in items:
            index = item.get('create')
            self.assertEqual(DOC_TYPE, index.get('_type'))
            self.assertEqual(INDEX_NAME, index.get('_index'))
            self.assertEqual('created', index.get('result'))
            self.assertEqual(201, index.get('status'))

    def test_should_bulk_index_documents_index_updates(self):
        # 'index' with a fixed _id: first hit creates (201), every repeat
        # overwrites the same document (200, 'updated').
        action = {'index': {'_index': INDEX_NAME, '_id': DOC_ID, '_type': DOC_TYPE}}
        action_json = json.dumps(action)
        body_json = json.dumps(BODY, default=str)
        num_of_documents = 10
        lines = []
        for count in range(0, num_of_documents):
            lines.append(action_json)
            lines.append(body_json)
        body = '\n'.join(lines)
        data = self.es.bulk(body=body)
        items = data.get('items')
        self.assertFalse(data.get('errors'))
        self.assertEqual(num_of_documents, len(items))
        first_item = items.pop(0)
        self.assertEqual(first_item["index"]["status"], 201)
        self.assertEqual(first_item["index"]["result"], "created")
        for item in items:
            index = item.get('index')
            self.assertEqual(DOC_TYPE, index.get('_type'))
            self.assertEqual(INDEX_NAME, index.get('_index'))
            self.assertEqual('updated', index.get('result'))
            self.assertEqual(200, index.get('status'))

    def test_should_bulk_index_documents_update_updates(self):
        # 'update' actions against a document created first in the same
        # payload; update bodies use the {'doc': ...} envelope.
        action = {'update': {'_index': INDEX_NAME, '_id': DOC_ID, '_type': DOC_TYPE}}
        action_json = json.dumps(action)
        create_action_json = json.dumps(
            {'create': {'_index': INDEX_NAME, '_id': DOC_ID, '_type': DOC_TYPE}}
        )
        body_json = json.dumps({'doc': BODY}, default=str)
        num_of_documents = 4
        lines = [create_action_json, json.dumps(BODY, default=str)]
        for count in range(0, num_of_documents):
            lines.append(action_json)
            lines.append(body_json)
        body = '\n'.join(lines)
        data = self.es.bulk(body=body)
        items = data.get('items')
        self.assertFalse(data.get('errors'))
        # One create plus num_of_documents updates.
        self.assertEqual(num_of_documents + 1, len(items))
        first_item = items.pop(0)
        self.assertEqual(first_item["create"]["status"], 201)
        self.assertEqual(first_item["create"]["result"], "created")
        for item in items:
            index = item.get('update')
            self.assertEqual(DOC_TYPE, index.get('_type'))
            self.assertEqual(INDEX_NAME, index.get('_index'))
            self.assertEqual('updated', index.get('result'))
            self.assertEqual(200, index.get('status'))

    def test_should_bulk_index_documents_delete_deletes(self):
        # 'delete' of a document created earlier in the same payload;
        # delete actions carry no body line.
        delete_action = {'delete': {'_index': INDEX_NAME, '_id': DOC_ID, '_type': DOC_TYPE}}
        delete_action_json = json.dumps(delete_action)
        create_action_json = json.dumps(
            {'create': {'_index': INDEX_NAME, '_id': DOC_ID, '_type': DOC_TYPE}}
        )
        lines = [
            create_action_json,
            json.dumps(BODY, default=str),
            delete_action_json,
        ]
        body = '\n'.join(lines)
        data = self.es.bulk(body=body)
        items = data.get('items')
        self.assertFalse(data.get('errors'))
        self.assertEqual(2, len(items))
        first_item = items.pop(0)
        self.assertEqual(first_item["create"]["status"], 201)
        self.assertEqual(first_item["create"]["result"], "created")
        self.assertEqual(first_item["create"]['_type'], DOC_TYPE)
        self.assertEqual(first_item["create"]['_id'], DOC_ID)
        second_item = items.pop(0)
        self.assertEqual(second_item["delete"]["status"], 200)
        self.assertEqual(second_item["delete"]["result"], "deleted")
        self.assertEqual(second_item["delete"]['_type'], DOC_TYPE)
        self.assertEqual(second_item["delete"]['_id'], DOC_ID)

    def test_should_bulk_index_documents_mixed_actions(self):
        # End-to-end mix of create/index/update/delete, including the error
        # cases (409 conflict, 404 missing) after the document is deleted.
        # Expected status per action pair is noted inline below.
        doc_body = json.dumps(BODY, default=str)
        doc_id_1 = 1
        doc_id_2 = 2
        actions = [
            json.dumps({'create': {'_index': INDEX_NAME, '_type': DOC_TYPE, '_id': doc_id_1}}),
            doc_body,  # 201
            json.dumps({'create': {'_index': INDEX_NAME, '_type': DOC_TYPE, '_id': doc_id_1}}),
            doc_body,  # 409
            json.dumps({'index': {'_index': INDEX_NAME, '_type': DOC_TYPE, '_id': doc_id_2}}),
            doc_body,  # 201
            json.dumps({'index': {'_index': INDEX_NAME, '_type': DOC_TYPE, '_id': doc_id_2}}),
            doc_body,  # 200
            json.dumps({'update': {'_index': INDEX_NAME, '_type': DOC_TYPE, '_id': doc_id_1}}),
            doc_body,  # 200
            json.dumps({'delete': {'_index': INDEX_NAME, '_type': DOC_TYPE, '_id': doc_id_1}}),
            # 200
            json.dumps({'update': {'_index': INDEX_NAME, '_type': DOC_TYPE, '_id': doc_id_1}}),
            doc_body,  # 404
            json.dumps({'delete': {'_index': INDEX_NAME, '_type': DOC_TYPE, '_id': doc_id_1}}),
            # 404
        ]
        body = '\n'.join(actions)
        data = self.es.bulk(body=body)
        expected = [
            {'create': {'_type': DOC_TYPE, '_id': 1, '_index': INDEX_NAME,
                        '_version': 1, 'status': 201, 'result': 'created'}},
            {'create': {'_type': DOC_TYPE, '_id': 1, '_index': INDEX_NAME,
                        '_version': 1, 'status': 409,
                        'error': 'version_conflict_engine_exception'}},
            {'index': {'_type': DOC_TYPE, '_id': 2, '_index': INDEX_NAME,
                       '_version': 1, 'status': 201, 'result': 'created'}},
            {'index': {'_type': DOC_TYPE, '_id': 2, '_index': INDEX_NAME,
                       '_version': 1, 'status': 200, 'result': 'updated'}},
            {'update': {'_type': DOC_TYPE, '_id': 1, '_index': INDEX_NAME,
                        '_version': 1, 'status': 200, 'result': 'updated'}},
            {'delete': {'_type': DOC_TYPE, '_id': 1, '_index': INDEX_NAME,
                        '_version': 1, 'result': 'deleted', 'status': 200}},
            {'update': {'_type': DOC_TYPE, '_id': 1, '_index': INDEX_NAME,
                        '_version': 1, 'status': 404, 'error': 'document_missing_exception'}},
            {'delete': {'_type': DOC_TYPE, '_id': 1, '_index': INDEX_NAME,
                        '_version': 1, 'error': 'not_found', 'status': 404}},
        ]
        actual = data.get('items')
        self.assertTrue(data.get('errors'))
        self.assertEqual(actual, expected)
|
|
from FWCore.ParameterSet.VarParsing import VarParsing

# Command-line options for this cmsRun configuration. All custom options are
# singletons; 'analysis' mode also provides inputFiles/maxEvents/outputFile.
options = VarParsing('analysis')
options.register('config', default = '', mult = VarParsing.multiplicity.singleton, mytype = VarParsing.varType.string, info = 'Single-switch config. Values: Prompt17, Summer16')
options.register('globaltag', default = '', mult = VarParsing.multiplicity.singleton, mytype = VarParsing.varType.string, info = 'Global tag')
options.register('pdfname', default = '', mult = VarParsing.multiplicity.singleton, mytype = VarParsing.varType.string, info = 'PDF name')
options.register('redojec', default = '', mult = VarParsing.multiplicity.singleton, mytype = VarParsing.varType.string, info = 'Redo JEC')
options.register('connect', default = '', mult = VarParsing.multiplicity.singleton, mytype = VarParsing.varType.string, info = 'Globaltag connect')
options.register('lumilist', default = '', mult = VarParsing.multiplicity.singleton, mytype = VarParsing.varType.string, info = 'Good lumi list JSON')
options.register('isData', default = False, mult = VarParsing.multiplicity.singleton, mytype = VarParsing.varType.bool, info = 'True if running on Data, False if running on MC')
options.register('useTrigger', default = True, mult = VarParsing.multiplicity.singleton, mytype = VarParsing.varType.bool, info = 'Fill trigger information')
options.register('printLevel', default = 0, mult = VarParsing.multiplicity.singleton, mytype = VarParsing.varType.int, info = 'Debug level of the ntuplizer')
options.register('skipEvents', default = 0, mult = VarParsing.multiplicity.singleton, mytype = VarParsing.varType.int, info = 'Skip first events')
# Drop VarParsing's auto-registered 'numEvent%d' tag so it is never parsed.
options._tags.pop('numEvent%d')
options._tagOrder.remove('numEvent%d')
options.parseArguments()
# NOTE(review): this unconditionally overrides whatever config= was passed on
# the command line (and the 'Values: Prompt17, Summer16' hint above does not
# match the branches below). Looks like a debugging pin left in -- confirm.
options.config = '31Mar2018'
# Global tags
# https://twiki.cern.ch/twiki/bin/view/CMSPublic/SWGuideFrontierConditions
# NOTE(review): 'redojec' is registered as a string option but assigned bool
# True here -- confirm downstream checks treat both consistently.
if options.config == '31Mar2018':
    # re-miniaod of 2017 legacy rereco
    options.isData = True
    options.globaltag = '94X_dataRun2_ReReco_EOY17_v6'
    options.redojec = True
elif options.config == '2018Prompt':
    options.isData = True
    options.globaltag = '101X_dataRun2_Prompt_v10'
elif options.config == 'Fall17':
    options.isData = False
    options.globaltag = '94X_mc2017_realistic_v14'
    options.pdfname = 'NNPDF3.1'
    options.redojec = True
elif options.config:
    raise RuntimeError('Unknown config ' + options.config)

import FWCore.ParameterSet.Config as cms

process = cms.Process('NTUPLES')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.MessageLogger.cerr.FwkReport.reportEvery = 100
# Throttle known-noisy message categories to at most 10 reports each.
for cat in ['PandaProducer', 'JetPtMismatchAtLowPt', 'JetPtMismatch', 'NullTransverseMomentum', 'MissingJetConstituent']:
    process.MessageLogger.categories.append(cat)
    setattr(process.MessageLogger.cerr, cat, cms.untracked.PSet(limit = cms.untracked.int32(10)))
############
## SOURCE ##
############

### INPUT FILES
process.source = cms.Source('PoolSource',
    skipEvents = cms.untracked.uint32(options.skipEvents),
    fileNames = cms.untracked.vstring(options.inputFiles)
)

### NUMBER OF EVENTS
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(options.maxEvents)
)

### LUMI MASK
# Restrict processing to certified lumi sections when a JSON is supplied.
if options.lumilist != '':
    import FWCore.PythonUtilities.LumiList as LumiList
    process.source.lumisToProcess = LumiList.LumiList(filename = options.lumilist).getVLuminosityBlockRange()

##############
## SERVICES ##
##############

process.load('Configuration.Geometry.GeometryRecoDB_cff')
# Sim geometry is only needed for MC.
if not options.isData:
    process.load('Configuration.Geometry.GeometrySimDB_cff')
process.load('Configuration.StandardSequences.Services_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff')
process.GlobalTag.globaltag = options.globaltag

# Fixed seeds for the ntuplizer and the e/gamma smearing producers so runs
# are reproducible.
process.RandomNumberGeneratorService.panda = cms.PSet(
    initialSeed = cms.untracked.uint32(1234567),
    engineName = cms.untracked.string('TRandom3')
)
process.RandomNumberGeneratorService.smearedElectrons = cms.PSet(
    initialSeed = cms.untracked.uint32(89101112),
    engineName = cms.untracked.string('TRandom3')
)
process.RandomNumberGeneratorService.smearedPhotons = cms.PSet(
    initialSeed = cms.untracked.uint32(13141516),
    engineName = cms.untracked.string('TRandom3')
)
#############################
## RECO SEQUENCE AND SKIMS ##
#############################

### EGAMMA CORRECTIONS
# https://twiki.cern.ch/twiki/bin/view/CMS/EGMSmearer
# Preselect electrons before smearing to keep the corrections cheap.
process.selectedElectrons = cms.EDFilter('PATElectronSelector',
    src = cms.InputTag('slimmedElectrons'),
    cut = cms.string('pt > 5 && abs(eta) < 2.5')
)
from EgammaAnalysis.ElectronTools.calibratedPatElectronsRun2_cfi import calibratedPatElectrons
from EgammaAnalysis.ElectronTools.calibratedPatPhotonsRun2_cfi import calibratedPatPhotons
import EgammaAnalysis.ElectronTools.calibrationTablesRun2

# Scale/smearing correction tables keyed by campaign name.
egmSmearingSource = EgammaAnalysis.ElectronTools.calibrationTablesRun2.files
egmSmearingType = 'Run2017_17Nov2017_v1'
process.smearedElectrons = calibratedPatElectrons.clone(
    electrons = 'selectedElectrons',
    isMC = (not options.isData),
    correctionFile = egmSmearingSource[egmSmearingType]
)
process.smearedPhotons = calibratedPatPhotons.clone(
    photons = 'slimmedPhotons',
    isMC = (not options.isData),
    correctionFile = egmSmearingSource[egmSmearingType]
)
egmCorrectionSequence = cms.Sequence(
    process.selectedElectrons +
    process.smearedElectrons +
    process.smearedPhotons
)

### Vanilla MET
# this is the most basic MET one can find
# even if we override with various types of MET later on, create this so we have a consistent calo MET
# https://twiki.cern.ch/twiki/bin/view/CMS/MissingETUncertaintyPrescription
from PhysicsTools.PatUtils.tools.runMETCorrectionsAndUncertainties import runMetCorAndUncFromMiniAOD
runMetCorAndUncFromMiniAOD(
    process,
    isData = options.isData,
)
metSequence = cms.Sequence(
    process.fullPatMetSequence
)

### PUPPI
from PhysicsTools.PatAlgos.slimming.puppiForMET_cff import makePuppiesFromMiniAOD
## Creates process.puppiMETSequence which includes 'puppi' and 'puppiForMET' (= EDProducer('PuppiPhoton'))
## By default, does not use specific photon ID for PuppiPhoton (which was the case in 80X)
makePuppiesFromMiniAOD(process, createScheduledSequence = True)
## Just renaming
puppiSequence = process.puppiMETSequence

# Recompute puppi weights instead of reading them from MiniAOD.
process.puppiNoLep.useExistingWeights = False
process.puppi.useExistingWeights = False

### CHS
# Charged-hadron-subtracted candidate collection for jet clustering.
process.pfCHS = cms.EDFilter('CandPtrSelector',
    src = cms.InputTag('packedPFCandidates'),
    cut = cms.string('fromPV')
)
### EGAMMA ID
# https://twiki.cern.ch/twiki/bin/view/CMS/EgammaIDRecipesRun2
# https://twiki.cern.ch/twiki/bin/view/CMS/CutBasedElectronIdentificationRun2
# https://twiki.cern.ch/twiki/bin/view/CMS/CutBasedPhotonIdentificationRun2
# Value-map input tags and effective-area files; these are transferred onto
# the panda electron/photon fillers via the setattr loops in the NTUPLES
# section below.
electronIdParams = {
    'vetoId': 'egmGsfElectronIDs:cutBasedElectronID-Fall17-94X-V1-veto',
    'looseId': 'egmGsfElectronIDs:cutBasedElectronID-Fall17-94X-V1-loose',
    'mediumId': 'egmGsfElectronIDs:cutBasedElectronID-Fall17-94X-V1-medium',
    'tightId': 'egmGsfElectronIDs:cutBasedElectronID-Fall17-94X-V1-tight',
    'mvaWP90': 'egmGsfElectronIDs:mvaEleID-Fall17-noIso-V1-wp90',
    'mvaWP80': 'egmGsfElectronIDs:mvaEleID-Fall17-noIso-V1-wp80',
    'mvaWPLoose': 'egmGsfElectronIDs:mvaEleID-Fall17-noIso-V1-wpLoose',
    'mvaIsoWP90': 'egmGsfElectronIDs:mvaEleID-Fall17-iso-V1-wp90',
    'mvaIsoWP80': 'egmGsfElectronIDs:mvaEleID-Fall17-iso-V1-wp80',
    'mvaIsoWPLoose': 'egmGsfElectronIDs:mvaEleID-Fall17-iso-V1-wpLoose',
    'hltId': 'egmGsfElectronIDs:cutBasedElectronHLTPreselection-Summer16-V1', # seems like we don't have these for >= 2017?
    'mvaValuesMap': 'electronMVAValueMapProducer:ElectronMVAEstimatorRun2Spring16GeneralPurposeV1Values',
    #'mvaCategoriesMap': 'electronMVAValueMapProducer:ElectronMVAEstimatorRun2Spring16GeneralPurposeV1Categories',
    'combIsoEA': 'RecoEgamma/ElectronIdentification/data/Fall17/effAreaElectrons_cone03_pfNeuHadronsAndPhotons_92X.txt',
    'ecalIsoEA': 'RecoEgamma/ElectronIdentification/data/Summer16/effAreaElectrons_HLT_ecalPFClusterIso.txt',
    'hcalIsoEA': 'RecoEgamma/ElectronIdentification/data/Summer16/effAreaElectrons_HLT_hcalPFClusterIso.txt'
}
photonIdParams = {
    'looseId': 'egmPhotonIDs:cutBasedPhotonID-Fall17-94X-V1-loose',
    'mediumId': 'egmPhotonIDs:cutBasedPhotonID-Fall17-94X-V1-medium',
    'tightId': 'egmPhotonIDs:cutBasedPhotonID-Fall17-94X-V1-tight',
    'chIsoEA': 'RecoEgamma/PhotonIdentification/data/Fall17/effAreaPhotons_cone03_pfChargedHadrons_90percentBased_TrueVtx.txt',
    'nhIsoEA': 'RecoEgamma/PhotonIdentification/data/Fall17/effAreaPhotons_cone03_pfNeutralHadrons_90percentBased_TrueVtx.txt',
    'phIsoEA': 'RecoEgamma/PhotonIdentification/data/Fall17/effAreaPhotons_cone03_pfPhotons_90percentBased_TrueVtx.txt'
}
electronIdModules = [
    'RecoEgamma.ElectronIdentification.Identification.mvaElectronID_Fall17_noIso_V1_cff',
    'RecoEgamma.ElectronIdentification.Identification.mvaElectronID_Fall17_iso_V1_cff',
    'RecoEgamma.ElectronIdentification.Identification.cutBasedElectronID_Fall17_94X_V1_cff',
    # NOTE(review): 'Preselecition' appears to match the (misspelled) upstream
    # RecoEgamma cff file name -- confirm before "fixing" the spelling here.
    'RecoEgamma.ElectronIdentification.Identification.cutBasedElectronHLTPreselecition_Summer16_V1_cff'
]
photonIdModules = [
    'RecoEgamma.PhotonIdentification.Identification.cutBasedPhotonID_Fall17_94X_V1_TrueVtx_cff'
]
from PhysicsTools.SelectorUtils.tools.vid_id_tools import setupAllVIDIdsInModule, setupVIDElectronSelection, setupVIDPhotonSelection, switchOnVIDElectronIdProducer, switchOnVIDPhotonIdProducer, DataFormat
# Loads egmGsfElectronIDs
switchOnVIDElectronIdProducer(process, DataFormat.MiniAOD)
for idmod in electronIdModules:
    setupAllVIDIdsInModule(process, idmod, setupVIDElectronSelection)
switchOnVIDPhotonIdProducer(process, DataFormat.MiniAOD)
for idmod in photonIdModules:
    setupAllVIDIdsInModule(process, idmod, setupVIDPhotonSelection)
process.load('PandaProd.Auxiliary.WorstIsolationProducer_cfi')
egmIdSequence = cms.Sequence(
    process.photonIDValueMapProducer +
    process.egmPhotonIDs +
    process.electronMVAValueMapProducer +
    process.egmGsfElectronIDs +
    process.worstIsolationProducer
)
### REMAKE CHS JETS WITH DEEP FLAVOR
from PandaProd.Producer.utils.makeJets_cff import makeJets
slimmedJetsSequence = makeJets(process, options.isData, 'AK4PFchs', 'pfCHS', 'DeepFlavor')
if options.redojec:
    ### JET RE-CORRECTION
    # Re-apply jet energy corrections from the current global tag on top of
    # the miniAOD puppi jets (the CHS jets are rebuilt from scratch above).
    from PhysicsTools.PatAlgos.producersLayer1.jetUpdater_cff import updatedPatJetCorrFactors, updatedPatJets
    jecLevels= ['L1FastJet', 'L2Relative', 'L3Absolute']
    if options.isData:
        # Residual corrections are data-only.
        jecLevels.append('L2L3Residual')
    # slimmedJets made from scratch
    #process.updatedPatJetCorrFactors = updatedPatJetCorrFactors.clone(
    #    src = cms.InputTag('slimmedJets', '', cms.InputTag.skipCurrentProcess()),
    #    levels = cms.vstring(*jecLevels),
    #)
    #
    #process.slimmedJets = updatedPatJets.clone(
    #    jetSource = cms.InputTag('slimmedJets', '', cms.InputTag.skipCurrentProcess()),
    #    addJetCorrFactors = cms.bool(True),
    #    jetCorrFactorsSource = cms.VInputTag(cms.InputTag('updatedPatJetCorrFactors')),
    #    addBTagInfo = cms.bool(False),
    #    addDiscriminators = cms.bool(False)
    #)
    # skipCurrentProcess(): read the upstream (miniAOD) collection, not the
    # module of the same name being defined in this process.
    process.updatedPatJetCorrFactorsPuppi = updatedPatJetCorrFactors.clone(
        src = cms.InputTag('slimmedJetsPuppi', '', cms.InputTag.skipCurrentProcess()),
        levels = cms.vstring(*jecLevels),
    )
    process.slimmedJetsPuppi = updatedPatJets.clone(
        jetSource = cms.InputTag('slimmedJetsPuppi', '', cms.InputTag.skipCurrentProcess()),
        addJetCorrFactors = cms.bool(True),
        jetCorrFactorsSource = cms.VInputTag(cms.InputTag('updatedPatJetCorrFactorsPuppi')),
        addBTagInfo = cms.bool(False),
        addDiscriminators = cms.bool(False)
    )
    ### MET RE-CORRECTION
    # pfMet is already corrected above in the VANILLA MET section
    runMetCorAndUncFromMiniAOD(
        process,
        isData = options.isData,
        metType = "Puppi",
        postfix = "Puppi",
        jetFlavor = "AK4PFPuppi"
    )
    jetRecorrectionSequence = cms.Sequence(
        #process.updatedPatJetCorrFactors +
        #process.slimmedJets +
        process.updatedPatJetCorrFactorsPuppi +
        process.slimmedJetsPuppi +
        process.fullPatMetSequencePuppi
    )
else:
    # No re-correction requested: keep an empty placeholder so the reco
    # path below can include the sequence unconditionally.
    jetRecorrectionSequence = cms.Sequence()
### FAT JETS
from PandaProd.Producer.utils.makeFatJets_cff import initFatJets, makeFatJets
# initFatJets prepares shared inputs; each makeFatJets call then builds one
# (algorithm, PU-mitigation) combination.
fatJetInitSequence = initFatJets(process, options.isData, ['AK8', 'CA15'])
ak8CHSSequence = makeFatJets(
    process,
    isData = options.isData,
    label = 'AK8PFchs',
    candidates = 'pfCHS'
)
ak8PuppiSequence = makeFatJets(
    process,
    isData = options.isData,
    label = 'AK8PFPuppi',
    candidates = 'puppi'
)
ca15PuppiSequence = makeFatJets(
    process,
    isData = options.isData,
    label = 'CA15PFPuppi',
    candidates = 'puppi'
)
fatJetSequence = cms.Sequence(
    fatJetInitSequence +
    ak8CHSSequence +
    ak8PuppiSequence +
    ca15PuppiSequence
)
### MERGE GEN PARTICLES
# NOTE(review): the trailing 'and False' makes do_merge always False, i.e.
# the gen-particle merge is deliberately disabled; the branch is kept so it
# can be re-enabled by dropping the 'and False'.
do_merge = not options.isData and False
if do_merge:
    process.load('PandaProd.Auxiliary.MergedGenProducer_cfi')
    genMergeSequence = cms.Sequence( process.mergedGenParticles )
else:
    genMergeSequence = cms.Sequence()
### GEN JET FLAVORS
# MC only: associate generator-level heavy-flavour hadrons with gen jets of
# each clustering used above (AK4, AK8, CA15).
if not options.isData:
    process.load('PhysicsTools.JetMCAlgos.HadronAndPartonSelector_cfi')
    from PhysicsTools.JetMCAlgos.HadronAndPartonSelector_cfi import selectedHadronsAndPartons
    from PhysicsTools.JetMCAlgos.GenHFHadronMatcher_cff import matchGenBHadron
    from PhysicsTools.JetMCAlgos.GenHFHadronMatcher_cff import matchGenCHadron
    from PhysicsTools.JetMCAlgos.AK4PFJetsMCFlavourInfos_cfi import ak4JetFlavourInfos
    # Input particle collection for matching to gen jets (partons + leptons)
    # MUST use proper input jet collection: the jets to which hadrons should be associated
    # rParam and jetAlgorithm MUST match those used for jets to be associated with hadrons
    # More details on the tool: https://twiki.cern.ch/twiki/bin/view/CMSPublic/SWGuideBTagMCTools#New_jet_flavour_definition
    process.selectedHadronsAndPartons.particles = 'mergedGenParticles' if do_merge else 'prunedGenParticles'
    process.ak4GenJetFlavourInfos = ak4JetFlavourInfos.clone(
        jets = 'slimmedGenJets'
    )
    process.ak8GenJetFlavourInfos = ak4JetFlavourInfos.clone(
        jets = 'genJetsNoNuAK8',
        rParam = 0.8
    )
    process.ca15GenJetFlavourInfos = ak4JetFlavourInfos.clone(
        jets = 'genJetsNoNuCA15',
        jetAlgorithm = 'CambridgeAachen',
        rParam = 1.5
    )
    # Begin GenHFHadronMatcher subsequences
    # Adapted from PhysicsTools/JetMCAlgos/test/matchGenHFHadrons.py
    # Supplies PDG ID to real name resolution of MC particles
    process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")
    process.ak4MatchGenBHadron = matchGenBHadron.clone(
        genParticles = process.selectedHadronsAndPartons.particles,
        jetFlavourInfos = "ak4GenJetFlavourInfos"
    )
    process.ak4MatchGenCHadron = matchGenCHadron.clone(
        genParticles = process.selectedHadronsAndPartons.particles,
        jetFlavourInfos = "ak4GenJetFlavourInfos"
    )
    process.ak8MatchGenBHadron = matchGenBHadron.clone(
        genParticles = process.selectedHadronsAndPartons.particles,
        jetFlavourInfos = "ak8GenJetFlavourInfos"
    )
    process.ak8MatchGenCHadron = matchGenCHadron.clone(
        genParticles = process.selectedHadronsAndPartons.particles,
        jetFlavourInfos = "ak8GenJetFlavourInfos"
    )
    process.ca15MatchGenBHadron = matchGenBHadron.clone(
        genParticles = process.selectedHadronsAndPartons.particles,
        jetFlavourInfos = "ca15GenJetFlavourInfos"
    )
    process.ca15MatchGenCHadron = matchGenCHadron.clone(
        genParticles = process.selectedHadronsAndPartons.particles,
        jetFlavourInfos = "ca15GenJetFlavourInfos"
    )
    #End GenHFHadronMatcher subsequences
    genJetFlavorSequence = cms.Sequence(
        process.selectedHadronsAndPartons +
        process.ak4GenJetFlavourInfos +
        process.ak8GenJetFlavourInfos +
        process.ca15GenJetFlavourInfos +
        process.ak4MatchGenBHadron +
        process.ak4MatchGenCHadron +
        process.ak8MatchGenBHadron +
        process.ak8MatchGenCHadron +
        process.ca15MatchGenBHadron +
        process.ca15MatchGenCHadron
    )
else:
    genJetFlavorSequence = cms.Sequence()
# runMetCorAnd.. adds a CaloMET module only once, adding the postfix
# However, repeated calls to the function overwrites the MET source of patCaloMet
process.patCaloMet.metSource = 'metrawCalo'
### MONOX FILTER
# NOTE(review): taggingMode presumably stores the filter decision rather
# than rejecting events -- confirm in MonoXFilter_cfi.
process.load('PandaProd.Filters.MonoXFilter_cfi')
process.MonoXFilter.taggingMode = True
### RECO PATH
# Single reconstruction path; sequence order follows the data dependencies
# established above.
process.reco = cms.Path(
    egmCorrectionSequence +
    egmIdSequence +
    puppiSequence +
    metSequence +
    slimmedJetsSequence +
    jetRecorrectionSequence +
    process.MonoXFilter +
    fatJetSequence +
    genMergeSequence +
    genJetFlavorSequence
)
#############
## NTUPLES ##
#############
process.load('PandaProd.Producer.panda_cfi')
process.panda.isRealData = options.isData
process.panda.useTrigger = options.useTrigger
#process.panda.SelectEvents = ['reco'] # no skim
process.panda.fillers.chsAK4Jets.jets = 'slimmedJetsDeepFlavor'
if options.isData:
    # Generator-level fillers are meaningless on real data.
    process.panda.fillers.partons.enabled = False
    process.panda.fillers.genParticles.enabled = False
    process.panda.fillers.ak4GenJets.enabled = False
    process.panda.fillers.ak8GenJets.enabled = False
    process.panda.fillers.ca15GenJets.enabled = False
else:
    process.panda.fillers.weights.pdfType = options.pdfname
    process.panda.fillers.extraMets.types.append('gen')
if not options.useTrigger:
    process.panda.fillers.hlt.enabled = False
# Transfer the ID value-map tags / effective-area files defined in the
# EGAMMA ID section onto the corresponding fillers.
for name, value in electronIdParams.items():
    setattr(process.panda.fillers.electrons, name, value)
for name, value in photonIdParams.items():
    setattr(process.panda.fillers.photons, name, value)
process.panda.fillers.muons.rochesterCorrectionSource = 'PandaProd/Utilities/data/RoccoR2017v0.txt'
process.panda.outputFile = options.outputFile
process.panda.printLevel = options.printLevel
process.ntuples = cms.EndPath(process.panda)
##############
## SCHEDULE ##
##############
process.schedule = cms.Schedule(process.reco, process.ntuples)
# Optional conditions-DB override; 'mit' is a shortcut for the MIT squid
# proxy chain in front of FrontierProd.
if options.connect:
    if options.connect == 'mit':
        options.connect = 'frontier://(proxyurl=http://squid.cmsaf.mit.edu:3128)(proxyurl=http://squid1.cmsaf.mit.edu:3128)(proxyurl=http://squid2.cmsaf.mit.edu:3128)(serverurl=http://cmsfrontier.cern.ch:8000/FrontierProd)/CMS_CONDITIONS'
    process.GlobalTag.connect = options.connect
    for toGet in process.GlobalTag.toGet:
        toGet.connect = options.connect
|
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Code for decoding protocol buffer primitives.
This code is very similar to encoder.py -- read the docs for that module first.
A "decoder" is a function with the signature:
Decode(buffer, pos, end, message, field_dict)
The arguments are:
buffer: The string containing the encoded message.
pos: The current position in the string.
end: The position in the string where the current message ends. May be
less than len(buffer) if we're reading a sub-message.
message: The message object into which we're parsing.
field_dict: message._fields (avoids a hashtable lookup).
The decoder reads the field and stores it into field_dict, returning the new
buffer position. A decoder for a repeated field may proactively decode all of
the elements of that field, if they appear consecutively.
Note that decoders may throw any of the following:
IndexError: Indicates a truncated message.
struct.error: Unpacking of a fixed-width field failed.
message.DecodeError: Other errors.
Decoders are expected to raise an exception if they are called with pos > end.
This allows callers to be lax about bounds checking: it's fine to read past
"end" as long as you are sure that someone else will notice and throw an
exception later on.
Something up the call stack is expected to catch IndexError and struct.error
and convert them to message.DecodeError.
Decoders are constructed using decoder constructors with the signature:
MakeDecoder(field_number, is_repeated, is_packed, key, new_default)
The arguments are:
field_number: The field number of the field we want to decode.
is_repeated: Is the field a repeated field? (bool)
is_packed: Is the field a packed field? (bool)
key: The key to use when looking up the field within field_dict.
(This is actually the FieldDescriptor but nothing in this
file should depend on that.)
new_default: A function which takes a message object as a parameter and
returns a new instance of the default value for this field.
(This is called for repeated fields and sub-messages, when an
instance does not already exist.)
As with encoders, we define a decoder constructor for every type of field.
Then, for every field of every message class we construct an actual decoder.
That decoder goes into a dict indexed by tag, so when we decode a message
we repeatedly read a tag, look up the corresponding decoder, and invoke it.
"""
__author__ = 'kenton@google.com (Kenton Varda)'
import struct
import sys
import six
# Highest code point representable on a "narrow" (UCS-2) Python build; used
# below to decide whether the surrogate check is meaningful.
_UCS2_MAXUNICODE = 65535
if six.PY3:
  long = int  # Python 3 unified int/long; alias keeps the code below portable.
else:
  import re  # pylint: disable=g-import-not-at-top
  # Matches lone UTF-16 surrogate code points (invalid in well-formed UTF-8).
  _SURROGATE_PATTERN = re.compile(six.u(r'[\ud800-\udfff]'))
from google.protobuf.internal import containers
from google.protobuf.internal import encoder
from google.protobuf.internal import wire_format
from google.protobuf import message
# This will overflow and thus become IEEE-754 "infinity". We would use
# "float('inf')" but it doesn't work on Windows pre-Python-2.6.
_POS_INF = 1e10000
_NEG_INF = -_POS_INF
_NAN = _POS_INF * 0
# This is not for optimization, but rather to avoid conflicts with local
# variables named "message".
_DecodeError = message.DecodeError
def _VarintDecoder(mask, result_type):
  """Return a decoder for a basic varint value (does not include tag).

  Decoded values will be bitwise-anded with the given mask before being
  returned, e.g. to limit them to 32 bits. The returned decoder does not
  take the usual "end" parameter -- the caller is expected to do bounds checking
  after the fact (often the caller can defer such checking until later). The
  decoder returns a (value, new_pos) pair.
  """
  def DecodeVarint(buffer, pos):
    result = 0
    shift = 0
    while 1:
      b = six.indexbytes(buffer, pos)
      # Each byte contributes its low 7 bits, little-endian.
      result |= ((b & 0x7f) << shift)
      pos += 1
      if not (b & 0x80):
        # High (continuation) bit clear: this was the final varint byte.
        result &= mask
        result = result_type(result)
        return (result, pos)
      shift += 7
      if shift >= 64:
        # More than 10 continuation bytes cannot be a valid 64-bit varint.
        raise _DecodeError('Too many bytes when decoding varint.')
  return DecodeVarint
def _SignedVarintDecoder(bits, result_type):
  """Like _VarintDecoder() but decodes signed values."""
  signbit = 1 << (bits - 1)
  mask = (1 << bits) - 1
  def DecodeVarint(buffer, pos):
    result = 0
    shift = 0
    while 1:
      b = six.indexbytes(buffer, pos)
      result |= ((b & 0x7f) << shift)
      pos += 1
      if not (b & 0x80):
        result &= mask
        # Sign-extend from `bits` wide to a Python int:
        # (x ^ signbit) - signbit maps values with the top bit set to
        # their negative two's-complement interpretation.
        result = (result ^ signbit) - signbit
        result = result_type(result)
        return (result, pos)
      shift += 7
      if shift >= 64:
        raise _DecodeError('Too many bytes when decoding varint.')
  return DecodeVarint
# We force 32-bit values to int and 64-bit values to long to make
# alternate implementations where the distinction is more significant
# (e.g. the C++ implementation) simpler.
# These four closures are the building blocks for every varint-based field
# decoder below.
_DecodeVarint = _VarintDecoder((1 << 64) - 1, long)
_DecodeSignedVarint = _SignedVarintDecoder(64, long)
# Use these versions for values which must be limited to 32 bits.
_DecodeVarint32 = _VarintDecoder((1 << 32) - 1, int)
_DecodeSignedVarint32 = _SignedVarintDecoder(32, int)
def ReadTag(buffer, pos):
  """Read a tag from the memoryview, and return a (tag_bytes, new_pos) tuple.

  We return the raw bytes of the tag rather than decoding them. The raw
  bytes can then be used to look up the proper decoder. This effectively
  allows us to trade some work that would be done in pure-python (decoding a
  varint) for work that is done in C (searching for a byte string in a hash
  table). In a low-level language it would be much cheaper to decode the
  varint and use that, but not in Python.

  Args:
    buffer: memoryview object of the encoded bytes
    pos: int of the current position to start from

  Returns:
    Tuple[bytes, int] of the tag data and new position.
  """
  tag_end = pos
  # A varint's final byte is the first one without the continuation (0x80)
  # bit set; scan forward until we pass it.
  while six.indexbytes(buffer, tag_end) & 0x80:
    tag_end += 1
  tag_end += 1
  return buffer[pos:tag_end].tobytes(), tag_end
# --------------------------------------------------------------------
def _SimpleDecoder(wire_type, decode_value):
  """Return a constructor for a decoder for fields of a particular type.

  Args:
      wire_type: The field's wire type.
      decode_value: A function which decodes an individual value, e.g.
        _DecodeVarint()
  """
  def SpecificDecoder(field_number, is_repeated, is_packed, key, new_default):
    if is_packed:
      local_DecodeVarint = _DecodeVarint
      def DecodePackedField(buffer, pos, end, message, field_dict):
        # Packed fields are length-delimited: a varint byte count followed
        # by back-to-back values with no per-element tags.
        value = field_dict.get(key)
        if value is None:
          value = field_dict.setdefault(key, new_default(message))
        (endpoint, pos) = local_DecodeVarint(buffer, pos)
        endpoint += pos
        if endpoint > end:
          raise _DecodeError('Truncated message.')
        while pos < endpoint:
          (element, pos) = decode_value(buffer, pos)
          value.append(element)
        if pos > endpoint:
          # The last element ran past the declared payload length.
          del value[-1]   # Discard corrupt value.
          raise _DecodeError('Packed element was truncated.')
        return pos
      return DecodePackedField
    elif is_repeated:
      tag_bytes = encoder.TagBytes(field_number, wire_type)
      tag_len = len(tag_bytes)
      def DecodeRepeatedField(buffer, pos, end, message, field_dict):
        value = field_dict.get(key)
        if value is None:
          value = field_dict.setdefault(key, new_default(message))
        while 1:
          (element, new_pos) = decode_value(buffer, pos)
          value.append(element)
          # Predict that the next tag is another copy of the same repeated
          # field.
          pos = new_pos + tag_len
          if buffer[new_pos:pos] != tag_bytes or new_pos >= end:
            # Prediction failed.  Return.
            if new_pos > end:
              raise _DecodeError('Truncated message.')
            return new_pos
      return DecodeRepeatedField
    else:
      def DecodeField(buffer, pos, end, message, field_dict):
        (field_dict[key], pos) = decode_value(buffer, pos)
        if pos > end:
          del field_dict[key]  # Discard corrupt value.
          raise _DecodeError('Truncated message.')
        return pos
      return DecodeField
  return SpecificDecoder
def _ModifiedDecoder(wire_type, decode_value, modify_value):
  """Like SimpleDecoder but additionally invokes modify_value on every value
  before storing it.  Usually modify_value is ZigZagDecode.
  """
  # Reusing _SimpleDecoder is slightly slower than copying a bunch of code,
  # but not enough to make a significant difference.

  def _DecodeAndModify(buffer, pos):
    raw, next_pos = decode_value(buffer, pos)
    return modify_value(raw), next_pos

  return _SimpleDecoder(wire_type, _DecodeAndModify)
def _StructPackDecoder(wire_type, format):
  """Return a constructor for a decoder for a fixed-width field.

  Args:
      wire_type: The field's wire type.
      format: The format string to pass to struct.unpack().
  """
  width = struct.calcsize(format)
  unpack = struct.unpack

  # Reusing _SimpleDecoder is slightly slower than copying a bunch of code,
  # but not enough to make a significant difference.
  # Note that we expect someone up-stack to catch struct.error and convert
  # it to _DecodeError -- this way we don't have to set up exception-
  # handling blocks every time we parse one value.
  def _DecodeFixedWidth(buffer, pos):
    stop = pos + width
    return unpack(format, buffer[pos:stop])[0], stop

  return _SimpleDecoder(wire_type, _DecodeFixedWidth)
def _FloatDecoder():
  """Returns a decoder for a float field.

  This code works around a bug in struct.unpack for non-finite 32-bit
  floating-point values.
  """
  local_unpack = struct.unpack
  def InnerDecode(buffer, pos):
    """Decode serialized float to a float and new position.

    Args:
      buffer: memoryview of the serialized bytes
      pos: int, position in the memory view to start at.

    Returns:
      Tuple[float, int] of the deserialized float value and new position
      in the serialized data.
    """
    # We expect a 32-bit value in little-endian byte order. Bit 1 is the sign
    # bit, bits 2-9 represent the exponent, and bits 10-32 are the significand.
    new_pos = pos + 4
    float_bytes = buffer[pos:new_pos].tobytes()
    # If this value has all its exponent bits set, then it's non-finite.
    # In Python 2.4, struct.unpack will convert it to a finite 64-bit value.
    # To avoid that, we parse it specially.
    # (`x in b'\x7F\xFF'` is a bytes substring test: true iff the high byte
    # equals 0x7F or 0xFF, i.e. all exponent bits are set.)
    if (float_bytes[3:4] in b'\x7F\xFF' and float_bytes[2:3] >= b'\x80'):
      # If at least one significand bit is set...
      if float_bytes[0:3] != b'\x00\x00\x80':
        return (_NAN, new_pos)
      # If sign bit is set...
      if float_bytes[3:4] == b'\xFF':
        return (_NEG_INF, new_pos)
      return (_POS_INF, new_pos)
    # Note that we expect someone up-stack to catch struct.error and convert
    # it to _DecodeError -- this way we don't have to set up exception-
    # handling blocks every time we parse one value.
    result = local_unpack('<f', float_bytes)[0]
    return (result, new_pos)
  return _SimpleDecoder(wire_format.WIRETYPE_FIXED32, InnerDecode)
def _DoubleDecoder():
  """Returns a decoder for a double field.

  This code works around a bug in struct.unpack for not-a-number.
  """
  local_unpack = struct.unpack
  def InnerDecode(buffer, pos):
    """Decode serialized double to a double and new position.

    Args:
      buffer: memoryview of the serialized bytes.
      pos: int, position in the memory view to start at.

    Returns:
      Tuple[float, int] of the decoded double value and new position
      in the serialized data.
    """
    # We expect a 64-bit value in little-endian byte order. Bit 1 is the sign
    # bit, bits 2-12 represent the exponent, and bits 13-64 are the significand.
    new_pos = pos + 8
    double_bytes = buffer[pos:new_pos].tobytes()
    # If this value has all its exponent bits set and at least one significand
    # bit set, it's not a number. In Python 2.4, struct.unpack will treat it
    # as inf or -inf. To avoid that, we treat it specially.
    # (`x in b'\x7F\xFF'` is a bytes substring test: high byte 0x7F or 0xFF.)
    if ((double_bytes[7:8] in b'\x7F\xFF')
        and (double_bytes[6:7] >= b'\xF0')
        and (double_bytes[0:7] != b'\x00\x00\x00\x00\x00\x00\xF0')):
      return (_NAN, new_pos)
    # Note that we expect someone up-stack to catch struct.error and convert
    # it to _DecodeError -- this way we don't have to set up exception-
    # handling blocks every time we parse one value.
    result = local_unpack('<d', double_bytes)[0]
    return (result, new_pos)
  return _SimpleDecoder(wire_format.WIRETYPE_FIXED64, InnerDecode)
def EnumDecoder(field_number, is_repeated, is_packed, key, new_default):
  """Returns a decoder for an enum field.

  Values that are not members of the enum (not in
  enum_type.values_by_number) are routed into the message's unknown-field
  storage instead of the field itself.
  """
  enum_type = key.enum_type
  if is_packed:
    local_DecodeVarint = _DecodeVarint
    def DecodePackedField(buffer, pos, end, message, field_dict):
      """Decode serialized packed enum to its value and a new position.

      Args:
        buffer: memoryview of the serialized bytes.
        pos: int, position in the memory view to start at.
        end: int, end position of serialized data
        message: Message object to store unknown fields in
        field_dict: Map[Descriptor, Any] to store decoded values in.

      Returns:
        int, new position in serialized data.
      """
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      (endpoint, pos) = local_DecodeVarint(buffer, pos)
      endpoint += pos
      if endpoint > end:
        raise _DecodeError('Truncated message.')
      while pos < endpoint:
        value_start_pos = pos
        (element, pos) = _DecodeSignedVarint32(buffer, pos)
        # pylint: disable=protected-access
        if element in enum_type.values_by_number:
          value.append(element)
        else:
          # Unknown enum value: record the raw bytes and the parsed value
          # in the message's unknown-field storage.
          if not message._unknown_fields:
            message._unknown_fields = []
          tag_bytes = encoder.TagBytes(field_number,
                                       wire_format.WIRETYPE_VARINT)
          message._unknown_fields.append(
              (tag_bytes, buffer[value_start_pos:pos].tobytes()))
          if message._unknown_field_set is None:
            message._unknown_field_set = containers.UnknownFieldSet()
          message._unknown_field_set._add(
              field_number, wire_format.WIRETYPE_VARINT, element)
        # pylint: enable=protected-access
      if pos > endpoint:
        # Roll back whichever container received the truncated last element.
        if element in enum_type.values_by_number:
          del value[-1]  # Discard corrupt value.
        else:
          del message._unknown_fields[-1]
          # pylint: disable=protected-access
          del message._unknown_field_set._values[-1]
          # pylint: enable=protected-access
        raise _DecodeError('Packed element was truncated.')
      return pos
    return DecodePackedField
  elif is_repeated:
    tag_bytes = encoder.TagBytes(field_number, wire_format.WIRETYPE_VARINT)
    tag_len = len(tag_bytes)
    def DecodeRepeatedField(buffer, pos, end, message, field_dict):
      """Decode serialized repeated enum to its value and a new position.

      Args:
        buffer: memoryview of the serialized bytes.
        pos: int, position in the memory view to start at.
        end: int, end position of serialized data
        message: Message object to store unknown fields in
        field_dict: Map[Descriptor, Any] to store decoded values in.

      Returns:
        int, new position in serialized data.
      """
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      while 1:
        (element, new_pos) = _DecodeSignedVarint32(buffer, pos)
        # pylint: disable=protected-access
        if element in enum_type.values_by_number:
          value.append(element)
        else:
          if not message._unknown_fields:
            message._unknown_fields = []
          message._unknown_fields.append(
              (tag_bytes, buffer[pos:new_pos].tobytes()))
          if message._unknown_field_set is None:
            message._unknown_field_set = containers.UnknownFieldSet()
          message._unknown_field_set._add(
              field_number, wire_format.WIRETYPE_VARINT, element)
        # pylint: enable=protected-access
        # Predict that the next tag is another copy of the same repeated
        # field.
        pos = new_pos + tag_len
        if buffer[new_pos:pos] != tag_bytes or new_pos >= end:
          # Prediction failed.  Return.
          if new_pos > end:
            raise _DecodeError('Truncated message.')
          return new_pos
    return DecodeRepeatedField
  else:
    def DecodeField(buffer, pos, end, message, field_dict):
      """Decode a single serialized enum value and return the new position.

      Args:
        buffer: memoryview of the serialized bytes.
        pos: int, position in the memory view to start at.
        end: int, end position of serialized data
        message: Message object to store unknown fields in
        field_dict: Map[Descriptor, Any] to store decoded values in.

      Returns:
        int, new position in serialized data.
      """
      value_start_pos = pos
      (enum_value, pos) = _DecodeSignedVarint32(buffer, pos)
      if pos > end:
        raise _DecodeError('Truncated message.')
      # pylint: disable=protected-access
      if enum_value in enum_type.values_by_number:
        field_dict[key] = enum_value
      else:
        if not message._unknown_fields:
          message._unknown_fields = []
        tag_bytes = encoder.TagBytes(field_number,
                                     wire_format.WIRETYPE_VARINT)
        message._unknown_fields.append(
            (tag_bytes, buffer[value_start_pos:pos].tobytes()))
        if message._unknown_field_set is None:
          message._unknown_field_set = containers.UnknownFieldSet()
        message._unknown_field_set._add(
            field_number, wire_format.WIRETYPE_VARINT, enum_value)
        # pylint: enable=protected-access
      return pos
    return DecodeField
# --------------------------------------------------------------------
# Concrete decoder constructors for each scalar field type, built from the
# generic helpers above.
Int32Decoder = _SimpleDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeSignedVarint32)
Int64Decoder = _SimpleDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeSignedVarint)
UInt32Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint32)
UInt64Decoder = _SimpleDecoder(wire_format.WIRETYPE_VARINT, _DecodeVarint)
# sint32/sint64 are ZigZag-encoded varints.
SInt32Decoder = _ModifiedDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeVarint32, wire_format.ZigZagDecode)
SInt64Decoder = _ModifiedDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeVarint, wire_format.ZigZagDecode)
# Note that Python conveniently guarantees that when using the '<' prefix on
# formats, they will also have the same size across all platforms (as opposed
# to without the prefix, where their sizes depend on the C compiler's basic
# type sizes).
Fixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, '<I')
Fixed64Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED64, '<Q')
SFixed32Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED32, '<i')
SFixed64Decoder = _StructPackDecoder(wire_format.WIRETYPE_FIXED64, '<q')
FloatDecoder = _FloatDecoder()
DoubleDecoder = _DoubleDecoder()
BoolDecoder = _ModifiedDecoder(
    wire_format.WIRETYPE_VARINT, _DecodeVarint, bool)
def StringDecoder(field_number, is_repeated, is_packed, key, new_default,
                  is_strict_utf8=False):
  """Returns a decoder for a string field.

  Values are decoded from UTF-8; on decode failure the UnicodeDecodeError's
  reason is augmented with the field's full name and re-raised.  When
  is_strict_utf8 is set on a py2 UCS-4 build, lone surrogates are rejected
  with message.DecodeError.
  """
  local_DecodeVarint = _DecodeVarint
  local_unicode = six.text_type
  def _ConvertToUnicode(memview):
    """Convert byte to unicode."""
    byte_str = memview.tobytes()
    try:
      value = local_unicode(byte_str, 'utf-8')
    except UnicodeDecodeError as e:
      # add more information to the error message and re-raise it.
      e.reason = '%s in field: %s' % (e, key.full_name)
      raise
    if is_strict_utf8 and six.PY2 and sys.maxunicode > _UCS2_MAXUNICODE:
      # Only do the check for python2 ucs4 when is_strict_utf8 enabled
      if _SURROGATE_PATTERN.search(value):
        # BUGFIX: the implicitly-concatenated fragments were missing their
        # separating spaces ("parsinga protocol buffer", "Usethe bytes").
        reason = ('String field %s contains invalid UTF-8 data when parsing '
                  'a protocol buffer: surrogates not allowed. Use '
                  'the bytes type if you intend to send raw bytes.') % (
                      key.full_name)
        raise message.DecodeError(reason)
    return value
  assert not is_packed
  if is_repeated:
    tag_bytes = encoder.TagBytes(field_number,
                                 wire_format.WIRETYPE_LENGTH_DELIMITED)
    tag_len = len(tag_bytes)
    def DecodeRepeatedField(buffer, pos, end, message, field_dict):
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      while 1:
        (size, pos) = local_DecodeVarint(buffer, pos)
        new_pos = pos + size
        if new_pos > end:
          raise _DecodeError('Truncated string.')
        value.append(_ConvertToUnicode(buffer[pos:new_pos]))
        # Predict that the next tag is another copy of the same repeated field.
        pos = new_pos + tag_len
        if buffer[new_pos:pos] != tag_bytes or new_pos == end:
          # Prediction failed.  Return.
          return new_pos
    return DecodeRepeatedField
  else:
    def DecodeField(buffer, pos, end, message, field_dict):
      (size, pos) = local_DecodeVarint(buffer, pos)
      new_pos = pos + size
      if new_pos > end:
        raise _DecodeError('Truncated string.')
      field_dict[key] = _ConvertToUnicode(buffer[pos:new_pos])
      return new_pos
    return DecodeField
def BytesDecoder(field_number, is_repeated, is_packed, key, new_default):
  """Returns a decoder for a bytes field.

  Identical to StringDecoder except the raw bytes are stored without
  UTF-8 decoding.
  """
  # Local alias avoids a global lookup per decoded value.
  local_DecodeVarint = _DecodeVarint
  assert not is_packed
  if is_repeated:
    tag_bytes = encoder.TagBytes(field_number,
                                 wire_format.WIRETYPE_LENGTH_DELIMITED)
    tag_len = len(tag_bytes)
    def DecodeRepeatedField(buffer, pos, end, message, field_dict):
      # Fetch (or lazily create) the repeated-field container.
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      while 1:
        (size, pos) = local_DecodeVarint(buffer, pos)
        new_pos = pos + size
        if new_pos > end:
          raise _DecodeError('Truncated string.')
        # Copy the payload out of the underlying memoryview.
        value.append(buffer[pos:new_pos].tobytes())
        # Predict that the next tag is another copy of the same repeated field.
        pos = new_pos + tag_len
        if buffer[new_pos:pos] != tag_bytes or new_pos == end:
          # Prediction failed. Return.
          return new_pos
    return DecodeRepeatedField
  else:
    def DecodeField(buffer, pos, end, message, field_dict):
      # Length-prefixed: read the length varint, then copy the payload.
      (size, pos) = local_DecodeVarint(buffer, pos)
      new_pos = pos + size
      if new_pos > end:
        raise _DecodeError('Truncated string.')
      field_dict[key] = buffer[pos:new_pos].tobytes()
      return new_pos
    return DecodeField
def GroupDecoder(field_number, is_repeated, is_packed, key, new_default):
  """Returns a decoder for a group field.

  Groups are delimited by START_GROUP/END_GROUP tags rather than a length
  prefix, so the sub-message parser runs until it hits the end tag.
  """
  end_tag_bytes = encoder.TagBytes(field_number,
                                   wire_format.WIRETYPE_END_GROUP)
  end_tag_len = len(end_tag_bytes)
  assert not is_packed
  if is_repeated:
    tag_bytes = encoder.TagBytes(field_number,
                                 wire_format.WIRETYPE_START_GROUP)
    tag_len = len(tag_bytes)
    def DecodeRepeatedField(buffer, pos, end, message, field_dict):
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      while 1:
        # NOTE(review): the container is re-fetched on every iteration,
        # duplicating the lookup above; kept as-is to preserve behavior.
        value = field_dict.get(key)
        if value is None:
          value = field_dict.setdefault(key, new_default(message))
        # Read sub-message.
        pos = value.add()._InternalParse(buffer, pos, end)
        # Read end tag.
        new_pos = pos+end_tag_len
        if buffer[pos:new_pos] != end_tag_bytes or new_pos > end:
          raise _DecodeError('Missing group end tag.')
        # Predict that the next tag is another copy of the same repeated field.
        pos = new_pos + tag_len
        if buffer[new_pos:pos] != tag_bytes or new_pos == end:
          # Prediction failed. Return.
          return new_pos
    return DecodeRepeatedField
  else:
    def DecodeField(buffer, pos, end, message, field_dict):
      # Fetch (or lazily create) the sub-message to parse into.
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      # Read sub-message.
      pos = value._InternalParse(buffer, pos, end)
      # Read end tag.
      new_pos = pos+end_tag_len
      if buffer[pos:new_pos] != end_tag_bytes or new_pos > end:
        raise _DecodeError('Missing group end tag.')
      return new_pos
    return DecodeField
def MessageDecoder(field_number, is_repeated, is_packed, key, new_default):
  """Returns a decoder for an embedded message field.

  Embedded messages are length-delimited: a varint size prefix followed by
  the serialized sub-message bytes.
  """
  # Local alias avoids a global lookup per decoded value.
  local_DecodeVarint = _DecodeVarint
  assert not is_packed
  if is_repeated:
    tag_bytes = encoder.TagBytes(field_number,
                                 wire_format.WIRETYPE_LENGTH_DELIMITED)
    tag_len = len(tag_bytes)
    def DecodeRepeatedField(buffer, pos, end, message, field_dict):
      # Fetch (or lazily create) the repeated-field container.
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      while 1:
        # Read length.
        (size, pos) = local_DecodeVarint(buffer, pos)
        new_pos = pos + size
        if new_pos > end:
          raise _DecodeError('Truncated message.')
        # Read sub-message.
        if value.add()._InternalParse(buffer, pos, new_pos) != new_pos:
          # The only reason _InternalParse would return early is if it
          # encountered an end-group tag.
          raise _DecodeError('Unexpected end-group tag.')
        # Predict that the next tag is another copy of the same repeated field.
        pos = new_pos + tag_len
        if buffer[new_pos:pos] != tag_bytes or new_pos == end:
          # Prediction failed. Return.
          return new_pos
    return DecodeRepeatedField
  else:
    def DecodeField(buffer, pos, end, message, field_dict):
      # Fetch (or lazily create) the sub-message to parse into.
      value = field_dict.get(key)
      if value is None:
        value = field_dict.setdefault(key, new_default(message))
      # Read length.
      (size, pos) = local_DecodeVarint(buffer, pos)
      new_pos = pos + size
      if new_pos > end:
        raise _DecodeError('Truncated message.')
      # Read sub-message.
      if value._InternalParse(buffer, pos, new_pos) != new_pos:
        # The only reason _InternalParse would return early is if it encountered
        # an end-group tag.
        raise _DecodeError('Unexpected end-group tag.')
      return new_pos
    return DecodeField
# --------------------------------------------------------------------
# Tag bytes for the start of a MessageSet item group (field number 1).
MESSAGE_SET_ITEM_TAG = encoder.TagBytes(1, wire_format.WIRETYPE_START_GROUP)
def MessageSetItemDecoder(descriptor):
  """Returns a decoder for a MessageSet item.

  The parameter is the message Descriptor.

  The message set message looks like this:
    message MessageSet {
      repeated group Item = 1 {
        required int32 type_id = 2;
        required string message = 3;
      }
    }
  """
  type_id_tag_bytes = encoder.TagBytes(2, wire_format.WIRETYPE_VARINT)
  message_tag_bytes = encoder.TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED)
  item_end_tag_bytes = encoder.TagBytes(1, wire_format.WIRETYPE_END_GROUP)
  # Bind module-level helpers to locals so the generated closure avoids
  # repeated global lookups on the hot parsing path.
  local_ReadTag = ReadTag
  local_DecodeVarint = _DecodeVarint
  local_SkipField = SkipField
  def DecodeItem(buffer, pos, end, message, field_dict):
    """Decode serialized message set to its value and new position.

    Args:
      buffer: memoryview of the serialized bytes.
      pos: int, position in the memory view to start at.
      end: int, end position of serialized data
      message: Message object to store unknown fields in
      field_dict: Map[Descriptor, Any] to store decoded values in.

    Returns:
      int, new position in serialized data.
    """
    message_set_item_start = pos
    type_id = -1
    message_start = -1
    message_end = -1
    # Technically, type_id and message can appear in any order, so we need
    # a little loop here.
    while 1:
      (tag_bytes, pos) = local_ReadTag(buffer, pos)
      if tag_bytes == type_id_tag_bytes:
        (type_id, pos) = local_DecodeVarint(buffer, pos)
      elif tag_bytes == message_tag_bytes:
        (size, message_start) = local_DecodeVarint(buffer, pos)
        pos = message_end = message_start + size
      elif tag_bytes == item_end_tag_bytes:
        break
      else:
        # Fix: use the pre-bound local alias; previously this called the
        # global SkipField directly, leaving local_SkipField unused and
        # defeating the local-binding optimization above.
        pos = local_SkipField(buffer, pos, end, tag_bytes)
        if pos == -1:
          raise _DecodeError('Missing group end tag.')
        if pos > end:
          raise _DecodeError('Truncated message.')
    if type_id == -1:
      raise _DecodeError('MessageSet item missing type_id.')
    if message_start == -1:
      raise _DecodeError('MessageSet item missing message.')
    extension = message.Extensions._FindExtensionByNumber(type_id)
    # pylint: disable=protected-access
    if extension is not None:
      value = field_dict.get(extension)
      if value is None:
        message_type = extension.message_type
        if not hasattr(message_type, '_concrete_class'):
          # pylint: disable=protected-access
          message._FACTORY.GetPrototype(message_type)
        value = field_dict.setdefault(
            extension, message_type._concrete_class())
      if value._InternalParse(buffer, message_start, message_end) != message_end:
        # The only reason _InternalParse would return early is if it encountered
        # an end-group tag.
        raise _DecodeError('Unexpected end-group tag.')
    else:
      # Unknown extension: preserve the raw item bytes on the message.
      if not message._unknown_fields:
        message._unknown_fields = []
      message._unknown_fields.append(
          (MESSAGE_SET_ITEM_TAG, buffer[message_set_item_start:pos].tobytes()))
      if message._unknown_field_set is None:
        message._unknown_field_set = containers.UnknownFieldSet()
      message._unknown_field_set._add(
          type_id,
          wire_format.WIRETYPE_LENGTH_DELIMITED,
          buffer[message_start:message_end].tobytes())
      # pylint: enable=protected-access
    return pos
  return DecodeItem
# --------------------------------------------------------------------
def MapDecoder(field_descriptor, new_default, is_message_map):
  """Returns a decoder for a map field.

  Args:
    field_descriptor: descriptor of the map field (also used as the key
      into field_dict).
    new_default: callable producing the map container for a message.
    is_message_map: True when map values are messages, in which case values
      are merged via CopyFrom rather than assigned.
  """
  key = field_descriptor
  tag_bytes = encoder.TagBytes(field_descriptor.number,
                               wire_format.WIRETYPE_LENGTH_DELIMITED)
  tag_len = len(tag_bytes)
  local_DecodeVarint = _DecodeVarint
  # Can't read _concrete_class yet; might not be initialized.
  message_type = field_descriptor.message_type
  def DecodeMap(buffer, pos, end, message, field_dict):
    # Each map entry is a length-delimited sub-message with 'key' and
    # 'value' fields; submsg is cleared and reused for every entry.
    submsg = message_type._concrete_class()
    value = field_dict.get(key)
    if value is None:
      value = field_dict.setdefault(key, new_default(message))
    while 1:
      # Read length.
      (size, pos) = local_DecodeVarint(buffer, pos)
      new_pos = pos + size
      if new_pos > end:
        raise _DecodeError('Truncated message.')
      # Read sub-message.
      submsg.Clear()
      if submsg._InternalParse(buffer, pos, new_pos) != new_pos:
        # The only reason _InternalParse would return early is if it
        # encountered an end-group tag.
        raise _DecodeError('Unexpected end-group tag.')
      if is_message_map:
        value[submsg.key].CopyFrom(submsg.value)
      else:
        value[submsg.key] = submsg.value
      # Predict that the next tag is another copy of the same repeated field.
      pos = new_pos + tag_len
      if buffer[new_pos:pos] != tag_bytes or new_pos == end:
        # Prediction failed. Return.
        return new_pos
  return DecodeMap
# --------------------------------------------------------------------
# Optimization is not as heavy here because calls to SkipField() are rare,
# except for handling end-group tags.
def _SkipVarint(buffer, pos, end):
"""Skip a varint value. Returns the new position."""
# Previously ord(buffer[pos]) raised IndexError when pos is out of range.
# With this code, ord(b'') raises TypeError. Both are handled in
# python_message.py to generate a 'Truncated message' error.
while ord(buffer[pos:pos+1].tobytes()) & 0x80:
pos += 1
pos += 1
if pos > end:
raise _DecodeError('Truncated message.')
return pos
def _SkipFixed64(buffer, pos, end):
"""Skip a fixed64 value. Returns the new position."""
pos += 8
if pos > end:
raise _DecodeError('Truncated message.')
return pos
def _DecodeFixed64(buffer, pos):
"""Decode a fixed64."""
new_pos = pos + 8
return (struct.unpack('<Q', buffer[pos:new_pos])[0], new_pos)
def _SkipLengthDelimited(buffer, pos, end):
  """Skip a length-delimited (bytes/string/embedded message) value.

  Returns the position immediately past the value.
  """
  (length, data_start) = _DecodeVarint(buffer, pos)
  data_end = data_start + length
  if data_end > end:
    raise _DecodeError('Truncated message.')
  return data_end
def _SkipGroup(buffer, pos, end):
  """Skip an entire sub-group. Returns the position just past the group."""
  while True:
    (tag_bytes, tag_end) = ReadTag(buffer, pos)
    skipped = SkipField(buffer, tag_end, end, tag_bytes)
    if skipped == -1:
      # End-group tag reached; the group ends right after the tag.
      return tag_end
    pos = skipped
def _DecodeUnknownFieldSet(buffer, pos, end_pos=None):
  """Decode UnknownFieldSet. Returns the UnknownFieldSet and new position."""
  unknown_field_set = containers.UnknownFieldSet()
  # With no explicit end position, parsing stops at an END_GROUP tag.
  while end_pos is None or pos < end_pos:
    (tag_bytes, pos) = ReadTag(buffer, pos)
    # Re-decode the tag bytes to extract field number and wire type.
    (tag, _) = _DecodeVarint(tag_bytes, 0)
    field_number, wire_type = wire_format.UnpackTag(tag)
    if wire_type == wire_format.WIRETYPE_END_GROUP:
      break
    (data, pos) = _DecodeUnknownField(buffer, pos, wire_type)
    # pylint: disable=protected-access
    unknown_field_set._add(field_number, wire_type, data)
  return (unknown_field_set, pos)
def _DecodeUnknownField(buffer, pos, wire_type):
  """Decode a unknown field. Returns the UnknownField and new position."""
  if wire_type == wire_format.WIRETYPE_VARINT:
    (data, pos) = _DecodeVarint(buffer, pos)
  elif wire_type == wire_format.WIRETYPE_FIXED64:
    (data, pos) = _DecodeFixed64(buffer, pos)
  elif wire_type == wire_format.WIRETYPE_FIXED32:
    (data, pos) = _DecodeFixed32(buffer, pos)
  elif wire_type == wire_format.WIRETYPE_LENGTH_DELIMITED:
    # Copy the raw payload bytes out of the buffer.
    (size, pos) = _DecodeVarint(buffer, pos)
    data = buffer[pos:pos+size].tobytes()
    pos += size
  elif wire_type == wire_format.WIRETYPE_START_GROUP:
    # Groups recurse into a nested unknown field set.
    (data, pos) = _DecodeUnknownFieldSet(buffer, pos)
  elif wire_type == wire_format.WIRETYPE_END_GROUP:
    # Signal end-of-group to the caller with position -1.
    return (0, -1)
  else:
    raise _DecodeError('Wrong wire type in tag.')
  return (data, pos)
def _EndGroup(buffer, pos, end):
"""Skipping an END_GROUP tag returns -1 to tell the parent loop to break."""
return -1
def _SkipFixed32(buffer, pos, end):
"""Skip a fixed32 value. Returns the new position."""
pos += 4
if pos > end:
raise _DecodeError('Truncated message.')
return pos
def _DecodeFixed32(buffer, pos):
"""Decode a fixed32."""
new_pos = pos + 4
return (struct.unpack('<I', buffer[pos:new_pos])[0], new_pos)
def _RaiseInvalidWireType(buffer, pos, end):
  """Skip function for unknown wire types. Raises an exception."""
  # Wire types 6 and 7 have no skipper; any tag carrying them is malformed.
  raise _DecodeError('Tag had invalid wire type.')
def _FieldSkipper():
  """Constructs the SkipField function."""
  # Dispatch table indexed by wire type (tag values 0-7).
  WIRETYPE_TO_SKIPPER = [
      _SkipVarint,
      _SkipFixed64,
      _SkipLengthDelimited,
      _SkipGroup,
      _EndGroup,
      _SkipFixed32,
      _RaiseInvalidWireType,
      _RaiseInvalidWireType,
      ]
  wiretype_mask = wire_format.TAG_TYPE_MASK
  def SkipField(buffer, pos, end, tag_bytes):
    """Skips a field with the specified tag.

    |pos| should point to the byte immediately after the tag.

    Returns:
        The new position (after the tag value), or -1 if the tag is an end-group
        tag (in which case the calling loop should break).
    """
    # The wire type is always in the first byte since varints are little-endian.
    wire_type = ord(tag_bytes[0:1]) & wiretype_mask
    return WIRETYPE_TO_SKIPPER[wire_type](buffer, pos, end)
  return SkipField
# Module-level skipper used throughout this file.
SkipField = _FieldSkipper()
|
|
# Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from botocore.exceptions import ClientError
import json
from c7n.actions import RemovePolicyBase
from c7n.filters import CrossAccountAccessFilter, MetricsFilter, FilterRegistry
from c7n.manager import resources
from c7n.utils import local_session
from c7n.query import QueryResourceManager
from c7n.actions import BaseAction
from c7n.utils import type_schema
from c7n.tags import RemoveTag, Tag, TagActionFilter, TagDelayedAction, universal_augment
# Registry of SQS-specific filters; 'marked-for-op' enables filtering on
# queues previously tagged by a mark-for-op action.
filters = FilterRegistry('sqs.filters')
filters.register('marked-for-op', TagActionFilter)
@resources.register('sqs')
class SQS(QueryResourceManager):
    """Resource manager for SQS queues."""
    # Resource metadata consumed by the generic query machinery.
    class resource_type(object):
        service = 'sqs'
        type = None
        # type = 'queue'
        enum_spec = ('list_queues', 'QueueUrls', None)
        detail_spec = ("get_queue_attributes", "QueueUrl", None, "Attributes")
        id = 'QueueUrl'
        filter_name = 'QueueNamePrefix'
        filter_type = 'scalar'
        name = 'QueueUrl'
        date = 'CreatedTimestamp'
        dimension = 'QueueName'
        default_report_fields = (
            'QueueArn',
            'CreatedTimestamp',
            'ApproximateNumberOfMessages',
        )
    filter_registry = filters
    def get_permissions(self):
        # augment() describes each queue, which needs an extra permission
        # beyond what list_queues requires.
        perms = super(SQS, self).get_permissions()
        perms.append('sqs:GetQueueAttributes')
        return perms
    def get_arns(self, resources):
        # Queue attributes already carry the ARN; no ARN synthesis needed.
        return [r['QueueArn'] for r in resources]
    def get_resources(self, ids, cache=True):
        # Accept either queue URLs or queue names; URLs are normalized to
        # their last path segment (the queue name) since lookup uses
        # QueueNamePrefix.
        ids_normalized = []
        for i in ids:
            if not i.startswith('https://'):
                ids_normalized.append(i)
                continue
            ids_normalized.append(i.rsplit('/', 1)[-1])
        return super(SQS, self).get_resources(ids_normalized, cache)
    def augment(self, resources):
        """Expand each queue URL into its full attribute dict."""
        client = local_session(self.session_factory).client('sqs')
        def _augment(r):
            try:
                queue = self.retry(
                    client.get_queue_attributes,
                    QueueUrl=r,
                    AttributeNames=['All'])['Attributes']
                queue['QueueUrl'] = r
            except ClientError as e:
                # Queue may disappear between the list and describe calls.
                if e.response['Error']['Code'] == 'AWS.SimpleQueueService.NonExistentQueue':
                    return
                # Best effort on queues whose policy denies us access.
                if e.response['Error']['Code'] == 'AccessDenied':
                    self.log.warning("Denied access to sqs %s" % r)
                    return
                raise
            return queue
        # Describe queues concurrently; drop queues we couldn't describe.
        with self.executor_factory(max_workers=2) as w:
            return universal_augment(
                self, list(filter(None, w.map(_augment, resources))))
@SQS.filter_registry.register('metrics')
class MetricsFilter(MetricsFilter):
    # NOTE(review): this subclass shadows the imported c7n MetricsFilter
    # name at module scope; consider renaming if that becomes confusing.
    def get_dimensions(self, resource):
        # CloudWatch SQS metrics are dimensioned by queue name, which is
        # the last path segment of the queue URL.
        return [
            {'Name': 'QueueName',
             'Value': resource['QueueUrl'].rsplit('/', 1)[-1]}]
@SQS.filter_registry.register('cross-account')
class SQSCrossAccount(CrossAccountAccessFilter):
    """Filter SQS queues which have cross account permissions

    :example:

    .. code-block:: yaml

            policies:
              - name: sqs-cross-account
                resource: sqs
                filters:
                  - type: cross-account
    """
    # The queue's Policy document is part of the attributes fetched by
    # SQS.augment(), so only the attribute read permission is required.
    permissions = ('sqs:GetQueueAttributes',)
@SQS.action_registry.register('remove-statements')
class RemovePolicyStatement(RemovePolicyBase):
    """Action to remove policy statements from SQS queues.

    :example:

    .. code-block:: yaml

           policies:
              - name: sqs-cross-account
                resource: sqs
                filters:
                  - type: cross-account
                actions:
                  - type: remove-statements
                    statement_ids: matched
    """

    permissions = ('sqs:GetQueueAttributes', 'sqs:RemovePermission')

    def process(self, resources):
        """Remove matched statements from each queue, returning per-queue
        results for queues that were modified."""
        results = []
        client = local_session(self.manager.session_factory).client('sqs')
        for r in resources:
            try:
                results += filter(None, [self.process_resource(client, r)])
            except Exception:
                # Fix: message previously read "sns:%s" -- a copy/paste slip
                # from the sns module; this action operates on SQS queues.
                self.log.exception(
                    "Error processing sqs:%s", r['QueueUrl'])
        return results

    def process_resource(self, client, resource):
        """Remove the annotated (matched) statements from a single queue.

        Returns a result dict when statements were removed, else None.
        """
        p = resource.get('Policy')
        if p is None:
            return
        p = json.loads(resource['Policy'])
        _, found = self.process_policy(
            p, resource, CrossAccountAccessFilter.annotation_key)
        if not found:
            return
        # The SQS API removes permissions one statement (Sid) at a time.
        for f in found:
            client.remove_permission(
                QueueUrl=resource['QueueUrl'],
                Label=f['Sid'])
        return {'Name': resource['QueueUrl'],
                'State': 'PolicyRemoved',
                'Statements': found}
@SQS.action_registry.register('mark-for-op')
class MarkForOpQueue(TagDelayedAction):
    """Action to specify an action to occur at a later date

    :example:

    .. code-block:: yaml

            policies:
              - name: sqs-delete-unused
                resource: sqs
                filters:
                  - "tag:custodian_cleanup": absent
                actions:
                  - type: mark-for-op
                    tag: custodian_cleanup
                    msg: "Unused queues"
                    op: delete
                    days: 7
    """
    permissions = ('sqs:TagQueue',)
    def process_resource_set(self, queues, tags):
        # Apply the delay-marker tag using the SQS tagging API.
        client = local_session(self.manager.session_factory).client(
            'sqs')
        # Convert c7n's [{'Key': k, 'Value': v}, ...] form into the flat
        # dict tag_queue expects.
        tag_dict = {}
        for t in tags:
            tag_dict[t['Key']] = t['Value']
        for queue in queues:
            queue_url = queue['QueueUrl']
            try:
                client.tag_queue(QueueUrl=queue_url, Tags=tag_dict)
            except Exception as err:
                # Best effort: log and continue with the remaining queues.
                self.log.exception(
                    'Exception tagging queue %s: %s',
                    queue['QueueArn'], err)
                continue
@SQS.action_registry.register('tag')
class TagQueue(Tag):
    """Action to create tag(s) on a queue

    :example:

    .. code-block:: yaml

            policies:
              - name: tag-sqs
                resource: sqs
                filters:
                  - "tag:target-tag": absent
                actions:
                  - type: tag
                    key: target-tag
                    value: target-tag-value
    """
    permissions = ('sqs:TagQueue',)
    def process_resource_set(self, queues, tags):
        client = local_session(self.manager.session_factory).client(
            'sqs')
        # Convert c7n's [{'Key': k, 'Value': v}, ...] form into the flat
        # dict tag_queue expects.
        tag_dict = {}
        for t in tags:
            tag_dict[t['Key']] = t['Value']
        for queue in queues:
            queue_url = queue['QueueUrl']
            try:
                client.tag_queue(QueueUrl=queue_url, Tags=tag_dict)
            except Exception as err:
                # Best effort: log and continue with the remaining queues.
                self.log.exception(
                    'Exception tagging queue %s: %s',
                    queue['QueueArn'], err)
                continue
@SQS.action_registry.register('remove-tag')
class UntagQueue(RemoveTag):
    """Action to remove tag(s) on a queue

    :example:

    .. code-block:: yaml

            policies:
              - name: sqs-remove-tag
                resource: sqs
                filters:
                  - "tag:OutdatedTag": present
                actions:
                  - type: remove-tag
                    tags: ["OutdatedTag"]
    """
    permissions = ('sqs:UntagQueue',)
    def process_resource_set(self, queues, tags):
        # 'tags' here is a list of tag keys to strip from each queue.
        client = local_session(self.manager.session_factory).client(
            'sqs')
        for queue in queues:
            queue_url = queue['QueueUrl']
            try:
                client.untag_queue(QueueUrl=queue_url, TagKeys=tags)
            except Exception as err:
                # Best effort: log and continue with the remaining queues.
                self.log.exception(
                    'Exception while removing tags from queue %s: %s',
                    queue['QueueArn'], err)
                continue
@SQS.action_registry.register('delete')
class DeleteSqsQueue(BaseAction):
    """Action to delete a SQS queue

    To prevent unwanted deletion of SQS queues, it is recommended
    to include a filter

    :example:

    .. code-block:: yaml

            policies:
              - name: sqs-delete
                resource: sqs
                filters:
                  - KmsMasterKeyId: absent
                actions:
                  - type: delete
    """
    schema = type_schema('delete')
    permissions = ('sqs:DeleteQueue',)
    def process(self, queues):
        client = local_session(self.manager.session_factory).client('sqs')
        for q in queues:
            self.process_queue(client, q)
    def process_queue(self, client, queue):
        try:
            client.delete_queue(QueueUrl=queue['QueueUrl'])
        except (client.exceptions.QueueDoesNotExist,
                client.exceptions.QueueDeletedRecently):
            # Queue already gone, or deleted so recently that SQS refuses
            # the call; both are acceptable end states.
            pass
@SQS.action_registry.register('set-encryption')
class SetEncryption(BaseAction):
    """Action to set encryption key on SQS queue

    :example:

    .. code-block:: yaml

            policies:
              - name: sqs-set-encrypt
                resource: sqs
                filters:
                  - KmsMasterKeyId: absent
                actions:
                  - type: set-encryption
                    key: "<alias of kms key>"
    """
    schema = type_schema(
        'set-encryption',
        key={'type': 'string'}, required=('key',))
    permissions = ('sqs:SetQueueAttributes',)
    def process(self, queues):
        # get KeyId
        # The policy supplies a KMS alias name; resolve it to the key id
        # before applying it to each queue.
        key = "alias/" + self.data.get('key')
        session = local_session(self.manager.session_factory)
        key_id = session.client(
            'kms').describe_key(KeyId=key)['KeyMetadata']['KeyId']
        client = session.client('sqs')
        for q in queues:
            self.process_queue(client, q, key_id)
    def process_queue(self, client, queue, key_id):
        try:
            client.set_queue_attributes(
                QueueUrl=queue['QueueUrl'],
                Attributes={'KmsMasterKeyId': key_id}
            )
        except (client.exceptions.QueueDoesNotExist,) as e:
            # Queue vanished since listing; log and move on.
            self.log.exception(
                "Exception modifying queue:\n %s" % e)
@SQS.action_registry.register('set-retention-period')
class SetRetentionPeriod(BaseAction):
    """Action to set the retention period on an SQS queue (in seconds)

    :example:

    .. code-block:: yaml

        policies:
          - name: sqs-reduce-long-retention-period
            resource: sqs
            filters:
              - type: value
                key: MessageRetentionPeriod
                value_type: integer
                value: 345600
                op: ge
            actions:
              - type: set-retention-period
                period: 86400
    """
    # Schema bounds mirror the SQS attribute limits (60..1209600 seconds).
    schema = type_schema(
        'set-retention-period',
        period={'type': 'integer',
                'minimum': 60, 'exclusiveMinimum': True,
                'maximum': 1209600, 'exclusiveMaximum': True})
    permissions = ('sqs:SetQueueAttributes',)
    def process(self, queues):
        client = local_session(self.manager.session_factory).client('sqs')
        # Default of 345600 seconds (4 days); the SQS API requires the
        # attribute value as a string.
        period = str(self.data.get('period', 345600))
        for q in queues:
            client.set_queue_attributes(
                QueueUrl=q['QueueUrl'],
                Attributes={
                    'MessageRetentionPeriod': period})
|
|
from django.conf import settings
from django.db.models.sql import aggregates as sqlaggregates
from django.db.models.sql.compiler import SQLCompiler
from django.db.models.sql.constants import LOOKUP_SEP, MULTI, SINGLE
from django.db.models.sql.where import AND, OR
from django.db.utils import DatabaseError, IntegrityError
from django.utils.tree import Node
import random
# In-memory implementations of Django ORM lookup types, used to emulate
# SQL WHERE filtering against entity dicts on non-relational backends.
EMULATED_OPS = {
    # 'exact' against a list/tuple column means membership (multi-valued
    # fields); against a scalar it is plain equality.
    'exact': lambda x, y: y in x if isinstance(x, (list,tuple)) else x == y,
    'iexact': lambda x, y: x.lower() == y.lower(),
    'startswith': lambda x, y: x.startswith(y),
    'istartswith': lambda x, y: x.lower().startswith(y.lower()),
    'isnull': lambda x, y: x is None if y else x is not None,
    'in': lambda x, y: x in y,
    'lt': lambda x, y: x < y,
    'lte': lambda x, y: x <= y,
    'gt': lambda x, y: x > y,
    'gte': lambda x, y: x >= y,
}
class NonrelQuery(object):
    """Base class for backend-specific non-relational queries.

    Subclasses implement fetch/count/delete/order_by and add_filter;
    this class provides Where-tree traversal (add_filters) plus in-memory
    filter matching and ordering helpers. (Python 2 code: uses `unicode`
    and `cmp`.)
    """
    # ----------------------------------------------
    # Public API
    # ----------------------------------------------
    def __init__(self, compiler, fields):
        self.fields = fields
        self.compiler = compiler
        self.connection = compiler.connection
        self.query = self.compiler.query
        # Negation state toggled while descending into negated subtrees.
        self._negated = False
    def fetch(self, low_mark=0, high_mark=None):
        raise NotImplementedError('Not implemented')
    def count(self, limit=None):
        raise NotImplementedError('Not implemented')
    def delete(self):
        raise NotImplementedError('Not implemented')
    def order_by(self, ordering):
        raise NotImplementedError('Not implemented')
    # Used by add_filters()
    def add_filter(self, column, lookup_type, negated, db_type, value):
        raise NotImplementedError('Not implemented')
    # This is just a default implementation. You might want to override this
    # in case your backend supports OR queries
    def add_filters(self, filters):
        """Traverses the given Where tree and adds the filters to this query"""
        if filters.negated:
            self._negated = not self._negated
        if not self._negated and filters.connector != AND:
            raise DatabaseError('Only AND filters are supported')
        # Remove unneeded children from tree
        children = self._get_children(filters.children)
        if self._negated and filters.connector != OR and len(children) > 1:
            raise DatabaseError("When negating a whole filter subgroup "
                                "(e.g., a Q object) the subgroup filters must "
                                "be connected via OR, so the non-relational "
                                "backend can convert them like this: "
                                '"not (a OR b) => (not a) AND (not b)".')
        for child in children:
            if isinstance(child, Node):
                # Recurse into nested Q objects.
                self.add_filters(child)
                continue
            column, lookup_type, db_type, value = self._decode_child(child)
            self.add_filter(column, lookup_type, self._negated, db_type, value)
        if filters.negated:
            # Restore negation state on the way back up the tree.
            self._negated = not self._negated
    # ----------------------------------------------
    # Internal API for reuse by subclasses
    # ----------------------------------------------
    def _decode_child(self, child):
        # Unpack a leaf (constraint, lookup, annotation, value) tuple into
        # (column, lookup_type, db_type, normalized value).
        constraint, lookup_type, annotation, value = child
        packed, value = constraint.process(lookup_type, value, self.connection)
        alias, column, db_type = packed
        if alias and alias != self.query.model._meta.db_table:
            raise DatabaseError("This database doesn't support JOINs "
                                "and multi-table inheritance.")
        value = self._normalize_lookup_value(value, annotation, lookup_type)
        return column, lookup_type, db_type, value
    def _normalize_lookup_value(self, value, annotation, lookup_type):
        # Django fields always return a list (see Field.get_db_prep_lookup)
        # except if get_db_prep_lookup got overridden by a subclass
        if lookup_type not in ('in', 'range', 'year') and isinstance(value, (tuple, list)):
            if len(value) > 1:
                raise DatabaseError('Filter lookup type was: %s. Expected the '
                                    'filters value not to be a list. Only "in"-filters '
                                    'can be used with lists.'
                                    % lookup_type)
            elif lookup_type == 'isnull':
                value = annotation
            else:
                value = value[0]
        # Normalize string subclasses (e.g. SafeString) to plain str/unicode.
        if isinstance(value, unicode):
            value = unicode(value)
        elif isinstance(value, str):
            value = str(value)
        # Strip the SQL LIKE wildcard characters Django adds for these lookups.
        if lookup_type in ('startswith', 'istartswith'):
            value = value[:-1]
        elif lookup_type in ('endswith', 'iendswith'):
            value = value[1:]
        elif lookup_type in ('contains', 'icontains'):
            value = value[1:-1]
        return value
    def _get_children(self, children):
        # Filter out nodes that were automatically added by sql.Query, but are
        # not necessary with emulated negation handling code
        result = []
        for child in children:
            if isinstance(child, Node) and child.negated and \
                    len(child.children) == 1 and \
                    isinstance(child.children[0], tuple):
                node, lookup_type, annotation, value = child.children[0]
                if lookup_type == 'isnull' and value == True and node.field is None:
                    continue
            result.append(child)
        return result
    def _matches_filters(self, entity, filters):
        # In-memory evaluation of a Where tree against an entity dict.
        # Filters without rules match everything
        if not filters.children:
            return True
        result = filters.connector == AND
        for child in filters.children:
            if isinstance(child, Node):
                submatch = self._matches_filters(entity, child)
            else:
                constraint, lookup_type, annotation, value = child
                packed, value = constraint.process(lookup_type, value, self.connection)
                alias, column, db_type = packed
                if alias != self.query.model._meta.db_table:
                    raise DatabaseError("This database doesn't support JOINs "
                                        "and multi-table inheritance.")
                # Django fields always return a list (see Field.get_db_prep_lookup)
                # except if get_db_prep_lookup got overridden by a subclass
                if lookup_type != 'in' and isinstance(value, (tuple, list)):
                    if len(value) > 1:
                        raise DatabaseError('Filter lookup type was: %s. '
                                            'Expected the filters value not to be a list. '
                                            'Only "in"-filters can be used with lists.'
                                            % lookup_type)
                    elif lookup_type == 'isnull':
                        value = annotation
                    else:
                        value = value[0]
                submatch = EMULATED_OPS[lookup_type](entity[column], value)
            # Short-circuit once the connector's outcome is decided.
            if filters.connector == OR and submatch:
                result = True
                break
            elif filters.connector == AND and not submatch:
                result = False
                break
        if filters.negated:
            return not result
        return result
    def _order_in_memory(self, lhs, rhs):
        # cmp-style comparator implementing ORDER BY in memory
        # (Python 2: relies on the builtin cmp).
        for order in self.compiler._get_ordering():
            if LOOKUP_SEP in order:
                raise DatabaseError("JOINs in ordering not supported (%s)" % order)
            if order == '?':
                result = random.choice([1, 0, -1])
            else:
                column = order.lstrip('-')
                result = cmp(lhs.get(column), rhs.get(column))
                if order.startswith('-'):
                    result *= -1
            if result != 0:
                return result
        return 0
    def convert_value_from_db(self, db_type, value):
        return self.compiler.convert_value_from_db(db_type, value)
    def convert_value_for_db(self, db_type, value):
        return self.compiler.convert_value_for_db(db_type, value)
class NonrelCompiler(SQLCompiler):
"""
Base class for non-relational compilers. Provides in-memory filter matching
and ordering. Entities are assumed to be dictionaries where the keys are
column names.
"""
# ----------------------------------------------
# Public API
# ----------------------------------------------
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
self.check_query()
fields = self.get_fields()
low_mark = self.query.low_mark
high_mark = self.query.high_mark
for entity in self.build_query(fields).fetch(low_mark, high_mark):
yield self._make_result(entity, fields)
def _make_result(self, entity, fields):
result = []
for field in fields:
if not field.null and entity.get(field.column,
field.get_default()) is None:
raise DatabaseError("Non-nullable field %s can't be None!" % field.name)
result.append(self.convert_value_from_db(field.db_type(
connection=self.connection), entity.get(field.column, field.get_default())))
return result
def has_results(self):
return self.get_count(check_exists=True)
def execute_sql(self, result_type=MULTI):
"""
Handles aggregate/count queries
"""
aggregates = self.query.aggregate_select.values()
# Simulate a count()
if aggregates:
assert len(aggregates) == 1
aggregate = aggregates[0]
assert isinstance(aggregate, sqlaggregates.Count)
meta = self.query.get_meta()
assert aggregate.col == '*' or aggregate.col == (meta.db_table, meta.pk.column)
count = self.get_count()
if result_type is SINGLE:
return [count]
elif result_type is MULTI:
return [[count]]
raise NotImplementedError('The database backend only supports count() queries')
# ----------------------------------------------
# Additional NonrelCompiler API
# ----------------------------------------------
def check_query(self):
if (len([a for a in self.query.alias_map if self.query.alias_refcount[a]]) > 1
or self.query.distinct or self.query.extra or self.query.having):
raise DatabaseError('This query is not supported by the database.')
def get_count(self, check_exists=False):
"""
Counts matches using the current filter constraints.
"""
if check_exists:
high_mark = 1
else:
high_mark = self.query.high_mark
return self.build_query().count(high_mark)
def build_query(self, fields=None):
if fields is None:
fields = self.get_fields()
query = self.query_class(self, fields)
query.add_filters(self.query.where)
query.order_by(self._get_ordering())
# This at least satisfies the most basic unit tests
if settings.DEBUG:
self.connection.queries.append({'sql': repr(query)})
return query
def get_fields(self):
"""
Returns the fields which should get loaded from the backend by self.query
"""
# We only set this up here because
# related_select_fields isn't populated until
# execute_sql() has been called.
if self.query.select_fields:
fields = self.query.select_fields + self.query.related_select_fields
else:
fields = self.query.model._meta.fields
# If the field was deferred, exclude it from being passed
# into `resolve_columns` because it wasn't selected.
only_load = self.deferred_to_columns()
if only_load:
db_table = self.query.model._meta.db_table
fields = [f for f in fields if db_table in only_load and
f.column in only_load[db_table]]
for field in fields:
if field.model._meta != self.query.model._meta:
raise DatabaseError('Multi-table inheritance is not supported '
'by non-relational DBs.')
return fields
    def _get_ordering(self):
        """
        Translate the Django query's ordering specification into the list
        of orderings understood by the backend query.

        Returns a list of field names where a leading '-' marks a
        descending sort. Raises DatabaseError for orderings this backend
        cannot honour (cross-table spans and random '?' ordering).
        """
        if not self.query.default_ordering:
            ordering = self.query.order_by
        else:
            ordering = self.query.order_by or self.query.get_meta().ordering
        result = []
        for order in ordering:
            if LOOKUP_SEP in order:
                raise DatabaseError("Ordering can't span tables on non-relational backends (%s)" % order)
            if order == '?':
                raise DatabaseError("Randomized ordering isn't supported by the backend")
            # Normalize the spec: drop any explicit '+' prefix, then split
            # into direction flag and bare field name.
            order = order.lstrip('+')
            descending = order.startswith('-')
            name = order.lstrip('-')
            if name == 'pk':
                # Resolve the 'pk' alias to the real primary-key name,
                # re-attaching the direction prefix.
                name = self.query.get_meta().pk.name
                order = '-' + name if descending else name
            if self.query.standard_ordering:
                result.append(order)
            else:
                # Reversed querysets invert every ordering term.
                if descending:
                    result.append(name)
                else:
                    result.append('-' + name)
        return result
class NonrelInsertCompiler(object):
    """Mixin implementing execute_sql for non-relational INSERTs."""

    def execute_sql(self, return_id=False):
        """
        Map each column to its backend-converted value and delegate the
        actual write to self.insert().
        """
        prepared = {}
        for (field, value), column in zip(self.query.values,
                                          self.query.columns):
            # Raw values (field is None) are stored untouched; everything
            # else is validated and converted to its backend representation.
            if field is not None:
                if value is None and not field.null:
                    raise DatabaseError("You can't set %s (a non-nullable "
                                        "field) to None!" % field.name)
                db_type = field.db_type(connection=self.connection)
                value = self.convert_value_for_db(db_type, value)
            prepared[column] = value
        return self.insert(prepared, return_id=return_id)
class NonrelUpdateCompiler(object):
    """SQL-compiler stand-in for UPDATE statements on nonrel backends."""

    def execute_sql(self, result_type=MULTI):
        """Always fail: bulk updates are not supported by this backend.

        Raises:
            NotImplementedError: unconditionally.
        """
        # TODO: We don't yet support QuerySet.update() in Django-nonrel
        raise NotImplementedError('No updates')
class NonrelDeleteCompiler(object):
    """SQL-compiler stand-in for DELETE statements on nonrel backends."""

    def execute_sql(self, result_type=MULTI):
        # Fetch only the primary key of the matched rows, then delete them.
        self.build_query([self.query.get_meta().pk]).delete()
|
|
##########################################################################
#
# Copyright (c) 2016, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import functools
import imath
import IECore
import Gaffer
import GafferUI
import GafferOSL
import _CodeMenu
Gaffer.Metadata.registerNode(
GafferOSL.OSLCode,
"description",
"""
Allows arbitrary OSL shaders to be written directly within
Gaffer.
""",
"layout:customWidget:error:widgetType", "GafferOSLUI.OSLCodeUI._ErrorWidget",
"layout:customWidget:error:section", "Settings.Code",
"layout:customWidget:error:index", -1,
"layout:section:Settings.Inputs:collapsed", False,
"layout:section:Settings.Outputs:collapsed", False,
"layout:section:Settings.Code:collapsed", False,
plugs = {
"name" : [
"description", "Generated automatically - do not edit.",
"plugValueWidget:type", "",
],
"type" : [
"description", "Generated automatically - do not edit.",
"plugValueWidget:type", "",
],
"parameters" : [
"description",
"""
The inputs to the shader. Any number of inputs may be created
by adding child plugs. Supported plug types and the corresponding
OSL types are :
- FloatPlug (`float`)
- IntPlug (`int`)
- ColorPlug (`color`)
- V3fPlug (`vector`)
- M44fPlug (`matrix`)
- StringPlug (`string`)
- ClosurePlug (`closure color`)
- SplinefColor3f ( triplet of `float [], color [], string` )
""",
"layout:customWidget:footer:widgetType", "GafferOSLUI.OSLCodeUI._ParametersFooter",
"layout:customWidget:footer:index", -1,
"layout:section", "Settings.Inputs",
],
"parameters.*" : [
"labelPlugValueWidget:renameable", True,
# Since the names are used directly as variable names in the code,
# it's best to avoid any fancy label formatting for them.
"label", lambda plug : plug.getName(),
],
"out" : [
"description",
"""
The outputs from the shader. Any number of outputs may be created
by adding child plugs. Supported plug types are as for the input
parameters, with the exception of SplinefColor3f, which cannot be
used as an output.
""",
"plugValueWidget:type", "GafferUI.LayoutPlugValueWidget",
"layout:customWidget:footer:widgetType", "GafferOSLUI.OSLCodeUI._ParametersFooter",
"layout:customWidget:footer:index", -1,
"layout:section", "Settings.Outputs",
],
"out.*" : [
"labelPlugValueWidget:renameable", True,
"label", lambda plug : plug.getName(),
],
"code" : [
"description",
"""
The code for the body of the OSL shader. This should read from the
input parameters and write to the output parameters.
""",
"nodule:type", "",
"plugValueWidget:type", "GafferOSLUI.OSLCodeUI._CodePlugValueWidget",
"multiLineStringPlugValueWidget:role", "code",
"layout:label", "",
"layout:section", "Settings.Code",
],
}
)
##########################################################################
# _ParametersFooter
##########################################################################
class _ParametersFooter( GafferUI.PlugValueWidget ) :

	"""Footer row for the `parameters` and `out` plug layouts, providing
	a "+" menu button for adding new input/output plugs of each supported
	type."""

	def __init__( self, plug ) :

		row = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal )
		GafferUI.PlugValueWidget.__init__( self, row, plug )

		with row :

			# Indent past the label column so the button lines up with the
			# plug value widgets above it.
			GafferUI.Spacer( imath.V2i( GafferUI.PlugWidget.labelWidth(), 1 ) )

			menuButton = GafferUI.MenuButton(
				image = "plus.png",
				hasFrame = False,
				menu = GafferUI.Menu(
					Gaffer.WeakMethod( self.__menuDefinition ),
					title = "Add " + ( "Input" if plug.direction() == plug.Direction.In else "Output" )
				),
				toolTip = "Add " + ( "Input" if plug.direction() == plug.Direction.In else "Output" ),
			)
			menuButton.setEnabled( not Gaffer.MetadataAlgo.readOnly( plug ) )

			# Expanding spacer keeps the button left-aligned.
			GafferUI.Spacer( imath.V2i( 1 ), imath.V2i( 999999, 1 ), parenting = { "expand" : True } )

	def _updateFromPlug( self ) :

		# Grey out the button while the plug is not editable.
		self.setEnabled( self._editable() )

	def __menuDefinition( self ) :

		# Builds the menu of plug types that may be added.
		result = IECore.MenuDefinition()

		labelsAndConstructors = [
			( "Int", Gaffer.IntPlug ),
			( "Float", Gaffer.FloatPlug ),
			( "Vector", functools.partial( Gaffer.V3fPlug, interpretation = IECore.GeometricData.Interpretation.Vector ) ),
			( "Normal", functools.partial( Gaffer.V3fPlug, interpretation = IECore.GeometricData.Interpretation.Normal ) ),
			( "Point", functools.partial( Gaffer.V3fPlug, interpretation = IECore.GeometricData.Interpretation.Point ) ),
			( "Color", Gaffer.Color3fPlug ),
			( "Matrix", Gaffer.M44fPlug ),
			( "String", Gaffer.StringPlug ),
			( "Closure", GafferOSL.ClosurePlug )
		]

		if self.getPlug().direction() == Gaffer.Plug.Direction.In :
			# Splines are only supported as inputs, so only offer them on
			# the `parameters` plug. Inserted before "Closure" (index -1).
			labelsAndConstructors.insert(
				-1,
				( "Color Spline",
					functools.partial(
						Gaffer.SplinefColor3fPlug,
						defaultValue = IECore.SplinefColor3f(
							IECore.CubicBasisf.catmullRom(),
							(
								( 0, imath.Color3f( 0 ) ),
								( 0, imath.Color3f( 0 ) ),
								( 1, imath.Color3f( 1 ) ),
								( 1, imath.Color3f( 1 ) ),
							)
						)
					)
				)
			)

		for label, constructor in labelsAndConstructors :
			result.append(
				"/" + label,
				{
					"command" : functools.partial( Gaffer.WeakMethod( self.__addPlug ), constructor ),
				}
			)

		return result

	def __addPlug( self, plugConstructor ) :

		# Adds a new dynamic plug of the chosen type, undoably.
		direction = self.getPlug().direction()
		plug = plugConstructor(
			name = "input1" if direction == Gaffer.Plug.Direction.In else "output1",
			direction = self.getPlug().direction(),
			flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic
		)

		with Gaffer.UndoScope( self.getPlug().ancestor( Gaffer.ScriptNode ) ) :
			self.getPlug().addChild( plug )
##########################################################################
# _CodePlugValueWidget
##########################################################################
class _CodePlugValueWidget( GafferUI.MultiLineStringPlugValueWidget ) :

	"""Code editor for the OSLCode `code` plug. Supports dropping one of
	the node's parameter plugs onto the editor to insert a reference to
	it in the shader source."""

	def __init__( self, plug, **kw ) :

		GafferUI.MultiLineStringPlugValueWidget.__init__( self, plug, **kw )

		self.textWidget().setRole( GafferUI.MultiLineTextWidget.Role.Code )
		self.textWidget().dropTextSignal().connect( Gaffer.WeakMethod( self.__dropText ), scoped = False )

	def __dropText( self, widget, dragData ) :

		# Only plugs that are direct children of the node's own
		# `parameters` or `out` plugs can be referenced from the code.
		if not isinstance( dragData, Gaffer.Plug ) :
			return None

		droppedPlug = dragData
		parent = droppedPlug.parent()
		node = droppedPlug.node()
		if parent not in ( node["parameters"], node["out"] ) :
			return None

		name = droppedPlug.getName()
		if isinstance( droppedPlug, Gaffer.SplinefColor3fPlug ) :
			# Splines are represented as three shader parameters.
			return "colorSpline( {0}Positions, {0}Values, {0}Basis, u )".format( name )

		return name
##########################################################################
# _ErrorWidget
##########################################################################
class _ErrorWidget( GafferUI.Widget ) :

	"""Shows OSL compilation errors for an OSLCode node, hiding itself
	whenever the most recent compilation succeeded."""

	def __init__( self, node, **kw ) :

		messageWidget = GafferUI.MessageWidget()
		GafferUI.Widget.__init__( self, messageWidget, **kw )
		self.__messageWidget = messageWidget

		node.errorSignal().connect( Gaffer.WeakMethod( self.__error ), scoped = False )
		node.shaderCompiledSignal().connect( Gaffer.WeakMethod( self.__shaderCompiled ), scoped = False )

		# Nothing to report until the first error arrives.
		messageWidget.setVisible( False )

	def __error( self, plug, source, error ) :

		# Replace any previous report with the latest error and show it.
		self.__messageWidget.clear()
		self.__messageWidget.messageHandler().handle( IECore.Msg.Level.Error, "Compilation error", error )
		self.__messageWidget.setVisible( True )

	def __shaderCompiled( self ) :

		self.__messageWidget.setVisible( False )
##########################################################################
# Plug menu
##########################################################################
## \todo This functionality is duplicated in several places (NodeUI,
# BoxUI, CompoundDataPlugValueWidget). It would be better if we could
# just control it in one place with a "plugValueWidget:removeable"
# metadata value. This main reason we can't do that right now is that
# we'd want to register the metadata with "parameters.*", but that would
# match "parameters.vector.x" as well as "parameters.vector". This is
# a general problem we have with the metadata matching - we should make
# '.' unmatchable by '*'.
def __deletePlug( plug ) :

	# Removes the plug from its parent, undoably.
	with Gaffer.UndoScope( plug.ancestor( Gaffer.ScriptNode ) ) :
		plug.parent().removeChild( plug )
def __plugPopupMenu( menuDefinition, plugValueWidget ) :

	# Extends plug popup menus on OSLCode nodes : "Delete" for parameter
	# plugs, and an "Insert" submenu of common OSL functions for the code
	# plug itself.
	plug = plugValueWidget.getPlug()
	node = plug.node()
	if not isinstance( node, GafferOSL.OSLCode ) :
		return

	if plug.parent() in ( node["parameters"], node["out"] ) :

		menuDefinition.append( "/DeleteDivider", { "divider" : True } )
		menuDefinition.append(
			"/Delete",
			{
				"command" : functools.partial( __deletePlug, plug ),
				# Evaluated eagerly, at menu build time.
				"active" : not plugValueWidget.getReadOnly() and not Gaffer.MetadataAlgo.readOnly( plug )
			}
		)

	elif plug.isSame( node["code"] ) :

		if len( menuDefinition.items() ) :
			menuDefinition.prepend( "/InsertDivider", { "divider" : True } )
		menuDefinition.prepend(
			"/Insert",
			{
				"subMenu" : functools.partial(
					_CodeMenu.commonFunctionMenu,
					command = plugValueWidget.textWidget().insertText,
					# Evaluated lazily, each time the submenu is shown.
					activator = lambda : not plugValueWidget.getReadOnly() and not Gaffer.MetadataAlgo.readOnly( plug ),
				),
			},
		)
# Install the popup-menu extension for all PlugValueWidgets.
GafferUI.PlugValueWidget.popupMenuSignal().connect( __plugPopupMenu, scoped = False )
##########################################################################
# NodeEditor tool menu
##########################################################################
def __toolMenu( nodeEditor, node, menuDefinition ) :

	"""Adds an "Export OSL Shader..." entry to the NodeEditor tool menu
	for OSLCode nodes."""

	if not isinstance( node, GafferOSL.OSLCode ) :
		return

	exportCommand = functools.partial( __exportOSLShader, nodeEditor, node )
	menuDefinition.append( "/ExportDivider", { "divider" : True } )
	menuDefinition.append( "/Export OSL Shader...", { "command" : exportCommand } )
def __exportOSLShader( nodeEditor, node ) :

	# Prompt for a destination path, defaulting to the "shader" bookmarks.
	bookmarks = GafferUI.Bookmarks.acquire( node, category="shader" )
	path = Gaffer.FileSystemPath( bookmarks.getDefault( nodeEditor ) )
	path.setFilter( Gaffer.FileSystemPath.createStandardFilter( [ "osl" ] ) )
	dialogue = GafferUI.PathChooserDialogue( path, title="Export OSL Shader", confirmLabel="Export", leaf=True, bookmarks=bookmarks )
	path = dialogue.waitForPath( parentWindow = nodeEditor.ancestor( GafferUI.Window ) )
	# A falsy path means the dialogue was cancelled.
	if not path :
		return
	path = str( path )
	if not path.endswith( ".osl" ) :
		path += ".osl"
	with GafferUI.ErrorDialogue.ErrorHandler( title = "Error Exporting Shader", parentWindow = nodeEditor.ancestor( GafferUI.Window ) ) :
		with open( path, "w" ) as f :
			with nodeEditor.getContext() :
				# The shader name passed to source() is derived from the
				# file name, minus the extension.
				f.write( node.source( os.path.splitext( os.path.basename( path ) )[0] ) )
# Install the tool-menu extension on all NodeEditors.
GafferUI.NodeEditor.toolMenuSignal().connect( __toolMenu, scoped = False )
|
|
#!/usr/bin/env python
#=============================================================================#
# #
# NAME: verify_image_data.py #
# #
# USAGE: ./verify_image_data.py PATH/TO/DATA-DIRECTORY [-I patI] [-Q patQ] #
# [-U patU] [-h --help] #
# #
# PURPOSE: Read and verify the Stokes I, Q & U image-FITS data, perform #
# simple sanity checks and write vectors of frequency and filename #
# to ASCII files on disk. #
# #
# MODIFIED: 19-November-2015 by C. Purcell #
# #
#=============================================================================#
# #
# The MIT License (MIT) #
# #
# Copyright (c) 2015 Cormac R. Purcell #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the "Software"), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
#=============================================================================#
# Default wildcard patterns to match the Stokes I Q and U files. These may
# be overridden at runtime via the -I, -Q and -U command line arguments.
patIdefault = '*I.fits'
patQdefault = '*Q.fits'
patUdefault = '*U.fits'
# END USER EDITS -------------------------------------------------------------#
import os
import sys
import re
import glob
import argparse
import numpy as np
import astropy.io.fits as pf
from Imports.util_PPC import sort_nicely
#-----------------------------------------------------------------------------#
def main():
    """
    Parse the command line and launch the image-data verification.
    """

    # Help string to be shown using the -h option
    descStr = """
    Scan a directory for FITS format files containing Stokes I, Q and U data.
    Each FITS file should contain a single image (frequency plane) from a cube
    of data. The script sorts the list of files by frequency (read from the
    CRVAL3 header key) and writes ordered vectors of frequency and filename to
    ASCII text files in the data directory. Files for each Stokes
    parameter should have a unique naming format, matched by the wildcard
    patterns. Default patterns are set at the top of the script and may be
    overridden using command line arguments.
    Note: The pipeline assumes each FITS file covers the same area of sky and
    that ALL sources are contained within that area (i.e., a survey field). The
    pipeline is not currently set up to understand pointed observations, where
    each source has been observed separately.
    Example:
    ./1_verify_image_data.py -I *I.fits -Q *Q.fits -U *U.fits testData/
    """

    # Parse the command line options
    parser = argparse.ArgumentParser(description=descStr,
                                 formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('dataPath', metavar='PATH/TO/DATA', default='.',
                        nargs='?', help='Path to data directory [.]')
    # The three Stokes pattern options differ only in their letter.
    for flag, dest, default, stokes in (('-I', 'patI', patIdefault, 'I'),
                                        ('-Q', 'patQ', patQdefault, 'Q'),
                                        ('-U', 'patU', patUdefault, 'U')):
        parser.add_argument(flag, dest=dest, default=default,
                            help='Pattern to match Stokes %s files [%s]' %
                            (stokes, default))
    args = parser.parse_args()

    # Call the verify function
    verify_image_data(args.dataPath, args.patI, args.patQ, args.patU)
#-----------------------------------------------------------------------------#
def verify_image_data(dataPath, patI, patQ, patU):
"""
Scan for Stokes I, Q & U files matching the wildcard patterns in the data
directory. Loop through the files, read their frequencies from the CDELT3
header card and write an asscending vector of frequency to an ASCII text
file. Also write lists of channel numbers and separate lists of I, Q & U
files in frequency order. The script assumes that each file is a single
plane drawn from a larger cube.
"""
# Sanity checks
if not os.path.exists(dataPath):
print "\nErr: The directory '%s' does not exist." % dataPath
sys.exit()
dataPath = dataPath.rstrip('/')
# Find all of the Stokes I,Q,U files in the directory
print "\nPatterns = '%s', '%s', '%s'" % (patI, patQ, patU)
print "Scanning the directory '%s' ..." % dataPath
dataILst = glob.glob(dataPath + '/' + patI)
sort_nicely(dataILst)
nIfiles = len(dataILst)
dataQLst = glob.glob(dataPath + '/' + patQ)
sort_nicely(dataQLst)
nQfiles = len(dataQLst)
dataULst = glob.glob(dataPath + '/' + patU)
sort_nicely(dataULst)
nUfiles = len(dataULst)
# Check that the same number of files was found for each pattern
print "Found %d I files, %d Q files and %d U files.\n" % (nIfiles,
nQfiles,
nUfiles)
if nIfiles!=nQfiles or nIfiles!=nUfiles:
print "Err: script cannot process unequal numbers of files."
print "If necessary use NaN-filled files to replace missing data."
sys.exit()
if nIfiles==0:
print "Err: No matching files found."
sys.exit()
if nIfiles<=3:
print "Err: Less than three matching files found."
print "This means there are <3 channels in your dataset!"
sys.exit()
# List to store frequency
freqLst = []
# Loop through the files in lock-step
print "Scanning through the files ..."
for i in range(len(dataILst)):
# Some feedback
print os.path.split(dataILst[i])[-1],
print os.path.split(dataQLst[i])[-1],
print os.path.split(dataULst[i])[-1]
# Read the headers
headI = pf.getheader(dataILst[i])
headQ = pf.getheader(dataQLst[i])
headU = pf.getheader(dataULst[i])
# Check the number of axes are correct and >= 3
naxisI = headI['NAXIS']
naxisQ = headI['NAXIS']
naxisU = headI['NAXIS']
if naxisI!=naxisQ or naxisI!=naxisU:
print "Err: The number of dimensions in each file do not match."
print "[%s, %s, %s]" % (naxisI, naxisQ, naxisU)
sys.exit()
if naxisI<3:
print "Err: Less than 3 data axes found [NAXIS = %d]." % naxisI
sys.exit()
# Check the shape of the images are the same
shapeI = (headI['NAXIS2'], headI['NAXIS2'])
shapeQ = (headQ['NAXIS2'], headQ['NAXIS2'])
shapeU = (headU['NAXIS2'], headU['NAXIS2'])
if shapeI!=shapeQ or shapeI!=shapeU:
print "Err: The shape of the three images arrays do not match."
print "[%s, %s, %s]" % (shapeI, shapeQ, shapeU)
sys.exit()
# Check the frequencies are the same (assume CRVAL3=freq)
freqI = headI['CRVAL3'] + (1 - headI['CRPIX3']) * headI['CDELT3']
freqQ = headQ['CRVAL3'] + (1 - headQ['CRPIX3']) * headQ['CDELT3']
freqU = headU['CRVAL3'] + (1 - headU['CRPIX3']) * headU['CDELT3']
if freqI!=freqQ or freqI!=freqU:
print "The frequencies of the three Stokes files do not match."
print "[%s, %s, %s]" % (freqI, freqQ, freqU)
sys.exit()
# Record the parameters
freqLst.append(freqI)
# Sort the filenames, channels into frequency order
multiLst = zip(freqLst,
[os.path.split(x)[-1] for x in dataILst],
[os.path.split(x)[-1] for x in dataQLst],
[os.path.split(x)[-1] for x in dataULst])
multiLst.sort()
freqLstS, dataILstS, dataQLstS, dataULstS = zip(*multiLst)
# Save an ordered frequency list
freqFile = dataPath + '/freqs_Hz.txt'
print "\nSaving ascending frequency vector to '%s'." % freqFile
if os.path.exists(freqFile):
os.remove(freqFile)
np.savetxt(freqFile, freqLstS)
# Save the list of I,Q & U files
dataFile = dataPath + '/fileLstI.txt'
print "Saving ordered list of Stokes I files to '%s'." % dataFile
if os.path.exists(dataFile):
os.remove(dataFile)
np.savetxt(dataFile, dataILstS, fmt='%s')
dataFile = dataPath + '/fileLstQ.txt'
print "Saving ordered list of Stokes Q files to '%s'." % dataFile
if os.path.exists(dataFile):
os.remove(dataFile)
np.savetxt(dataFile, dataQLstS, fmt='%s')
dataFile = dataPath + '/fileLstU.txt'
print "Saving ordered list of Stokes U files to '%s'." % dataFile
if os.path.exists(dataFile):
os.remove(dataFile)
np.savetxt(dataFile, dataULstS, fmt='%s')
# Note the type of data in a file
typeFile = dataPath + '/dataType.txt'
print "Noting dataType='FITS_planes' in file '%s'." % typeFile
FH = open(typeFile, 'w')
FH.write("FITS_planes\n")
FH.close()
#-----------------------------------------------------------------------------#
# Script entry point.
if __name__=="__main__":
    main()
|
|
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import copy
import datetime
import itertools
import random
import mock
from novaclient import exceptions as nova_exception
from oslotest import base as test_base
import six
from ec2api.api import instance as instance_api
import ec2api.clients
from ec2api import exception
from ec2api.tests.unit import base
from ec2api.tests.unit import fakes
from ec2api.tests.unit import matchers
from ec2api.tests.unit import tools
class InstanceTestCase(base.ApiTestCase):
    def setUp(self):
        """Mock out the collaborating ec2api subsystems and Nova flavors."""
        super(InstanceTestCase, self).setUp()
        self.addCleanup(self._reset_engine)
        # Replace the APIs that instance code delegates to with mocks, so
        # that only the instance logic itself is exercised.
        self.network_interface_api = self.mock(
            'ec2api.api.instance.network_interface_api')
        self.address_api = self.mock('ec2api.api.address')
        self.security_group_api = self.mock(
            'ec2api.api.instance.security_group_api')
        self.utils_generate_uid = self.mock(
            'ec2api.api.instance._utils_generate_uid')
        # A single fake flavor serves both flavors.get() and flavors.list().
        # configure_mock is used because 'name' is a Mock constructor arg.
        self.fake_flavor = mock.Mock()
        self.fake_flavor.configure_mock(name='fake_flavor',
                                        id='fakeFlavorId')
        self.nova.flavors.get.return_value = self.fake_flavor
        self.nova.flavors.list.return_value = [self.fake_flavor]
    def _reset_engine(self):
        # Tests swap the module-global engine; restore the default engine
        # on cleanup so later tests are unaffected.
        instance_api.instance_engine = instance_api.InstanceEngineNeutron()
    @mock.patch('ec2api.api.instance.describe_instances')
    @mock.patch('ec2api.api.instance.InstanceEngineNeutron.'
                'get_vpc_default_security_group_id')
    def test_run_instances(self, get_vpc_default_security_group_id,
                           describe_instances):
        """Run instance with various network interface settings."""
        instance_api.instance_engine = (
            instance_api.InstanceEngineNeutron())
        self.set_mock_db_items(
            fakes.DB_SUBNET_1, fakes.DB_NETWORK_INTERFACE_1, fakes.DB_IMAGE_1,
            fakes.DB_IMAGE_ARI_1, fakes.DB_IMAGE_AKI_1)
        self.glance.images.get.return_value = fakes.OSImage(fakes.OS_IMAGE_1)
        self.network_interface_api.create_network_interface.return_value = (
            {'networkInterface': fakes.EC2_NETWORK_INTERFACE_1})
        self.db_api.add_item.return_value = fakes.DB_INSTANCE_1
        self.nova.servers.create.return_value = (
            fakes.OSInstance({
                'id': fakes.ID_OS_INSTANCE_1,
                'flavor': {'id': 'fakeFlavorId'},
                'image': {'id': fakes.ID_OS_IMAGE_1}}))
        self.utils_generate_uid.return_value = fakes.ID_EC2_RESERVATION_1
        get_vpc_default_security_group_id.return_value = None

        def do_check(params, create_network_interface_kwargs=None,
                     delete_on_termination=None):
            # Runs one RunInstances scenario and verifies all side effects.
            # A port created implicitly by the engine
            # (create_network_interface_kwargs is not None) is deleted on
            # termination unless the request explicitly says otherwise.
            delete_port_on_termination = (
                create_network_interface_kwargs is not None
                if delete_on_termination is None
                else delete_on_termination)
            eni = fakes.gen_ec2_network_interface(
                fakes.ID_EC2_NETWORK_INTERFACE_1,
                fakes.EC2_SUBNET_1,
                [fakes.IP_NETWORK_INTERFACE_1],
                description=fakes.DESCRIPTION_NETWORK_INTERFACE_1,
                ec2_instance_id=fakes.ID_EC2_INSTANCE_1,
                device_index=0,
                delete_on_termination=delete_port_on_termination)
            expected_reservation = fakes.gen_ec2_reservation(
                fakes.ID_EC2_RESERVATION_1,
                [tools.patch_dict(
                    fakes.gen_ec2_instance(
                        fakes.ID_EC2_INSTANCE_1,
                        private_ip_address=fakes.IP_NETWORK_INTERFACE_1,
                        ec2_network_interfaces=[eni],
                        image_id=fakes.ID_EC2_IMAGE_1,
                        reservation_id=fakes.ID_EC2_RESERVATION_1),
                    {'privateDnsName': None},
                    ['rootDeviceType', 'rootDeviceName'])])
            describe_instances.return_value = {
                'reservationSet': [expected_reservation]}

            params.update({'ImageId': fakes.ID_EC2_IMAGE_1,
                           'InstanceType': 'fake_flavor',
                           'MinCount': '1', 'MaxCount': '1'})
            resp = self.execute('RunInstances', params)

            self.assertThat(resp, matchers.DictMatches(expected_reservation))
            if create_network_interface_kwargs is not None:
                (self.network_interface_api.
                 create_network_interface.assert_called_once_with(
                     mock.ANY, fakes.ID_EC2_SUBNET_1,
                     **create_network_interface_kwargs))
            self.nova.servers.create.assert_called_once_with(
                fakes.EC2_INSTANCE_1['privateDnsName'],
                fakes.ID_OS_IMAGE_1, self.fake_flavor,
                min_count=1, max_count=1,
                kernel_id=None, ramdisk_id=None,
                availability_zone=None,
                block_device_mapping_v2=[],
                security_groups=None,
                nics=[{'port-id': fakes.ID_OS_PORT_1}],
                key_name=None, userdata=None)
            self.db_api.add_item.assert_called_once_with(
                mock.ANY, 'i', tools.purge_dict(fakes.DB_INSTANCE_1, ('id',)))
            (self.network_interface_api.
             _attach_network_interface_item.assert_called_once_with(
                 mock.ANY, fakes.DB_NETWORK_INTERFACE_1,
                 fakes.ID_EC2_INSTANCE_1, 0,
                 delete_on_termination=delete_port_on_termination))
            describe_instances.assert_called_once_with(
                mock.ANY, [fakes.ID_EC2_INSTANCE_1])

            # Reset all mocks so each scenario starts from a clean slate.
            self.network_interface_api.reset_mock()
            self.nova.servers.reset_mock()
            self.db_api.reset_mock()
            describe_instances.reset_mock()

        # Subnet only: the engine creates the port itself.
        do_check({'SubnetId': fakes.ID_EC2_SUBNET_1},
                 create_network_interface_kwargs={})
        # Subnet with explicit security groups.
        do_check({'SubnetId': fakes.ID_EC2_SUBNET_1,
                  'SecurityGroupId.1': fakes.ID_EC2_SECURITY_GROUP_1,
                  'SecurityGroupId.2': fakes.ID_EC2_SECURITY_GROUP_2},
                 create_network_interface_kwargs={
                     'security_group_id': [fakes.ID_EC2_SECURITY_GROUP_1,
                                           fakes.ID_EC2_SECURITY_GROUP_2]})
        # Subnet with a fixed private IP.
        do_check({'SubnetId': fakes.ID_EC2_SUBNET_1,
                  'PrivateIpAddress': fakes.IP_FIRST_SUBNET_1},
                 create_network_interface_kwargs={
                     'private_ip_address': fakes.IP_FIRST_SUBNET_1})
        # Full NetworkInterface.N syntax with security group and IP.
        do_check({'NetworkInterface.1.DeviceIndex': '0',
                  'NetworkInterface.1.SubnetId': fakes.ID_EC2_SUBNET_1,
                  'NetworkInterface.1.SecurityGroupId.1': (
                      fakes.ID_EC2_SECURITY_GROUP_1),
                  'NetworkInterface.1.PrivateIpAddress.1': (
                      fakes.IP_FIRST_SUBNET_1)},
                 create_network_interface_kwargs={
                     'security_group_id': [fakes.ID_EC2_SECURITY_GROUP_1],
                     'private_ip_address': [fakes.IP_FIRST_SUBNET_1]})
        # Explicit DeleteOnTermination=False overrides the default.
        do_check({'NetworkInterface.1.DeviceIndex': '0',
                  'NetworkInterface.1.SubnetId': fakes.ID_EC2_SUBNET_1,
                  'NetworkInterface.1.DeleteOnTermination': 'False'},
                 create_network_interface_kwargs={},
                 delete_on_termination=False)
        do_check({'NetworkInterface.1.DeviceIndex': '0',
                  'NetworkInterface.1.SubnetId': fakes.ID_EC2_SUBNET_1,
                  'NetworkInterface.1.SecurityGroupId.1': (
                      fakes.ID_EC2_SECURITY_GROUP_1),
                  'NetworkInterface.1.DeleteOnTermination': 'False'},
                 create_network_interface_kwargs={
                     'security_group_id': [fakes.ID_EC2_SECURITY_GROUP_1]},
                 delete_on_termination=False)
        # Pre-existing network interface: no port is created.
        do_check({'NetworkInterface.1.DeviceIndex': '0',
                  'NetworkInterface.1.NetworkInterfaceId': (
                      fakes.ID_EC2_NETWORK_INTERFACE_1)})
    @mock.patch('ec2api.api.instance.describe_instances')
    @mock.patch('ec2api.api.instance.InstanceEngineNeutron.'
                'get_vpc_default_security_group_id')
    def test_run_instances_multiple_networks(self,
                                             get_vpc_default_security_group_id,
                                             describe_instances):
        """Run 2 instances at once on 2 subnets in all combinations."""
        instance_api.instance_engine = (
            instance_api.InstanceEngineNeutron())
        self._build_multiple_data_model()

        self.glance.images.get.return_value = fakes.OSImage(fakes.OS_IMAGE_1)
        get_vpc_default_security_group_id.return_value = None

        # Expected EC2 view: each launched instance carries a consecutive
        # pair of the attached network interfaces.
        ec2_instances = [
            tools.patch_dict(
                fakes.gen_ec2_instance(
                    ec2_instance_id, launch_index=l_i,
                    ec2_network_interfaces=eni_pair,
                    reservation_id=fakes.ID_EC2_RESERVATION_1),
                {'privateDnsName': None},
                ['rootDeviceType', 'rootDeviceName'])
            for l_i, (ec2_instance_id, eni_pair) in enumerate(zip(
                self.IDS_EC2_INSTANCE,
                zip(*[iter(self.EC2_ATTACHED_ENIS)] * 2)))]
        ec2_reservation = fakes.gen_ec2_reservation(fakes.ID_EC2_RESERVATION_1,
                                                    ec2_instances)
        describe_instances.return_value = {'reservationSet': [ec2_reservation]}

        self.set_mock_db_items(
            fakes.DB_IMAGE_1, fakes.DB_SUBNET_1, fakes.DB_SUBNET_2,
            *self.DB_DETACHED_ENIS)
        # Ports are handed out (and servers created) in request order.
        self.network_interface_api.create_network_interface.side_effect = (
            [{'networkInterface': eni}
             for eni in self.EC2_DETACHED_ENIS])
        self.nova.servers.create.side_effect = [
            fakes.OSInstance({
                'id': os_instance_id,
                'flavor': {'id': 'fakeFlavorId'}})
            for os_instance_id in self.IDS_OS_INSTANCE]
        self.utils_generate_uid.return_value = fakes.ID_EC2_RESERVATION_1
        self.db_api.add_item.side_effect = self.DB_INSTANCES

        resp = self.execute(
            'RunInstances',
            {'ImageId': fakes.ID_EC2_IMAGE_1,
             'InstanceType': 'fake_flavor',
             'MinCount': '2',
             'MaxCount': '2',
             'NetworkInterface.1.DeviceIndex': '0',
             'NetworkInterface.1.SubnetId': fakes.ID_EC2_SUBNET_1,
             'NetworkInterface.2.DeviceIndex': '1',
             'NetworkInterface.2.SubnetId': fakes.ID_EC2_SUBNET_2,
             'NetworkInterface.2.DeleteOnTermination': 'False'})

        self.assertThat(resp, matchers.DictMatches(ec2_reservation),
                        verbose=True)

        self.network_interface_api.create_network_interface.assert_has_calls([
            mock.call(mock.ANY, ec2_subnet_id)
            for ec2_subnet_id in self.IDS_EC2_SUBNET_BY_PORT])
        # One boot per instance, each consuming a pair of ports.
        self.nova.servers.create.assert_has_calls([
            mock.call(
                '%s-%s' % (fakes.ID_EC2_RESERVATION_1, launch_index),
                fakes.ID_OS_IMAGE_1, self.fake_flavor,
                min_count=1, max_count=1,
                kernel_id=None, ramdisk_id=None,
                availability_zone=None,
                block_device_mapping_v2=[],
                security_groups=None,
                nics=[{'port-id': port_id}
                      for port_id in port_ids],
                key_name=None, userdata=None)
            for launch_index, port_ids in enumerate(
                zip(*[iter(self.IDS_OS_PORT)] * 2))])
        # Device 0 keeps the default delete_on_termination=True; device 1
        # was requested with DeleteOnTermination=False.
        (self.network_interface_api.
         _attach_network_interface_item.assert_has_calls([
             mock.call(mock.ANY, eni, ec2_instance_id, dev_ind,
                       delete_on_termination=dot)
             for eni, ec2_instance_id, dev_ind, dot in zip(
                 self.DB_DETACHED_ENIS,
                 itertools.chain(*map(lambda i: [i] * 2,
                                      self.IDS_EC2_INSTANCE)),
                 [0, 1] * 2,
                 [True, False, True, False])]))
        self.db_api.add_item.assert_has_calls([
            mock.call(mock.ANY, 'i', tools.purge_dict(db_instance, ['id']))
            for db_instance in self.DB_INSTANCES])
    @mock.patch('ec2api.api.instance._parse_block_device_mapping')
    @mock.patch('ec2api.api.instance.describe_instances')
    @mock.patch('ec2api.api.instance.InstanceEngineNeutron.'
                'get_ec2_classic_os_network')
    def test_run_instances_other_parameters(self, get_ec2_classic_os_network,
                                            describe_instances,
                                            parse_block_device_mapping):
        """Check kernel/ramdisk/BDM/user-data handling for both engines."""
        self.set_mock_db_items(
            fakes.DB_IMAGE_1, fakes.DB_IMAGE_AKI_1, fakes.DB_IMAGE_ARI_1)
        self.glance.images.get.side_effect = (
            tools.get_by_1st_arg_getter({
                fakes.ID_OS_IMAGE_1: fakes.OSImage(fakes.OS_IMAGE_1),
                fakes.ID_OS_IMAGE_AKI_1: fakes.OSImage(fakes.OS_IMAGE_AKI_1),
                fakes.ID_OS_IMAGE_ARI_1: fakes.OSImage(fakes.OS_IMAGE_ARI_1)}))
        get_ec2_classic_os_network.return_value = {'id': fakes.random_os_id()}
        # The API transports user data base64-encoded; Nova receives it raw.
        user_data = base64.b64decode(fakes.USER_DATA_INSTANCE_2)
        parse_block_device_mapping.return_value = []

        # NOTE(review): mutable default arguments are shared across calls;
        # harmless here because do_check never mutates them.
        def do_check(engine, extra_kwargs={}, extra_db_instance={}):
            instance_api.instance_engine = engine
            # First describe returns nothing (idempotency probe), the
            # second returns the created reservation.
            describe_instances.side_effect = [
                {'reservationSet': []},
                {'reservationSet': [{'foo': 'bar'}]}]
            self.execute(
                'RunInstances',
                {'ImageId': fakes.ID_EC2_IMAGE_1,
                 'InstanceType': 'fake_flavor',
                 'MinCount': '1', 'MaxCount': '1',
                 'KernelId': fakes.ID_EC2_IMAGE_AKI_1,
                 'RamdiskId': fakes.ID_EC2_IMAGE_ARI_1,
                 'SecurityGroup.1': 'default',
                 'Placement.AvailabilityZone': 'fake_zone',
                 'ClientToken': 'fake_client_token',
                 'BlockDeviceMapping.1.DeviceName': '/dev/vdd',
                 'BlockDeviceMapping.1.Ebs.SnapshotId': (
                     fakes.ID_EC2_SNAPSHOT_1),
                 'BlockDeviceMapping.1.Ebs.DeleteOnTermination': 'False',
                 'UserData': fakes.USER_DATA_INSTANCE_2})
            self.nova.servers.create.assert_called_once_with(
                mock.ANY, mock.ANY, mock.ANY, min_count=1, max_count=1,
                userdata=user_data, kernel_id=fakes.ID_OS_IMAGE_AKI_1,
                ramdisk_id=fakes.ID_OS_IMAGE_ARI_1, key_name=None,
                block_device_mapping_v2=[],
                availability_zone='fake_zone', security_groups=['default'],
                **extra_kwargs)
            self.nova.servers.reset_mock()
            db_instance = {'os_id': mock.ANY,
                           'vpc_id': None,
                           'reservation_id': mock.ANY,
                           'launch_index': 0,
                           'client_token': 'fake_client_token'}
            db_instance.update(extra_db_instance)
            self.db_api.add_item.assert_called_once_with(
                mock.ANY, 'i', db_instance)
            self.db_api.reset_mock()
            parse_block_device_mapping.assert_called_once_with(
                mock.ANY,
                [{'device_name': '/dev/vdd',
                  'ebs': {'snapshot_id': fakes.ID_EC2_SNAPSHOT_1,
                          'delete_on_termination': False}}])
            parse_block_device_mapping.reset_mock()

        # Neutron engine boots onto the shared EC2-classic network.
        do_check(
            instance_api.InstanceEngineNeutron(),
            extra_kwargs={
                'nics': [
                    {'net-id': get_ec2_classic_os_network.return_value['id']}],
            },
            extra_db_instance={'vpc_id': None})
        # Nova-network engine passes no nics at all.
        do_check(instance_api.InstanceEngineNova())
@mock.patch('ec2api.api.instance.describe_instances')
def test_idempotent_run(self, describe_instances):
self.set_mock_db_items()
# NOTE(ft): check select corresponding instance by client_token
describe_instances.return_value = {
'reservationSet': [{'key': 'value'}]}
resp = self.execute('RunInstances',
{'MinCount': '1', 'MaxCount': '1',
'ImageId': fakes.ID_EC2_IMAGE_1,
'InstanceType': 'fake_flavor',
'ClientToken': 'client-token-1'})
self.assertEqual({'key': 'value'}, resp)
describe_instances.assert_called_once_with(
mock.ANY, filter=[{'name': 'client-token',
'value': ['client-token-1']}])
# NOTE(ft): check pass to general run_instances logic if no
# corresponding client_token is found
describe_instances.return_value = {'reservationSet': []}
self.assert_execution_error(
'InvalidAMIID.NotFound', 'RunInstances',
{'MinCount': '1', 'MaxCount': '1',
'ImageId': fakes.ID_EC2_IMAGE_1,
'InstanceType': 'fake_flavor',
'ClientToken': 'client-token-2'})
    def test_run_instances_rollback(self):
        """Check cleanup when RunInstances fails after partial creation.

        Network interface attachment is forced to fail; the created OS
        instance and its DB item must be deleted, and a port created by
        the request itself (but not a pre-existing one) must be removed.
        """
        instance_api.instance_engine = (
            instance_api.InstanceEngineNeutron())
        self.set_mock_db_items(fakes.DB_IMAGE_1, fakes.DB_SUBNET_1,
                               fakes.DB_NETWORK_INTERFACE_1)
        self.glance.images.get.return_value = fakes.OSImage(fakes.OS_IMAGE_1)
        self.network_interface_api.create_network_interface.return_value = (
            {'networkInterface': fakes.EC2_NETWORK_INTERFACE_1})
        self.db_api.add_item.return_value = fakes.DB_INSTANCE_1
        self.utils_generate_uid.return_value = fakes.ID_EC2_RESERVATION_1
        self.nova.servers.create.return_value = (
            fakes.OSInstance({'id': fakes.ID_OS_INSTANCE_1,
                              'flavor': {'id': 'fakeFlavorId'},
                              'image': {'id': fakes.ID_OS_IMAGE_1}}))
        # the failure injected into the run flow
        (self.network_interface_api.
         _attach_network_interface_item.side_effect) = Exception()
        @tools.screen_unexpected_exception_logs
        def do_check(params, new_port=True):
            # mock_manager records call order across the three fake APIs so
            # the rollback sequence can be asserted with assert_has_calls
            mock_manager = mock.MagicMock()
            mock_manager.attach_mock(self.network_interface_api,
                                     'network_interface_api')
            mock_manager.attach_mock(self.neutron, 'neutron')
            mock_manager.attach_mock(self.nova.servers, 'nova_servers')
            params.update({'ImageId': fakes.ID_EC2_IMAGE_1,
                           'InstanceType': 'fake_flavor',
                           'MinCount': '1', 'MaxCount': '1'})
            self.assert_execution_error(
                self.ANY_EXECUTE_ERROR, 'RunInstances', params)
            # rollback always deletes the OS instance; the port is deleted
            # only when this request created it
            calls = [mock.call.nova_servers.delete(fakes.ID_OS_INSTANCE_1)]
            if new_port:
                calls.append(
                    mock.call.network_interface_api.delete_network_interface(
                        mock.ANY,
                        network_interface_id=fakes.ID_EC2_NETWORK_INTERFACE_1))
            mock_manager.assert_has_calls(calls)
            self.db_api.delete_item.assert_called_once_with(
                mock.ANY, fakes.ID_EC2_INSTANCE_1)
            self.network_interface_api.reset_mock()
            self.neutron.reset_mock()
            self.nova.servers.reset_mock()
            self.db_api.reset_mock()
        do_check({'SubnetId': fakes.ID_EC2_SUBNET_1})
        do_check({'NetworkInterface.1.DeviceIndex': '0',
                  'NetworkInterface.1.SubnetId': fakes.ID_EC2_SUBNET_1})
        do_check({'NetworkInterface.1.DeviceIndex': '0',
                  'NetworkInterface.1.SubnetId': fakes.ID_EC2_SUBNET_1,
                  'NetworkInterface.1.DeleteOnTermination': 'False'})
        # pre-existing port: rollback must not delete it
        do_check({'NetworkInterface.1.DeviceIndex': '0',
                  'NetworkInterface.1.NetworkInterfaceId': (
                      fakes.ID_EC2_NETWORK_INTERFACE_1)},
                 new_port=False)
    @mock.patch('ec2api.api.instance.describe_instances')
    def test_run_instances_multiply_rollback(self, describe_instances):
        """Check rollback of the failed instance in a multi-instance run.

        MinCount=2/MaxCount=3: the third instance fails, so only it must
        be rolled back while the first two survive in the response.
        """
        instances = [{'id': fakes.random_ec2_id('i'),
                      'os_id': fakes.random_os_id()}
                     for dummy in range(3)]
        os_instances = [fakes.OSInstance({'id': inst['os_id']})
                        for inst in instances]
        # only the two surviving instances are known to nova afterwards
        self.nova_admin.servers.list.return_value = os_instances[:2]
        network_interfaces = [{'id': fakes.random_ec2_id('eni'),
                               'os_id': fakes.random_os_id()}
                              for dummy in range(3)]
        self.set_mock_db_items(fakes.DB_IMAGE_1, fakes.DB_SUBNET_1,
                               *network_interfaces)
        self.glance.images.get.return_value = fakes.OSImage(fakes.OS_IMAGE_1)
        self.utils_generate_uid.return_value = fakes.ID_EC2_RESERVATION_1
        def do_check(engine):
            instance_api.instance_engine = engine
            self.network_interface_api.create_network_interface.side_effect = [
                {'networkInterface': {'networkInterfaceId': eni['id']}}
                for eni in network_interfaces]
            self.db_api.add_item.side_effect = instances
            self.nova.servers.create.side_effect = os_instances
            expected_reservation = {
                'reservationId': fakes.ID_EC2_RESERVATION_1,
                'instancesSet': [{'instanceId': inst['id']}
                                 for inst in instances[:2]]}
            describe_instances.return_value = {
                'reservationSet': [expected_reservation]}
            resp = self.execute('RunInstances',
                                {'ImageId': fakes.ID_EC2_IMAGE_1,
                                 'InstanceType': 'fake_flavor',
                                 'MinCount': '2', 'MaxCount': '3',
                                 'SubnetId': fakes.ID_EC2_SUBNET_1})
            self.assertThat(resp, matchers.DictMatches(expected_reservation))
            # only the third (failed) instance is cleaned up
            self.nova.servers.delete.assert_called_once_with(
                instances[2]['os_id'])
            self.db_api.delete_item.assert_called_once_with(
                mock.ANY, instances[2]['id'])
            self.nova.servers.reset_mock()
            self.db_api.reset_mock()
        # Neutron engine: third interface attachment fails
        (self.network_interface_api.
         _attach_network_interface_item.side_effect) = [
            None, None, Exception()]
        with tools.ScreeningLogger(log_name='ec2api.api'):
            do_check(instance_api.InstanceEngineNeutron())
            (self.network_interface_api.delete_network_interface.
             assert_called_once_with(
                 mock.ANY, network_interface_id=network_interfaces[2]['id']))
        # Nova engine: third server update fails
        self.nova.servers.update.side_effect = [None, None, Exception()]
        with tools.ScreeningLogger(log_name='ec2api.api'):
            do_check(instance_api.InstanceEngineNova())
def test_run_instances_invalid_parameters(self):
self.assert_execution_error('InvalidParameterValue', 'RunInstances',
{'ImageId': fakes.ID_EC2_IMAGE_1,
'MinCount': '0', 'MaxCount': '0'})
self.assert_execution_error('InvalidParameterValue', 'RunInstances',
{'ImageId': fakes.ID_EC2_IMAGE_1,
'MinCount': '1', 'MaxCount': '0'})
self.assert_execution_error('InvalidParameterValue', 'RunInstances',
{'ImageId': fakes.ID_EC2_IMAGE_1,
'MinCount': '0', 'MaxCount': '1'})
self.assert_execution_error('InvalidParameterValue', 'RunInstances',
{'ImageId': fakes.ID_EC2_IMAGE_1,
'MinCount': '2', 'MaxCount': '1'})
@mock.patch.object(fakes.OSInstance, 'delete', autospec=True)
@mock.patch.object(fakes.OSInstance, 'get', autospec=True)
def test_terminate_instances(self, os_instance_get, os_instance_delete):
"""Terminate 2 instances in one request."""
instance_api.instance_engine = (
instance_api.InstanceEngineNeutron())
self.set_mock_db_items(fakes.DB_INSTANCE_1, fakes.DB_INSTANCE_2)
os_instances = [fakes.OSInstance(fakes.OS_INSTANCE_1),
fakes.OSInstance(fakes.OS_INSTANCE_2)]
self.nova.servers.get.side_effect = os_instances
resp = self.execute('TerminateInstances',
{'InstanceId.1': fakes.ID_EC2_INSTANCE_1,
'InstanceId.2': fakes.ID_EC2_INSTANCE_2})
fake_state_change = {'previousState': {'code': 0,
'name': 'pending'},
'currentState': {'code': 0,
'name': 'pending'}}
self.assertThat(
resp,
matchers.DictMatches(
{'instancesSet': [
tools.update_dict({'instanceId': fakes.ID_EC2_INSTANCE_1},
fake_state_change),
tools.update_dict({'instanceId': fakes.ID_EC2_INSTANCE_2},
fake_state_change)]}))
self.assertEqual(2, self.nova.servers.get.call_count)
self.nova.servers.get.assert_any_call(fakes.ID_OS_INSTANCE_1)
self.nova.servers.get.assert_any_call(fakes.ID_OS_INSTANCE_2)
self.assertFalse(self.db_api.delete_item.called)
self.assertEqual(2, os_instance_delete.call_count)
self.assertEqual(2, os_instance_get.call_count)
for call_num, inst_id in enumerate(os_instances):
self.assertEqual(mock.call(inst_id),
os_instance_delete.call_args_list[call_num])
self.assertEqual(mock.call(inst_id),
os_instance_get.call_args_list[call_num])
def test_terminate_instances_multiple_networks(self):
"""Terminate an instance with various combinations of ports."""
self._build_multiple_data_model()
fake_state_change = {'previousState': {'code': 16,
'name': 'running'},
'currentState': {'code': 16,
'name': 'running'}}
ec2_terminate_instances_result = {
'instancesSet': [
tools.update_dict({'instanceId': fakes.ID_EC2_INSTANCE_1},
fake_state_change),
tools.update_dict({'instanceId': fakes.ID_EC2_INSTANCE_2},
fake_state_change)]}
self.nova.servers.get.side_effect = (
lambda ec2_id: fakes.OSInstance({'id': ec2_id,
'vm_state': 'active'}))
self.set_mock_db_items(*self.DB_INSTANCES)
resp = self.execute('TerminateInstances',
{'InstanceId.1': fakes.ID_EC2_INSTANCE_1,
'InstanceId.2': fakes.ID_EC2_INSTANCE_2})
self.assertThat(
resp, matchers.DictMatches(ec2_terminate_instances_result))
self.assertFalse(self.db_api.delete_item.called)
def test_terminate_instances_invalid_parameters(self):
self.assert_execution_error(
'InvalidInstanceID.NotFound', 'TerminateInstances',
{'InstanceId.1': fakes.random_ec2_id('i')})
@mock.patch('ec2api.api.instance._get_os_instances_by_instances')
def _test_instances_operation(self, operation, os_instance_operation,
valid_state, invalid_state,
get_os_instances_by_instances):
os_instance_1 = fakes.OSInstance(fakes.OS_INSTANCE_1)
os_instance_2 = fakes.OSInstance(fakes.OS_INSTANCE_2)
for inst in (os_instance_1, os_instance_2):
setattr(inst, 'OS-EXT-STS:vm_state', valid_state)
self.set_mock_db_items(fakes.DB_INSTANCE_1, fakes.DB_INSTANCE_2)
get_os_instances_by_instances.return_value = [os_instance_1,
os_instance_2]
resp = self.execute(operation,
{'InstanceId.1': fakes.ID_EC2_INSTANCE_1,
'InstanceId.2': fakes.ID_EC2_INSTANCE_2})
self.assertEqual({'return': True}, resp)
self.assertEqual([mock.call(os_instance_1), mock.call(os_instance_2)],
os_instance_operation.mock_calls)
self.db_api.get_items_by_ids.assert_called_once_with(
mock.ANY, set([fakes.ID_EC2_INSTANCE_1, fakes.ID_EC2_INSTANCE_2]))
get_os_instances_by_instances.assert_called_once_with(
mock.ANY, [fakes.DB_INSTANCE_1, fakes.DB_INSTANCE_2], exactly=True)
setattr(os_instance_2, 'OS-EXT-STS:vm_state', invalid_state)
os_instance_operation.reset_mock()
self.assert_execution_error('IncorrectInstanceState', 'StartInstances',
{'InstanceId.1': fakes.ID_EC2_INSTANCE_1,
'InstanceId.2': fakes.ID_EC2_INSTANCE_2})
self.assertEqual(0, os_instance_operation.call_count)
@mock.patch.object(fakes.OSInstance, 'start', autospec=True)
def test_start_instances(self, os_instance_start):
self._test_instances_operation('StartInstances', os_instance_start,
instance_api.vm_states_STOPPED,
instance_api.vm_states_ACTIVE)
@mock.patch.object(fakes.OSInstance, 'stop', autospec=True)
def test_stop_instances(self, os_instance_stop):
self._test_instances_operation('StopInstances', os_instance_stop,
instance_api.vm_states_ACTIVE,
instance_api.vm_states_STOPPED)
@mock.patch.object(fakes.OSInstance, 'reboot', autospec=True)
def test_reboot_instances(self, os_instance_reboot):
self._test_instances_operation('RebootInstances', os_instance_reboot,
instance_api.vm_states_ACTIVE,
instance_api.vm_states_BUILDING)
@mock.patch('oslo_utils.timeutils.utcnow')
def _test_instance_get_operation(self, operation, getter, key, utcnow):
self.set_mock_db_items(fakes.DB_INSTANCE_2)
os_instance_2 = fakes.OSInstance(fakes.OS_INSTANCE_2)
self.nova.servers.get.return_value = os_instance_2
getter.return_value = 'fake_data'
utcnow.return_value = datetime.datetime(2015, 1, 19, 23, 34, 45, 123)
resp = self.execute(operation,
{'InstanceId': fakes.ID_EC2_INSTANCE_2})
expected_data = (base64.b64encode(six.b(getter.return_value))
.decode("utf-8"))
self.assertEqual({'instanceId': fakes.ID_EC2_INSTANCE_2,
'timestamp': '2015-01-19T23:34:45.000Z',
key: expected_data},
resp)
self.db_api.get_item_by_id.assert_called_once_with(
mock.ANY, fakes.ID_EC2_INSTANCE_2)
self.nova.servers.get.assert_called_once_with(fakes.ID_OS_INSTANCE_2)
getter.assert_called_once_with(os_instance_2)
@mock.patch.object(fakes.OSInstance, 'get_password', autospec=True)
def test_get_password_data(self, get_password):
self._test_instance_get_operation('GetPasswordData',
get_password, 'passwordData')
@mock.patch.object(fakes.OSInstance, 'get_console_output', autospec=True)
def test_console_output(self, get_console_output):
self._test_instance_get_operation('GetConsoleOutput',
get_console_output, 'output')
    def test_describe_instances(self):
        """Describe 2 instances, one of which is vpc instance."""
        instance_api.instance_engine = (
            instance_api.InstanceEngineNeutron())
        # DB contains both instances plus their interfaces, images, volumes
        self.set_mock_db_items(
            fakes.DB_INSTANCE_1, fakes.DB_INSTANCE_2,
            fakes.DB_NETWORK_INTERFACE_1, fakes.DB_NETWORK_INTERFACE_2,
            fakes.DB_IMAGE_1, fakes.DB_IMAGE_2,
            fakes.DB_IMAGE_ARI_1, fakes.DB_IMAGE_AKI_1,
            fakes.DB_VOLUME_1, fakes.DB_VOLUME_2, fakes.DB_VOLUME_3)
        self.nova_admin.servers.list.return_value = [
            fakes.OSInstance_full(fakes.OS_INSTANCE_1),
            fakes.OSInstance_full(fakes.OS_INSTANCE_2)]
        self.nova_admin.servers.get.return_value = (
            fakes.OSInstance_full(fakes.OS_INSTANCE_1))
        self.cinder.volumes.list.return_value = [
            fakes.OSVolume(fakes.OS_VOLUME_1),
            fakes.OSVolume(fakes.OS_VOLUME_2),
            fakes.OSVolume(fakes.OS_VOLUME_3)]
        # deepcopy each time so the API cannot mutate the shared fakes
        self.network_interface_api.describe_network_interfaces.side_effect = (
            lambda *args, **kwargs: copy.deepcopy({
                'networkInterfaceSet': [fakes.EC2_NETWORK_INTERFACE_1,
                                        fakes.EC2_NETWORK_INTERFACE_2]}))
        self.security_group_api.describe_security_groups.return_value = {
            'securityGroupInfo': [fakes.EC2_SECURITY_GROUP_1,
                                  fakes.EC2_SECURITY_GROUP_3]}
        # plain describe returns both reservations via servers.list
        resp = self.execute('DescribeInstances', {})
        self.assertThat(resp, matchers.DictMatches(
            {'reservationSet': [fakes.EC2_RESERVATION_1,
                                fakes.EC2_RESERVATION_2]},
            orderless_lists=True))
        self.nova_admin.servers.list.assert_called_once_with(
            search_opts={'all_tenants': True,
                         'project_id': fakes.ID_OS_PROJECT})
        self.cinder.volumes.list.assert_called_once_with(search_opts=None)
        # describe by id fetches the single server directly (servers.get),
        # not via a full listing
        self.nova_admin.reset_mock()
        self.db_api.get_items_by_ids = tools.CopyingMock(
            return_value=[fakes.DB_INSTANCE_1])
        resp = self.execute('DescribeInstances',
                            {'InstanceId.1': fakes.ID_EC2_INSTANCE_1})
        self.assertThat(resp, matchers.DictMatches(
            {'reservationSet': [fakes.EC2_RESERVATION_1]},
            orderless_lists=True))
        self.db_api.get_items_by_ids.assert_called_once_with(
            mock.ANY, set([fakes.ID_EC2_INSTANCE_1]))
        (self.network_interface_api.describe_network_interfaces.
         assert_called_with(mock.ANY))
        self.assertFalse(self.nova_admin.servers.list.called)
        self.nova_admin.servers.get.assert_called_once_with(
            fakes.ID_OS_INSTANCE_1)
        # verify every supported describe filter against the fake data
        self.check_filtering(
            'DescribeInstances', 'reservationSet',
            [('availability-zone', fakes.NAME_AVAILABILITY_ZONE),
             ('block-device-mapping.delete-on-termination', False),
             ('block-device-mapping.device-name',
              fakes.ROOT_DEVICE_NAME_INSTANCE_2),
             ('block-device-mapping.status', 'attached'),
             ('block-device-mapping.volume-id', fakes.ID_EC2_VOLUME_2),
             ('client-token', fakes.CLIENT_TOKEN_INSTANCE_2),
             # TODO(ft): support filtering by none/empty value
             # ('dns-name', ''),
             ('group-id', fakes.ID_EC2_SECURITY_GROUP_1),
             ('group-name', fakes.NAME_DEFAULT_OS_SECURITY_GROUP),
             ('image-id', fakes.ID_EC2_IMAGE_1),
             ('instance-id', fakes.ID_EC2_INSTANCE_2),
             ('instance-state-code', 0),
             ('instance-state-name', 'pending'),
             ('instance-type', 'fake_flavor'),
             ('instance.group-id', fakes.ID_EC2_SECURITY_GROUP_1),
             ('instance.group-name', fakes.NAME_DEFAULT_OS_SECURITY_GROUP),
             ('ip-address', fakes.IP_ADDRESS_2),
             ('kernel-id', fakes.ID_EC2_IMAGE_AKI_1),
             ('key-name', fakes.NAME_KEY_PAIR),
             ('launch-index', 0),
             ('launch-time', fakes.TIME_CREATE_INSTANCE_2),
             ('owner-id', fakes.ID_OS_PROJECT),
             ('private-dns-name', '%s-%s' % (fakes.ID_EC2_RESERVATION_1, 0)),
             ('private-ip-address', fakes.IP_NETWORK_INTERFACE_2),
             ('ramdisk-id', fakes.ID_EC2_IMAGE_ARI_1),
             ('reservation-id', fakes.ID_EC2_RESERVATION_1),
             ('root-device-name', fakes.ROOT_DEVICE_NAME_INSTANCE_1),
             ('root-device-type', 'ebs'),
             ('subnet-id', fakes.ID_EC2_SUBNET_2),
             ('vpc-id', fakes.ID_EC2_VPC_1),
             ('network-interface.description',
              fakes.DESCRIPTION_NETWORK_INTERFACE_2),
             ('network-interface.subnet-id', fakes.ID_EC2_SUBNET_2),
             ('network-interface.vpc-id', fakes.ID_EC2_VPC_1),
             ('network-interface.network-interface.id',
              fakes.ID_EC2_NETWORK_INTERFACE_2),
             ('network-interface.owner-id', fakes.ID_OS_PROJECT),
             ('network-interface.requester-managed', False),
             ('network-interface.status', 'in-use'),
             ('network-interface.mac-address', fakes.MAC_ADDRESS),
             ('network-interface.source-destination-check', True),
             ('network-interface.group-id', fakes.ID_EC2_SECURITY_GROUP_1),
             ('network-interface.group-name',
              fakes.NAME_DEFAULT_OS_SECURITY_GROUP),
             ('network-interface.attachment.attachment-id',
              fakes.ID_EC2_NETWORK_INTERFACE_2_ATTACH),
             ('network-interface.attachment.instance-id',
              fakes.ID_EC2_INSTANCE_1),
             ('network-interface.attachment.instance-owner-id',
              fakes.ID_OS_PROJECT),
             ('network-interface.addresses.private-ip-address',
              fakes.IP_NETWORK_INTERFACE_2_EXT_1),
             ('network-interface.attachment.device-index', 0),
             ('network-interface.attachment.status', 'attached'),
             ('network-interface.attachment.attach-time',
              fakes.TIME_ATTACH_NETWORK_INTERFACE),
             ('network-interface.attachment.delete-on-termination', False),
             ('network-interface.addresses.primary', False),
             ('network-interface.addresses.association.public-ip',
              fakes.IP_ADDRESS_2),
             ('network-interface.addresses.association.ip-owner-id',
              fakes.ID_OS_PROJECT),
             ('association.public-ip', fakes.IP_ADDRESS_2),
             ('association.ip-owner-id', fakes.ID_OS_PROJECT)])
        # verify tags are reported and filterable for instances
        self.check_tag_support(
            'DescribeInstances', ['reservationSet', 'instancesSet'],
            fakes.ID_EC2_INSTANCE_1, 'instanceId')
def test_describe_instances_ec2_classic(self):
instance_api.instance_engine = (
instance_api.InstanceEngineNova())
self.set_mock_db_items(
fakes.DB_INSTANCE_2, fakes.DB_IMAGE_1, fakes.DB_IMAGE_2,
fakes.DB_VOLUME_1, fakes.DB_VOLUME_2, fakes.DB_VOLUME_3)
self.nova_admin.servers.list.return_value = [
fakes.OSInstance_full(fakes.OS_INSTANCE_2)]
self.cinder.volumes.list.return_value = [
fakes.OSVolume(fakes.OS_VOLUME_1),
fakes.OSVolume(fakes.OS_VOLUME_2),
fakes.OSVolume(fakes.OS_VOLUME_3)]
self.security_group_api.describe_security_groups.return_value = {
'securityGroupInfo': [fakes.EC2_SECURITY_GROUP_1,
fakes.EC2_SECURITY_GROUP_3]}
resp = self.execute('DescribeInstances', {})
self.assertThat(resp, matchers.DictMatches(
{'reservationSet': [fakes.EC2_RESERVATION_2]},
orderless_lists=True))
def test_describe_instances_mutliple_networks(self):
"""Describe 2 instances with various combinations of network."""
instance_api.instance_engine = (
instance_api.InstanceEngineNeutron())
self._build_multiple_data_model()
self.set_mock_db_items(*self.DB_INSTANCES)
describe_network_interfaces = (
self.network_interface_api.describe_network_interfaces)
self.security_group_api.describe_security_groups.return_value = {
'securityGroupInfo': [fakes.EC2_SECURITY_GROUP_1,
fakes.EC2_SECURITY_GROUP_3]}
def do_check(ips_by_instance=[], ec2_enis_by_instance=[],
ec2_instance_ips=[]):
describe_network_interfaces.return_value = copy.deepcopy(
{'networkInterfaceSet': list(
itertools.chain(*ec2_enis_by_instance))})
self.nova_admin.servers.list.return_value = [
fakes.OSInstance_full({
'id': os_id,
'flavor': {'id': 'fakeFlavorId'},
'addresses': {
subnet_name: [{'addr': addr,
'version': 4,
'OS-EXT-IPS:type': 'fixed'}]
for subnet_name, addr in ips},
'root_device_name': '/dev/vda',
'hostname': '%s-%s' % (fakes.ID_EC2_RESERVATION_1, l_i)})
for l_i, (os_id, ips) in enumerate(zip(
self.IDS_OS_INSTANCE,
ips_by_instance))]
resp = self.execute('DescribeInstances', {})
instances = [fakes.gen_ec2_instance(
inst_id, launch_index=l_i, private_ip_address=ip,
ec2_network_interfaces=enis,
reservation_id=fakes.ID_EC2_RESERVATION_1)
for l_i, (inst_id, ip, enis) in enumerate(zip(
self.IDS_EC2_INSTANCE,
ec2_instance_ips,
ec2_enis_by_instance))]
reservation_set = [fakes.gen_ec2_reservation(
fakes.ID_EC2_RESERVATION_1, instances)]
self.assertThat({'reservationSet': reservation_set},
matchers.DictMatches(resp, orderless_lists=True),
verbose=True)
def ip_info(ind):
return (self.EC2_ATTACHED_ENIS[ind]['subnetId'],
self.EC2_ATTACHED_ENIS[ind]['privateIpAddress'])
# NOTE(ft): 2 instances; the first has 2 correct ports;
# the second has the first port attached by EC2 API but later detached
# by OpenStack and the second port created through EC2 API but
# attached by OpenStack only
do_check(
ips_by_instance=[[ip_info(0), ip_info(1)], [ip_info(3)]],
ec2_enis_by_instance=[
[self.EC2_ATTACHED_ENIS[0], self.EC2_ATTACHED_ENIS[1]],
[]],
ec2_instance_ips=[fakes.IP_FIRST_SUBNET_1, fakes.IP_LAST_SUBNET_2])
# NOTE(ft): 2 instances: the first has the first port attached by
# OpenStack only, the second port is attached correctly;
# the second instance has one port created and attached by OpenStack
# only
do_check(
ips_by_instance=[[ip_info(0), ip_info(1)], [ip_info(3)]],
ec2_enis_by_instance=[[self.EC2_ATTACHED_ENIS[1]], []],
ec2_instance_ips=[None, fakes.IP_LAST_SUBNET_2])
@mock.patch('ec2api.api.instance._remove_instances')
def test_describe_instances_auto_remove(self, remove_instances):
self.set_mock_db_items(fakes.DB_INSTANCE_1, fakes.DB_INSTANCE_2,
fakes.DB_VOLUME_2)
self.nova_admin.servers.list.return_value = [
fakes.OSInstance_full(fakes.OS_INSTANCE_2)]
self.cinder.volumes.list.return_value = [
fakes.OSVolume(fakes.OS_VOLUME_2)]
self.security_group_api.describe_security_groups.return_value = {
'securityGroupInfo': [fakes.EC2_SECURITY_GROUP_3]}
resp = self.execute('DescribeInstances', {})
self.assertThat(resp,
matchers.DictMatches(
{'reservationSet': [fakes.EC2_RESERVATION_2]},
orderless_lists=True))
remove_instances.assert_called_once_with(
mock.ANY, [fakes.DB_INSTANCE_1])
@mock.patch('ec2api.api.instance._format_instance')
def test_describe_instances_sorting(self, format_instance):
db_instances = [
{'id': fakes.random_ec2_id('i'),
'os_id': fakes.random_os_id(),
'vpc_id': None,
'launch_index': i,
'reservation_id': fakes.ID_EC2_RESERVATION_1}
for i in range(5)]
random.shuffle(db_instances)
self.set_mock_db_items(*db_instances)
os_instances = [
fakes.OSInstance_full({'id': inst['os_id']})
for inst in db_instances]
self.nova_admin.servers.list.return_value = os_instances
format_instance.side_effect = (
lambda context, instance, *args: (
{'instanceId': instance['id'],
'amiLaunchIndex': instance['launch_index']}))
resp = self.execute('DescribeInstances', {})
self.assertEqual(
[0, 1, 2, 3, 4],
[inst['amiLaunchIndex']
for inst in resp['reservationSet'][0]['instancesSet']])
def test_describe_instances_invalid_parameters(self):
self.assert_execution_error(
'InvalidInstanceID.NotFound', 'DescribeInstances',
{'InstanceId.1': fakes.random_ec2_id('i')})
self.set_mock_db_items(fakes.DB_INSTANCE_2)
self.assert_execution_error(
'InvalidInstanceID.NotFound', 'DescribeInstances',
{'InstanceId.1': fakes.ID_EC2_INSTANCE_2,
'InstanceId.2': fakes.random_ec2_id('i')})
def test_describe_instance_attributes(self):
self.set_mock_db_items(fakes.DB_INSTANCE_1, fakes.DB_INSTANCE_2,
fakes.DB_IMAGE_ARI_1, fakes.DB_IMAGE_AKI_1,
fakes.DB_VOLUME_2)
self.nova_admin.servers.get.side_effect = (
tools.get_by_1st_arg_getter({
fakes.ID_OS_INSTANCE_1: (
fakes.OSInstance_full(fakes.OS_INSTANCE_1)),
fakes.ID_OS_INSTANCE_2: (
fakes.OSInstance_full(fakes.OS_INSTANCE_2))}))
self.cinder.volumes.list.return_value = [
fakes.OSVolume(fakes.OS_VOLUME_2)]
self.security_group_api.describe_security_groups.return_value = {
'securityGroupInfo': [fakes.EC2_SECURITY_GROUP_1,
fakes.EC2_SECURITY_GROUP_3]}
def do_check(instance_id, attribute, expected):
resp = self.execute('DescribeInstanceAttribute',
{'InstanceId': instance_id,
'Attribute': attribute})
expected.update({'instanceId': instance_id})
self.assertThat(resp, matchers.DictMatches(expected))
do_check(fakes.ID_EC2_INSTANCE_2, 'blockDeviceMapping',
{'rootDeviceType': 'ebs',
'blockDeviceMapping': (
fakes.EC2_INSTANCE_2['blockDeviceMapping'])})
do_check(fakes.ID_EC2_INSTANCE_2, 'groupSet',
{'groupSet': fakes.EC2_RESERVATION_2['groupSet']})
do_check(fakes.ID_EC2_INSTANCE_2, 'instanceType',
{'instanceType': {'value': 'fake_flavor'}})
do_check(fakes.ID_EC2_INSTANCE_1, 'kernel',
{'kernel': {'value': fakes.ID_EC2_IMAGE_AKI_1}})
do_check(fakes.ID_EC2_INSTANCE_1, 'ramdisk',
{'ramdisk': {'value': fakes.ID_EC2_IMAGE_ARI_1}})
do_check(fakes.ID_EC2_INSTANCE_2, 'rootDeviceName',
{'rootDeviceName': {
'value': fakes.ROOT_DEVICE_NAME_INSTANCE_2}})
do_check(fakes.ID_EC2_INSTANCE_2, 'userData',
{'userData': {'value': fakes.USER_DATA_INSTANCE_2}})
    def _build_multiple_data_model(self):
        """Generate fake fixtures for multi-port, multi-instance tests.

        Populates self.IDS_* / DB_* / EC2_* attributes used by the
        'multiple networks' test methods of this class.
        """
        # NOTE(ft): generate necessary fake data
        # We need 4 detached ports in 2 subnets.
        # Sequence of all ports list is s1i1, s2i1, s1i2, s2i2,
        # where sNiM - port info of instance iM on subnet sN.
        # We generate port ids but use subnet and instance ids since
        # fakes contain enough ids for subnets an instances, but not for ports.
        instances_count = 2
        subnets_count = 2
        ports_count = instances_count * subnets_count
        ids_ec2_eni = [fakes.random_ec2_id('eni') for _ in range(ports_count)]
        ids_os_port = [fakes.random_os_id() for _ in range(ports_count)]
        ids_ec2_subnet = (fakes.ID_EC2_SUBNET_1, fakes.ID_EC2_SUBNET_2)
        # per-port sequences are ordered s1i1, s2i1, s1i2, s2i2 (see above)
        ids_ec2_subnet_by_port = ids_ec2_subnet * 2
        ips = (fakes.IP_FIRST_SUBNET_1, fakes.IP_FIRST_SUBNET_2,
               fakes.IP_LAST_SUBNET_1, fakes.IP_LAST_SUBNET_2)
        ids_ec2_instance = [fakes.ID_EC2_INSTANCE_1, fakes.ID_EC2_INSTANCE_2]
        # each instance id repeated once per subnet: [i1, i1, i2, i2]
        ids_ec2_instance_by_port = list(
            itertools.chain(*map(lambda i: [i] * subnets_count,
                                 ids_ec2_instance)))
        ids_os_instance = [fakes.ID_OS_INSTANCE_1, fakes.ID_OS_INSTANCE_2]
        # delete-on-termination flags alternate per port
        dots_by_port = [True, False] * instances_count
        db_attached_enis = [
            fakes.gen_db_network_interface(
                ec2_id, os_id, fakes.ID_EC2_VPC_1,
                subnet_ec2_id, ip,
                instance_id=instance_ec2_id,
                device_index=dev_ind,
                delete_on_termination=dot)
            for (ec2_id, os_id, subnet_ec2_id, ip, instance_ec2_id, dev_ind,
                 dot) in zip(
                ids_ec2_eni,
                ids_os_port,
                ids_ec2_subnet_by_port,
                ips,
                ids_ec2_instance_by_port,
                list(range(subnets_count)) * instances_count,
                dots_by_port)]
        db_detached_enis = [
            fakes.gen_db_network_interface(
                ec2_id, os_id, fakes.ID_EC2_VPC_1,
                subnet_ec2_id, ip)
            for ec2_id, os_id, subnet_ec2_id, ip in zip(
                ids_ec2_eni,
                ids_os_port,
                ids_ec2_subnet_by_port,
                ips)]
        ec2_attached_enis = [
            fakes.gen_ec2_network_interface(
                db_eni['id'],
                None,  # ec2_subnet
                [db_eni['private_ip_address']],
                ec2_instance_id=ec2_instance_id,
                device_index=dev_ind,
                delete_on_termination=dot,
                ec2_subnet_id=ec2_subnet_id,
                ec2_vpc_id=fakes.ID_EC2_VPC_1)
            for db_eni, dot, ec2_subnet_id, ec2_instance_id, dev_ind in zip(
                db_attached_enis,
                dots_by_port,
                ids_ec2_subnet_by_port,
                ids_ec2_instance_by_port,
                list(range(subnets_count)) * instances_count)]
        ec2_detached_enis = [
            fakes.gen_ec2_network_interface(
                db_eni['id'],
                None,  # ec2_subnet
                [db_eni['private_ip_address']],
                ec2_subnet_id=ec2_subnet_id,
                ec2_vpc_id=fakes.ID_EC2_VPC_1)
            for db_eni, ec2_subnet_id in zip(
                db_detached_enis,
                ids_ec2_subnet_by_port)]
        db_instances = [
            {'id': db_id,
             'os_id': os_id,
             'vpc_id': fakes.ID_EC2_VPC_1,
             'reservation_id': fakes.ID_EC2_RESERVATION_1,
             'launch_index': l_i}
            for l_i, (db_id, os_id) in enumerate(zip(
                ids_ec2_instance,
                ids_os_instance))]
        # expose the generated fixtures to the test methods
        self.IDS_EC2_SUBNET = ids_ec2_subnet
        self.IDS_OS_PORT = ids_os_port
        self.IDS_OS_INSTANCE = ids_os_instance
        self.IDS_EC2_INSTANCE = ids_ec2_instance
        self.IDS_EC2_SUBNET_BY_PORT = ids_ec2_subnet_by_port
        self.DB_DETACHED_ENIS = db_detached_enis
        self.EC2_ATTACHED_ENIS = ec2_attached_enis
        self.EC2_DETACHED_ENIS = ec2_detached_enis
        self.DB_INSTANCES = db_instances
    # TODO(ft): add tests for get_vpc_default_security_group_id.
class InstancePrivateTestCase(test_base.BaseTestCase):
def test_merge_network_interface_parameters(self):
engine = instance_api.InstanceEngineNeutron()
self.assertRaises(
exception.InvalidParameterCombination,
engine.merge_network_interface_parameters,
None, 'subnet-1', None, None,
[{'device_index': 0, 'private_ip_address': '10.10.10.10'}])
self.assertRaises(
exception.InvalidParameterCombination,
engine.merge_network_interface_parameters,
None, None, '10.10.10.10', None,
[{'device_index': 0, 'subnet_id': 'subnet-1'}])
self.assertRaises(
exception.InvalidParameterCombination,
engine.merge_network_interface_parameters,
['default'], None, None, None,
[{'device_index': 0, 'subnet_id': 'subnet-1'}])
self.assertRaises(
exception.InvalidParameterCombination,
engine.merge_network_interface_parameters,
None, None, None, ['sg-1'],
[{'device_index': 0, 'subnet_id': 'subnet-1'}])
self.assertRaises(
exception.InvalidParameterCombination,
engine.merge_network_interface_parameters,
None, 'subnet-1', None, None,
[{'device_index': 1, 'associate_public_ip_address': True}])
self.assertRaises(
exception.InvalidParameterCombination,
engine.merge_network_interface_parameters,
None, 'subnet-1', None, None,
[{'device_index': 0, 'associate_public_ip_address': True},
{'device_index': 1, 'subnet_id': 'subnet-2'}])
self.assertRaises(
exception.InvalidParameterCombination,
engine.merge_network_interface_parameters,
None, 'subnet-1', None, None,
[{'device_index': 0}])
self.assertRaises(
exception.InvalidParameterCombination,
engine.merge_network_interface_parameters,
['default'], 'subnet-1', None, None, None)
self.assertRaises(
exception.InvalidParameterCombination,
engine.merge_network_interface_parameters,
None, None, '10.10.10.10', None, None)
self.assertRaises(
exception.InvalidParameterCombination,
engine.merge_network_interface_parameters,
None, None, None, ['sg-1'], None)
self.assertEqual(
([{'device_index': 0,
'subnet_id': 'subnet-1'}]),
engine.merge_network_interface_parameters(
None, 'subnet-1', None, None, None))
self.assertEqual(
([{'device_index': 0,
'subnet_id': 'subnet-1',
'private_ip_address': '10.10.10.10'}]),
engine.merge_network_interface_parameters(
None, 'subnet-1', '10.10.10.10', None, None))
self.assertEqual(
([{'device_index': 0,
'subnet_id': 'subnet-1',
'private_ip_address': '10.10.10.10',
'security_group_id': ['sg-1']}]),
engine.merge_network_interface_parameters(
None, 'subnet-1', '10.10.10.10', ['sg-1'], None))
self.assertEqual(
([{'device_index': 0,
'subnet_id': 'subnet-1',
'security_group_id': ['sg-1']}]),
engine.merge_network_interface_parameters(
None, 'subnet-1', None, ['sg-1'], None))
self.assertEqual(
([{'device_index': 0,
'subnet_id': 'subnet-1'}]),
engine.merge_network_interface_parameters(
None, None, None, None,
[{'device_index': 0, 'subnet_id': 'subnet-1'}]))
self.assertEqual([],
engine.merge_network_interface_parameters(
['default'], None, None, None, None))
self.assertEqual([],
engine.merge_network_interface_parameters(
None, None, None, None, None))
def test_check_network_interface_parameters(self):
    """Validate InstanceEngineNeutron.check_network_interface_parameters.

    First exercises the rejection paths (each invalid parameter list must
    raise the named exception), then a set of valid parameter lists that
    must pass silently.  The second positional argument is the
    multiple-instances flag (presumably min/max count > 1 — TODO confirm
    against the engine implementation).
    """
    engine = instance_api.InstanceEngineNeutron()
    # 'device_index' is mandatory for every interface entry.
    self.assertRaises(
        exception.InvalidParameterValue,
        engine.check_network_interface_parameters,
        [{'subnet_id': 'subnet-1'}], False)
    # Duplicate device indexes are rejected.
    self.assertRaises(
        exception.InvalidParameterValue,
        engine.check_network_interface_parameters,
        [{'device_index': 0, 'subnet_id': 'subnet-1'},
         {'device_index': 0, 'subnet_id': 'subnet-2'}], False)
    # A private IP without a subnet is meaningless.
    self.assertRaises(
        exception.InvalidParameterValue,
        engine.check_network_interface_parameters,
        [{'device_index': 0, 'private_ip_address': '10.10.10.10'}], False)
    # An existing interface id cannot be combined with creation args
    # (subnet, private ip, security groups, delete_on_termination).
    self.assertRaises(
        exception.InvalidParameterCombination,
        engine.check_network_interface_parameters,
        [{'device_index': 0,
          'network_interface_id': 'eni-1',
          'subnet_id': 'subnet-1'}],
        False)
    self.assertRaises(
        exception.InvalidParameterCombination,
        engine.check_network_interface_parameters,
        [{'device_index': 0,
          'network_interface_id': 'eni-1',
          'private_ip_address': '10.10.10.10'}],
        False)
    self.assertRaises(
        exception.InvalidParameterCombination,
        engine.check_network_interface_parameters,
        [{'device_index': 0,
          'network_interface_id': 'eni-1',
          'security_group_id': ['sg-1']}],
        False)
    self.assertRaises(
        exception.InvalidParameterCombination,
        engine.check_network_interface_parameters,
        [{'device_index': 0,
          'network_interface_id': 'eni-1',
          'delete_on_termination': True}],
        False)
    # With the multi-instance flag set, attaching a specific interface
    # or a specific private IP is not allowed.
    self.assertRaises(
        exception.InvalidParameterCombination,
        engine.check_network_interface_parameters,
        [{'device_index': 0, 'network_interface_id': 'eni-1'}],
        True)
    self.assertRaises(
        exception.InvalidParameterCombination,
        engine.check_network_interface_parameters,
        [{'device_index': 0,
          'subnet_id': 'subnet-1',
          'private_ip_address': '10.10.10.10'}],
        True)
    # A non-zero first device index is not supported.
    self.assertRaises(
        exception.UnsupportedOperation,
        engine.check_network_interface_parameters,
        [{'device_index': 1, 'subnet_id': 'subnet-1'}], False)
    # Valid combinations: must not raise.
    engine.check_network_interface_parameters(
        [{'device_index': 0, 'subnet_id': 'subnet-1'}], False)
    engine.check_network_interface_parameters(
        [{'device_index': 0,
          'subnet_id': 'subnet-1',
          'private_ip_address': '10.10.10.10',
          'security_group_id': ['sg-1'],
          'delete_on_termination': True}],
        False)
    engine.check_network_interface_parameters(
        [{'device_index': 0, 'network_interface_id': 'eni-1'}], False)
    engine.check_network_interface_parameters(
        [{'device_index': 0,
          'subnet_id': 'subnet-1',
          'security_group_id': ['sg-1'],
          'delete_on_termination': True},
         {'device_index': 1,
          'subnet_id': 'subnet-2'}],
        True)
    engine.check_network_interface_parameters([], False)
@mock.patch('ec2api.db.api.IMPL')
def test_parse_network_interface_parameters(self, db_api):
    """Validate InstanceEngineNeutron.parse_network_interface_parameters.

    The DB layer is mocked so that subnet/interface lookups return fixed
    fake items; NOTE: the stubbed get_item_by_id serves items by id, so
    the ordering below pairs each request with its fake.  Checks both the
    normalized result structure and the error paths.
    """
    engine = instance_api.InstanceEngineNeutron()
    context = base.create_context()
    db_api.get_item_by_id.side_effect = tools.get_db_api_get_item_by_id(
        fakes.DB_SUBNET_1,
        tools.update_dict(fakes.DB_SUBNET_2,
                          {'vpc_id': fakes.ID_EC2_VPC_2}),
        fakes.DB_NETWORK_INTERFACE_1, fakes.DB_NETWORK_INTERFACE_2)
    # Mixed case: one pre-existing interface plus one to be created;
    # the result is (vpc_id, [per-interface specs]) sorted by
    # device_index.
    resp = engine.parse_network_interface_parameters(
        context,
        [{'device_index': 1,
          'network_interface_id': fakes.ID_EC2_NETWORK_INTERFACE_1},
         {'device_index': 0,
          'subnet_id': fakes.ID_EC2_SUBNET_1,
          'delete_on_termination': False,
          'security_group_id': [fakes.ID_EC2_SECURITY_GROUP_1]}])
    self.assertEqual(
        (fakes.ID_EC2_VPC_1,
         [{'device_index': 0,
           'create_args': (fakes.ID_EC2_SUBNET_1,
                           {'security_group_id': (
                                [fakes.ID_EC2_SECURITY_GROUP_1])}),
           'delete_on_termination': False},
          {'device_index': 1,
           'network_interface': fakes.DB_NETWORK_INTERFACE_1,
           'delete_on_termination': False}]),
        resp)
    # associate_public_ip_address implies delete_on_termination=True.
    resp = engine.parse_network_interface_parameters(
        context,
        [{'device_index': 0,
          'subnet_id': fakes.ID_EC2_SUBNET_1,
          'associate_public_ip_address': True}])
    self.assertEqual(
        (fakes.ID_EC2_VPC_1,
         [{'device_index': 0,
           'create_args': (fakes.ID_EC2_SUBNET_1, {}),
           'delete_on_termination': True}]),
        resp)
    # NOTE(ft): a network interface has being attached twice
    self.assertRaises(
        exception.InvalidParameterValue,
        engine.parse_network_interface_parameters, context,
        [{'device_index': 0,
          'network_interface_id': fakes.ID_EC2_NETWORK_INTERFACE_1},
         {'device_index': 1,
          'network_interface_id': fakes.ID_EC2_NETWORK_INTERFACE_1}])
    # NOTE(ft): a network interface is in use
    self.assertRaises(
        exception.InvalidNetworkInterfaceInUse,
        engine.parse_network_interface_parameters, context,
        [{'device_index': 0,
          'network_interface_id': fakes.ID_EC2_NETWORK_INTERFACE_2}])
    # NOTE(ft): specified objects are belonging to different VPCs
    self.assertRaises(
        exception.InvalidParameterValue,
        engine.parse_network_interface_parameters, context,
        [{'device_index': 0,
          'subnet_id': fakes.ID_EC2_SUBNET_1},
         {'device_index': 1,
          'subnet_id': fakes.ID_EC2_SUBNET_2}])
    self.assertRaises(
        exception.InvalidParameterValue,
        engine.parse_network_interface_parameters, context,
        [{'device_index': 0,
          'network_interface_id': fakes.ID_EC2_NETWORK_INTERFACE_1},
         {'device_index': 1,
          'subnet_id': fakes.ID_EC2_SUBNET_2}])
@mock.patch('ec2api.api.ec2utils.get_os_image')
def test_parse_image_parameters(self, get_os_image):
    """Validate instance_api._parse_image_parameters.

    Checks the normal flow (image + kernel + ramdisk resolved through
    get_os_image, in that lookup order), the image-only flow, and the
    two 'image not available' error cases.
    """
    fake_context = base.create_context()
    # NOTE(ft): check normal flow
    os_image = fakes.OSImage(fakes.OS_IMAGE_1)
    # side_effect list order must match the call order asserted below:
    # kernel (aki), ramdisk (ari), then the image itself.
    get_os_image.side_effect = [
        fakes.OSImage(fakes.OS_IMAGE_AKI_1),
        fakes.OSImage(fakes.OS_IMAGE_ARI_1),
        os_image]
    self.assertEqual(
        (os_image, fakes.ID_OS_IMAGE_AKI_1, fakes.ID_OS_IMAGE_ARI_1),
        instance_api._parse_image_parameters(
            fake_context, fakes.ID_EC2_IMAGE_1,
            fakes.ID_EC2_IMAGE_AKI_1, fakes.ID_EC2_IMAGE_ARI_1))
    get_os_image.assert_has_calls(
        [mock.call(fake_context, fakes.ID_EC2_IMAGE_AKI_1),
         mock.call(fake_context, fakes.ID_EC2_IMAGE_ARI_1),
         mock.call(fake_context, fakes.ID_EC2_IMAGE_1)])
    # No kernel/ramdisk: a single lookup, None for both extras.
    get_os_image.side_effect = None
    get_os_image.return_value = os_image
    get_os_image.reset_mock()
    self.assertEqual(
        (os_image, None, None),
        instance_api._parse_image_parameters(
            fake_context, fakes.ID_EC2_IMAGE_1, None, None))
    get_os_image.assert_called_once_with(
        fake_context, fakes.ID_EC2_IMAGE_1)
    # NOTE(ft): check cases of not available image
    os_image = fakes.OSImage({
        'id': fakes.random_os_id(),
        'status': None,
        'properties': {}})
    get_os_image.return_value = os_image
    # Non-'active' status is unavailable.
    self.assertRaises(
        exception.InvalidAMIIDUnavailable,
        instance_api._parse_image_parameters,
        fake_context, fakes.random_ec2_id('ami'), None, None)
    # Active but still decrypting is unavailable too.
    os_image.status = 'active'
    os_image.properties['image_state'] = 'decrypting'
    self.assertRaises(
        exception.InvalidAMIIDUnavailable,
        instance_api._parse_image_parameters,
        fake_context, fakes.random_ec2_id('ami'), None, None)
@mock.patch('ec2api.db.api.IMPL')
def test_parse_block_device_mapping(self, db_api):
    """Validate instance_api._parse_block_device_mapping.

    Translates EC2-style block device mappings (device_name + ebs dict)
    into OpenStack bdm dicts.  Covers the empty list, a mix of
    snapshot/volume/blank sources, duplicate device names (last wins),
    and rejection of short/full device-name conflicts.
    """
    fake_context = base.create_context()
    db_api.get_item_by_id.side_effect = tools.get_db_api_get_item_by_id(
        fakes.DB_VOLUME_1, fakes.DB_VOLUME_2, fakes.DB_VOLUME_3,
        fakes.DB_SNAPSHOT_1, fakes.DB_SNAPSHOT_2)
    # Empty input maps to empty output.
    res = instance_api._parse_block_device_mapping(fake_context, [])
    self.assertEqual([], res)
    # NOTE: the 'snapshot_id' values for vdh/vdi are volume ids — the
    # parser presumably dispatches on the id prefix (vol- vs snap-);
    # the expected dicts below carry 'volume_id' accordingly.
    res = instance_api._parse_block_device_mapping(
        fake_context, [{'device_name': '/dev/vdf',
                        'ebs': {'snapshot_id': fakes.ID_EC2_SNAPSHOT_1}},
                       {'device_name': '/dev/vdg',
                        'ebs': {'snapshot_id': fakes.ID_EC2_SNAPSHOT_2,
                                'volume_size': 111,
                                'delete_on_termination': False}},
                       {'device_name': '/dev/vdh',
                        'ebs': {'snapshot_id': fakes.ID_EC2_VOLUME_1}},
                       {'device_name': '/dev/vdi',
                        'ebs': {'snapshot_id': fakes.ID_EC2_VOLUME_2,
                                'delete_on_termination': True}},
                       {'device_name': '/dev/sdb1',
                        'ebs': {'volume_size': 55}}])
    expected = [{'snapshot_id': fakes.ID_OS_SNAPSHOT_1,
                 'device_name': '/dev/vdf',
                 'source_type': 'snapshot',
                 'destination_type': 'volume'},
                {'snapshot_id': fakes.ID_OS_SNAPSHOT_2,
                 'volume_size': 111,
                 'device_name': '/dev/vdg',
                 'source_type': 'snapshot',
                 'destination_type': 'volume',
                 'delete_on_termination': False},
                {'volume_id': fakes.ID_OS_VOLUME_1,
                 'device_name': '/dev/vdh',
                 'source_type': 'volume',
                 'destination_type': 'volume'},
                {'volume_id': fakes.ID_OS_VOLUME_2,
                 'device_name': '/dev/vdi',
                 'source_type': 'volume',
                 'destination_type': 'volume',
                 'delete_on_termination': True},
                {'volume_size': 55,
                 'device_name': '/dev/sdb1',
                 'destination_type': 'volume'}]
    self.assertThat(expected,
                    matchers.ListMatches(res, orderless_lists=True),
                    verbose=True)
    # Same device name twice: the later mapping overrides the earlier.
    res = instance_api._parse_block_device_mapping(
        fake_context, [{'device_name': '/dev/vdf',
                        'ebs': {'snapshot_id': fakes.ID_EC2_SNAPSHOT_1}},
                       {'device_name': '/dev/vdf',
                        'ebs': {'snapshot_id': fakes.ID_EC2_SNAPSHOT_2}}])
    expected = [{'snapshot_id': fakes.ID_OS_SNAPSHOT_2,
                 'device_name': '/dev/vdf',
                 'source_type': 'snapshot',
                 'destination_type': 'volume'}]
    self.assertThat(expected,
                    matchers.ListMatches(res, orderless_lists=True),
                    verbose=True)
    # '/dev/vdf' and 'vdf' refer to the same device: conflict.
    self.assertRaises(
        exception.InvalidBlockDeviceMapping,
        instance_api._parse_block_device_mapping,
        fake_context,
        [{'device_name': '/dev/vdf',
          'ebs': {'snapshot_id': fakes.ID_EC2_SNAPSHOT_1}},
         {'device_name': 'vdf',
          'ebs': {'snapshot_id': fakes.ID_EC2_SNAPSHOT_2}}])
@mock.patch('ec2api.db.api.IMPL')
def test_build_block_device_mapping(self, db_api):
    """Validate instance_api._build_block_device_mapping.

    Checks how user-supplied bdms are populated with defaults, merged
    with the image's own block device mapping (both v2 and legacy
    formats), and which conflicting overrides are rejected.
    """
    fake_context = base.create_context()
    db_api.get_item_by_id.side_effect = tools.get_db_api_get_item_by_id(
        fakes.DB_SNAPSHOT_1, fakes.DB_SNAPSHOT_2,
        fakes.DB_VOLUME_1, fakes.DB_VOLUME_2)
    # check bdm attributes' population
    bdms = [
        {'device_name': '/dev/sda1',
         'ebs': {'snapshot_id': fakes.ID_EC2_SNAPSHOT_1}},
        {'device_name': '/dev/vdb',
         'ebs': {'snapshot_id': fakes.ID_EC2_VOLUME_1,
                 'delete_on_termination': False}},
        {'device_name': 'vdc',
         'ebs': {'volume_size': 100}},
    ]
    # Root device gets boot_index 0; everything else -1.
    # delete_on_termination defaults to True when not given.
    expected = [
        {'device_name': '/dev/sda1',
         'source_type': 'snapshot',
         'destination_type': 'volume',
         'uuid': fakes.ID_OS_SNAPSHOT_1,
         'delete_on_termination': True,
         'boot_index': 0},
        {'device_name': '/dev/vdb',
         'source_type': 'volume',
         'destination_type': 'volume',
         'uuid': fakes.ID_OS_VOLUME_1,
         'delete_on_termination': False,
         'boot_index': -1},
        {'device_name': 'vdc',
         'source_type': 'blank',
         'destination_type': 'volume',
         'volume_size': 100,
         'delete_on_termination': True,
         'boot_index': -1},
    ]
    result = instance_api._build_block_device_mapping(
        fake_context, bdms, fakes.OSImage(fakes.OS_IMAGE_1))
    self.assertEqual(expected, result)

    # Template reused (and mutated) by the remaining sub-cases below.
    fake_image_template = {
        'id': fakes.random_os_id(),
        'properties': {'root_device_name': '/dev/vda',
                       'bdm_v2': True,
                       'block_device_mapping': []}}

    # check merging with image bdms
    fake_image_template['properties']['block_device_mapping'] = [
        {'boot_index': 0,
         'device_name': '/dev/vda',
         'source_type': 'snapshot',
         'snapshot_id': fakes.ID_OS_SNAPSHOT_1,
         'delete_on_termination': True},
        {'device_name': 'vdb',
         'source_type': 'snapshot',
         'snapshot_id': fakes.random_os_id(),
         'volume_size': 50},
        {'device_name': '/dev/vdc',
         'source_type': 'blank',
         'volume_size': 10},
    ]
    bdms = [
        {'device_name': '/dev/vda',
         'ebs': {'volume_size': 15}},
        {'device_name': 'vdb',
         'ebs': {'snapshot_id': fakes.ID_EC2_SNAPSHOT_2,
                 'delete_on_termination': False}},
        {'device_name': '/dev/vdc',
         'ebs': {'volume_size': 20}},
    ]
    expected = [
        {'device_name': '/dev/vda',
         'source_type': 'snapshot',
         'destination_type': 'volume',
         'uuid': fakes.ID_OS_SNAPSHOT_1,
         'delete_on_termination': True,
         'volume_size': 15,
         'boot_index': 0},
        {'device_name': 'vdb',
         'source_type': 'snapshot',
         'destination_type': 'volume',
         'uuid': fakes.ID_OS_SNAPSHOT_2,
         'delete_on_termination': False,
         'boot_index': -1},
        {'device_name': '/dev/vdc',
         'source_type': 'blank',
         'destination_type': 'volume',
         'volume_size': 20,
         'delete_on_termination': False},
    ]
    result = instance_api._build_block_device_mapping(
        fake_context, bdms, fakes.OSImage(fake_image_template))
    self.assertEqual(expected, result)

    # check result order for adjusting some bdm of all
    fake_image_template['properties']['block_device_mapping'] = [
        {'device_name': '/dev/vdc',
         'source_type': 'blank',
         'volume_size': 10},
        {'device_name': '/dev/vde',
         'source_type': 'blank',
         'volume_size': 10},
        {'device_name': '/dev/vdf',
         'source_type': 'blank',
         'volume_size': 10},
        {'boot_index': -1,
         'source_type': 'blank',
         'volume_size': 10},
    ]
    bdms = [
        {'device_name': '/dev/vdh',
         'ebs': {'volume_size': 15}},
        {'device_name': '/dev/vde',
         'ebs': {'volume_size': 15}},
        {'device_name': '/dev/vdb',
         'ebs': {'volume_size': 15}},
    ]
    expected = [
        {'device_name': '/dev/vdh',
         'source_type': 'blank',
         'destination_type': 'volume',
         'volume_size': 15,
         'delete_on_termination': True,
         'boot_index': -1},
        {'device_name': '/dev/vde',
         'source_type': 'blank',
         'destination_type': 'volume',
         'volume_size': 15,
         'delete_on_termination': False},
        {'device_name': '/dev/vdb',
         'source_type': 'blank',
         'destination_type': 'volume',
         'volume_size': 15,
         'delete_on_termination': True,
         'boot_index': -1},
    ]
    result = instance_api._build_block_device_mapping(
        fake_context, bdms, fakes.OSImage(fake_image_template))
    self.assertEqual(expected, result)

    # check conflict of short and full device names
    fake_image_template['properties']['block_device_mapping'] = [
        {'device_name': '/dev/vdc',
         'source_type': 'blank',
         'volume_size': 10},
    ]
    bdms = [
        {'device_name': 'vdc',
         'ebs': {'volume_size': 15}},
    ]
    self.assertRaises(exception.InvalidBlockDeviceMapping,
                      instance_api._build_block_device_mapping,
                      fake_context, bdms,
                      fakes.OSImage(fake_image_template))

    # opposit combination of the same case
    fake_image_template['properties']['block_device_mapping'] = [
        {'device_name': 'vdc',
         'source_type': 'blank',
         'volume_size': 10},
    ]
    bdms = [
        {'device_name': '/dev/vdc',
         'ebs': {'volume_size': 15}},
    ]
    self.assertRaises(exception.InvalidBlockDeviceMapping,
                      instance_api._build_block_device_mapping,
                      fake_context, bdms,
                      fakes.OSImage(fake_image_template))

    # check fault on root device snapshot changing
    fake_image_template['properties']['block_device_mapping'] = [
        {'boot_index': 0,
         'source_type': 'snapshot',
         'snapshot_id': fakes.ID_EC2_SNAPSHOT_1},
    ]
    bdms = [
        {'device_name': '/dev/vda',
         'ebs': {'snapshot_id': fakes.ID_EC2_SNAPSHOT_2}},
    ]
    self.assertRaises(exception.InvalidBlockDeviceMapping,
                      instance_api._build_block_device_mapping,
                      fake_context, bdms,
                      fakes.OSImage(fake_image_template))

    # same case for legacy bdm
    fake_image_template['properties']['block_device_mapping'] = [
        {'device_name': '/dev/vda',
         'snapshot_id': fakes.ID_EC2_SNAPSHOT_1},
    ]
    fake_image_template['properties']['bdm_v2'] = False
    bdms = [
        {'device_name': '/dev/vda',
         'ebs': {'snapshot_id': fakes.ID_EC2_SNAPSHOT_2}},
    ]
    self.assertRaises(exception.InvalidBlockDeviceMapping,
                      instance_api._build_block_device_mapping,
                      fake_context, bdms,
                      fakes.OSImage(fake_image_template))

    # same case for legacy bdm with short names
    fake_image_template['properties']['block_device_mapping'] = [
        {'device_name': 'vda',
         'snapshot_id': fakes.ID_EC2_SNAPSHOT_1},
    ]
    fake_image_template['properties']['bdm_v2'] = False
    bdms = [
        {'device_name': 'vda',
         'ebs': {'snapshot_id': fakes.ID_EC2_SNAPSHOT_2}},
    ]
    self.assertRaises(exception.InvalidBlockDeviceMapping,
                      instance_api._build_block_device_mapping,
                      fake_context, bdms,
                      fakes.OSImage(fake_image_template))
    # Restore v2 format for the remaining sub-cases.
    fake_image_template['properties']['bdm_v2'] = True

    # check fault on reduce volume size
    fake_image_template['properties']['block_device_mapping'] = [
        {'device_name': 'vdc',
         'source_type': 'blank',
         'volume_size': 15},
    ]
    bdms = [
        {'device_name': '/dev/vdc',
         'ebs': {'volume_size': 10}},
    ]
    self.assertRaises(exception.InvalidBlockDeviceMapping,
                      instance_api._build_block_device_mapping,
                      fake_context, bdms,
                      fakes.OSImage(fake_image_template))

    # check fault on set snapshot id if bdm doesn't have one
    fake_image_template['properties']['block_device_mapping'] = [
        {'device_name': 'vdc',
         'source_type': 'blank',
         'volume_size': 10},
    ]
    bdms = [
        {'device_name': '/dev/vdc',
         'ebs': {'snapshot_id': fakes.ID_EC2_SNAPSHOT_1}},
    ]
    self.assertRaises(exception.InvalidBlockDeviceMapping,
                      instance_api._build_block_device_mapping,
                      fake_context, bdms,
                      fakes.OSImage(fake_image_template))
@mock.patch('cinderclient.client.Client')
@mock.patch('novaclient.client.Client')
@mock.patch('ec2api.db.api.IMPL')
def test_format_instance(self, db_api, nova, cinder):
    """Validate instance_api._format_instance.

    Checks the vm_state -> EC2 instanceState translation and that DB
    items are auto-created for OS images/kernels/ramdisks unknown to
    the ec2api database.
    """
    nova = nova.return_value
    fake_context = base.create_context()
    fake_flavors = {'fakeFlavorId': 'fake_flavor'}
    instance = {'id': fakes.random_ec2_id('i'),
                'os_id': fakes.random_os_id(),
                'launch_index': 0}
    os_instance = fakes.OSInstance_full({'id': instance['os_id'],
                                         'flavor': {'id': 'fakeFlavorId'}})
    # NOTE(ft): check instance state formatting
    setattr(os_instance, 'OS-EXT-STS:vm_state', 'active')
    formatted_instance = instance_api._format_instance(
        fake_context, instance, os_instance, [], {},
        None, None, fake_flavors, [])
    self.assertEqual({'name': 'running', 'code': 16},
                     formatted_instance['instanceState'])
    setattr(os_instance, 'OS-EXT-STS:vm_state', 'stopped')
    formatted_instance = instance_api._format_instance(
        fake_context, instance, os_instance, [], {},
        None, None, fake_flavors, [])
    self.assertEqual({'name': 'stopped', 'code': 80},
                     formatted_instance['instanceState'])
    # NOTE(ft): check auto creating of DB item for unknown OS images
    os_instance.image = {'id': fakes.random_os_id()}
    kernel_id = fakes.random_os_id()
    ramdisk_id = fakes.random_os_id()
    setattr(os_instance, 'OS-EXT-SRV-ATTR:kernel_id', kernel_id)
    setattr(os_instance, 'OS-EXT-SRV-ATTR:ramdisk_id', ramdisk_id)
    formatted_instance = instance_api._format_instance(
        fake_context, instance, os_instance, [], {},
        None, None, fake_flavors, [])
    # One add_item_id per unknown image kind (ami/aki/ari).
    db_api.add_item_id.assert_has_calls(
        [mock.call(mock.ANY, 'ami', os_instance.image['id'], None),
         mock.call(mock.ANY, 'aki', kernel_id, None),
         mock.call(mock.ANY, 'ari', ramdisk_id, None)],
        any_order=True)
@mock.patch('cinderclient.client.Client')
def test_format_instance_bdm(self, cinder):
    """Validate instance_api._cloud_format_instance_bdm.

    Builds fake cinder volumes attached to two OS instances and checks
    the produced EC2 'blockDeviceMapping'/'rootDeviceType' fields: 'ebs'
    when the root device is among the attachments, 'instance-store'
    otherwise.  The second call omits the db_volumes cache, so volume
    ids are resolved through the (mocked) DB layer.
    """
    id_os_instance_1 = fakes.random_os_id()
    id_os_instance_2 = fakes.random_os_id()
    cinder = cinder.return_value
    cinder.volumes.list.return_value = [
        fakes.OSVolume({'id': '2',
                        'status': 'attached',
                        'attachments': [{'device': '/dev/sdb1',
                                         'server_id': id_os_instance_1}]}),
        fakes.OSVolume({'id': '5',
                        'status': 'attached',
                        'attachments': [{'device': '/dev/sdb3',
                                         'server_id': id_os_instance_1}]}),
        fakes.OSVolume({'id': '21',
                        'status': 'attached',
                        'attachments': [{'device': 'vda',
                                         'server_id': id_os_instance_2}]}),
    ]
    os_instance_1 = fakes.OSInstance_full({
        'id': id_os_instance_1,
        'volumes_attached': [{'id': '2',
                              'delete_on_termination': False},
                             {'id': '5',
                              'delete_on_termination': True}],
        'root_device_name': '/dev/sdb1'})
    os_instance_2 = fakes.OSInstance_full({
        'id': id_os_instance_2,
        'volumes_attached': [{'id': '21',
                              'delete_on_termination': False}],
        'root_device_name': '/dev/sdc1'})
    # Pre-resolved os-volume-id -> db item cache for the first call.
    db_volumes_1 = {'2': {'id': 'vol-00000002'},
                    '5': {'id': 'vol-00000005'}}

    fake_context = base.create_context()

    # Root device '/dev/sdb1' is an attached EBS volume -> 'ebs'.
    result = {}
    instance_api._cloud_format_instance_bdm(
        fake_context, os_instance_1, result, db_volumes_1)
    self.assertThat(
        result,
        matchers.DictMatches({
            'rootDeviceType': 'ebs',
            'blockDeviceMapping': [
                {'deviceName': '/dev/sdb1',
                 'ebs': {'status': 'attached',
                         'deleteOnTermination': False,
                         'volumeId': 'vol-00000002',
                         }},
                {'deviceName': '/dev/sdb3',
                 'ebs': {'status': 'attached',
                         'deleteOnTermination': True,
                         'volumeId': 'vol-00000005',
                         }}]},
            orderless_lists=True), verbose=True)

    # Root device '/dev/sdc1' is NOT attached -> 'instance-store';
    # volume ids come from the mocked DB instead of a passed cache.
    result = {}
    with mock.patch('ec2api.db.api.IMPL') as db_api:
        db_api.get_items.return_value = [{'id': 'vol-00000015',
                                          'os_id': '21'}]
        instance_api._cloud_format_instance_bdm(
            fake_context, os_instance_2, result)
        self.assertThat(
            result,
            matchers.DictMatches({
                'rootDeviceType': 'instance-store',
                'blockDeviceMapping': [
                    {'deviceName': 'vda',
                     'ebs': {'status': 'attached',
                             'deleteOnTermination': False,
                             'volumeId': 'vol-00000015',
                             }}]}))
@mock.patch('cinderclient.client.Client')
def test_format_instance_bdm_while_attaching_volume(self, cinder):
    """A volume still in 'attaching' state must be reported with that
    status, and a root device that is not yet attached yields
    rootDeviceType 'instance-store'."""
    id_os_instance = fakes.random_os_id()
    cinder = cinder.return_value
    cinder.volumes.list.return_value = [
        fakes.OSVolume({'id': '2',
                        'status': 'attaching',
                        'attachments': [{'device': '/dev/sdb1',
                                         'server_id': id_os_instance}]})]
    os_instance = fakes.OSInstance_full({
        'id': id_os_instance,
        'volumes_attached': [{'id': '2',
                              'delete_on_termination': False}],
        'root_device_name': '/dev/vda'})
    fake_context = base.create_context()
    result = {}
    instance_api._cloud_format_instance_bdm(
        fake_context, os_instance, result,
        {'2': {'id': 'vol-00000002'}})
    self.assertThat(
        result,
        matchers.DictMatches({
            'rootDeviceType': 'instance-store',
            'blockDeviceMapping': [
                {'deviceName': '/dev/sdb1',
                 'ebs': {'status': 'attaching',
                         'deleteOnTermination': False,
                         'volumeId': 'vol-00000002',
                         }}]}))
def test_format_instance_bdm_no_bdm(self):
    """With no attached volumes, _cloud_format_instance_bdm adds nothing
    for an absent/empty root device name and only 'rootDeviceType':
    'instance-store' for a real one."""
    context = base.create_context()
    os_instance_id = fakes.random_os_id()
    os_instance = fakes.OSInstance_full({'id': os_instance_id})
    # Root device name None: result stays empty.
    res = {}
    setattr(os_instance, 'OS-EXT-SRV-ATTR:root_device_name', None)
    instance_api._cloud_format_instance_bdm(
        context, os_instance, res, {}, {os_instance_id: []})
    self.assertEqual({}, res)
    # Root device name '': result stays empty too.
    res = {}
    setattr(os_instance, 'OS-EXT-SRV-ATTR:root_device_name', '')
    instance_api._cloud_format_instance_bdm(
        context, os_instance, res, {}, {os_instance_id: []})
    self.assertEqual({}, res)
    # Real root device but no volumes: instance-store root, no bdm list.
    res = {}
    setattr(os_instance, 'OS-EXT-SRV-ATTR:root_device_name', '/dev/vdd')
    instance_api._cloud_format_instance_bdm(
        context, os_instance, res, {}, {os_instance_id: []})
    self.assertEqual({'rootDeviceType': 'instance-store'}, res)
@mock.patch('ec2api.api.instance._remove_instances')
@mock.patch('novaclient.client.Client')
def test_get_os_instances_by_instances(self, nova, remove_instances):
    """Validate instance_api._get_os_instances_by_instances.

    One of three DB instances has no OS counterpart (nova raises
    NotFound).  With exactly=True the function must raise; otherwise it
    returns the found OS instances and schedules removal of the stale
    DB item.  Also checks that an explicitly passed nova client is used.
    """
    nova = nova.return_value
    fake_context = base.create_context()
    os_instance_1 = fakes.OSInstance(fakes.OS_INSTANCE_1)
    os_instance_2 = fakes.OSInstance(fakes.OS_INSTANCE_2)

    def do_check(exactly_flag=None, specify_nova_client=False):
        # NOTE: closes over 'nova' from the enclosing scope, so the
        # rebinding of 'nova' below changes which client this uses.
        nova.servers.get.side_effect = [os_instance_1,
                                        nova_exception.NotFound(404),
                                        os_instance_2]
        absent_instance = {'id': fakes.random_ec2_id('i'),
                           'os_id': fakes.random_os_id()}
        params = (fake_context, [fakes.DB_INSTANCE_1, absent_instance,
                                 fakes.DB_INSTANCE_2],
                  exactly_flag, nova if specify_nova_client else False)
        if exactly_flag:
            self.assertRaises(exception.InvalidInstanceIDNotFound,
                              instance_api._get_os_instances_by_instances,
                              *params)
        else:
            res = instance_api._get_os_instances_by_instances(*params)
            self.assertEqual([os_instance_1, os_instance_2],
                             res)
            remove_instances.assert_called_once_with(fake_context,
                                                     [absent_instance])
            remove_instances.reset_mock()

    do_check(exactly_flag=True)
    # NOTE(ft): stop to return fake data by the mocked client and create
    # a new one to pass it into the function
    # NOTE(review): this sets side_effect on 'nova.servers', not on
    # 'nova.servers.get' — possibly intended to be
    # nova.servers.get.side_effect = None; verify against the original
    # project before relying on it.
    nova.servers.side_effect = None
    nova = mock.Mock()
    do_check(specify_nova_client=True)
@mock.patch('ec2api.api.network_interface.delete_network_interface')
@mock.patch('ec2api.api.network_interface._detach_network_interface_item')
@mock.patch('ec2api.db.api.IMPL')
def test_remove_instances(self, db_api, detach_network_interface_item,
                          delete_network_interface):
    """Validate instance_api._remove_instances.

    Builds four fake instances with two interfaces each (plus two
    orphan interfaces) and removes three of the instances.  Interfaces
    of removed instances must be detached, and additionally deleted
    when flagged delete_on_termination.
    """
    fake_context = base.create_context()
    instances = [{'id': fakes.random_ec2_id('i')}
                 for dummy in range(4)]
    # Two interfaces per instance for instances 0..2, then two more
    # bound to a never-removed fake instance id; delete_on_termination
    # is set for interfaces 0, 1, 4 and 6 by enumeration order.
    network_interfaces = [
        {'id': fakes.random_ec2_id('eni'),
         'instance_id': inst['id'],
         'delete_on_termination': num in (0, 1, 4, 6)}
        for num, inst in enumerate(itertools.chain(
            *(list(zip(instances[:3], instances[:3])) +
              [[{'id': fakes.random_ec2_id('i')}] * 2])))]
    # Two detached (instance-less) interfaces must be left untouched.
    network_interfaces.extend({'id': fakes.random_ec2_id('eni')}
                              for dummy in range(2))
    instances_to_remove = instances[:2] + [instances[3]]
    network_interfaces_to_delete = network_interfaces[0:2]
    network_interfaces_to_detach = network_interfaces[0:4]
    db_api.get_items.side_effect = tools.get_db_api_get_items(
        *network_interfaces)
    instance_api._remove_instances(fake_context, instances_to_remove)
    for eni in network_interfaces_to_detach:
        detach_network_interface_item.assert_any_call(fake_context,
                                                      eni)
    for eni in network_interfaces_to_delete:
        delete_network_interface.assert_any_call(fake_context,
                                                 eni['id'])
@mock.patch('cinderclient.client.Client')
def test_get_os_volumes(self, cinder):
    """Validate instance_api._get_os_volumes.

    The result groups volumes by the server they are attached to
    (including 'detaching' ones, excluding unattached).  Also checks
    that an admin context lists all tenants' volumes for the project.
    """
    cinder = cinder.return_value
    context = base.create_context()
    os_volume_ids = [fakes.random_os_id() for _i in range(5)]
    os_instance_ids = [fakes.random_os_id() for _i in range(2)]
    os_volumes = [
        fakes.OSVolume(
            {'id': os_volume_ids[0],
             'status': 'attached',
             'attachments': [{'server_id': os_instance_ids[0]}]}),
        fakes.OSVolume(
            {'id': os_volume_ids[1],
             'status': 'attaching',
             'attachments': []}),
        fakes.OSVolume(
            {'id': os_volume_ids[2],
             'status': 'detaching',
             'attachments': [{'server_id': os_instance_ids[0]}]}),
        fakes.OSVolume(
            {'id': os_volume_ids[3],
             'status': 'attached',
             'attachments': [{'server_id': os_instance_ids[1]}]}),
        fakes.OSVolume(
            {'id': os_volume_ids[4],
             'status': 'available',
             'attachments': []}),
    ]
    cinder.volumes.list.return_value = os_volumes
    res = instance_api._get_os_volumes(context)
    self.assertIn(os_instance_ids[0], res)
    self.assertIn(os_instance_ids[1], res)
    self.assertEqual([os_volumes[0], os_volumes[2]],
                     res[os_instance_ids[0]])
    self.assertEqual([os_volumes[3]], res[os_instance_ids[1]])
    cinder.volumes.list.assert_called_once_with(search_opts=None)
    # Admin context: list across tenants, filtered by the project.
    context.is_os_admin = True
    instance_api._get_os_volumes(context)
    cinder.volumes.list.assert_called_with(
        search_opts={'all_tenants': True,
                     'project_id': context.project_id})
@mock.patch('ec2api.clients.nova', wraps=ec2api.clients.nova)
@mock.patch('ec2api.context.get_os_admin_context')
@mock.patch('cinderclient.client.Client')
@mock.patch('novaclient.client.Client')
def test_is_ebs_instance(self, nova, cinder, get_os_admin_context,
                         nova_client_getter):
    """Validate instance_api._is_ebs_instance.

    An instance is EBS-backed only when a cinder volume is attached
    whose device equals the instance's root device name (matching with
    or without the '/dev/' prefix) on that same instance.  Every other
    combination below must yield False.
    """
    nova = nova.return_value
    cinder = cinder.return_value
    context = base.create_context()
    os_instance = fakes.OSInstance_full({'id': fakes.random_os_id()})
    nova.servers.get.return_value = os_instance
    # No volumes at all.
    cinder.volumes.list.return_value = []
    self.assertFalse(instance_api._is_ebs_instance(context,
                                                   os_instance.id))
    # Attached root volume but empty root device name on the instance.
    cinder.volumes.list.return_value = [
        fakes.OSVolume(
            {'id': fakes.random_os_id(),
             'status': 'attached',
             'attachments': [{'device': '/dev/vda',
                              'server_id': os_instance.id}]})]
    setattr(os_instance, 'OS-EXT-SRV-ATTR:root_device_name', '')
    self.assertFalse(instance_api._is_ebs_instance(context,
                                                   os_instance.id))
    # Root device name set but no volumes.
    setattr(os_instance, 'OS-EXT-SRV-ATTR:root_device_name', '/dev/vda')
    cinder.volumes.list.return_value = []
    self.assertFalse(instance_api._is_ebs_instance(context,
                                                   os_instance.id))
    # Matching device, but attached to a different server.
    cinder.volumes.list.return_value = [
        fakes.OSVolume(
            {'id': fakes.random_os_id(),
             'status': 'attached',
             'attachments': [{'device': '/dev/vda',
                              'server_id': fakes.random_os_id()}]})]
    self.assertFalse(instance_api._is_ebs_instance(context,
                                                   os_instance.id))
    # Right server, but a non-root device.
    cinder.volumes.list.return_value = [
        fakes.OSVolume(
            {'id': fakes.random_os_id(),
             'status': 'attached',
             'attachments': [{'device': '/dev/vdb',
                              'server_id': os_instance.id}]})]
    self.assertFalse(instance_api._is_ebs_instance(context,
                                                   os_instance.id))
    # Root device attached to this server: EBS-backed.
    cinder.volumes.list.return_value = [
        fakes.OSVolume(
            {'id': fakes.random_os_id(),
             'status': 'attached',
             'attachments': [{'device': '/dev/vda',
                              'server_id': os_instance.id}]})]
    self.assertTrue(instance_api._is_ebs_instance(context,
                                                  os_instance.id))
    # The lookup must run under the OS admin context.
    nova_client_getter.assert_called_with(
        get_os_admin_context.return_value)
    cinder.volumes.list.assert_called_with(search_opts=None)
    # Short device name 'vda' matches '/dev/vda' too.
    cinder.volumes.list.return_value = [
        fakes.OSVolume(
            {'id': fakes.random_os_id(),
             'status': 'attached',
             'attachments': [{'device': 'vda',
                              'server_id': os_instance.id}]})]
    self.assertTrue(instance_api._is_ebs_instance(context,
                                                  os_instance.id))
|
|
#!/usr/bin/python
import os
import re
import json
from lxml import etree as et
import pcbmode.config as config
from . import messages as msg
# pcbmode modules
from . import svg
from . import utils
from . import place
import copy
from .style import Style
from .point import Point
from .shape import Shape
class Footprint():
"""
"""
def __init__(self, footprint):
self._footprint = footprint
self._shapes = {'conductor': {},
'pours': {},
'soldermask': {},
'silkscreen': {},
'assembly': {},
'solderpaste': {},
'drills': {}}
self._processPins()
self._processPours()
self._processShapes()
self._processAssemblyShapes()
def getShapes(self):
return self._shapes
def _processPins(self):
"""
Converts pins into 'shapes'
"""
pins = self._footprint.get('pins') or {}
for pin in pins:
pin_location = pins[pin]['layout']['location'] or [0, 0]
try:
pad_name = pins[pin]['layout']['pad']
except:
msg.error("Each defined 'pin' must have a 'pad' name that is defined in the 'pads' dection of the footprint.")
try:
pad_dict = self._footprint['pads'][pad_name]
except:
msg.error("There doesn't seem to be a pad definition for pad '%s'." % pad_name)
# Get the pin's rotation, if any
pin_rotate = pins[pin]['layout'].get('rotate') or 0
shapes = pad_dict.get('shapes') or []
for shape_dict in shapes:
shape_dict = shape_dict.copy()
# Which layer(s) to place the shape on
layers = utils.getExtendedLayerList(shape_dict.get('layers') or ['top'])
# Add the pin's location to the pad's location
shape_location = shape_dict.get('location') or [0, 0]
shape_dict['location'] = [shape_location[0] + pin_location[0],
shape_location[1] + pin_location[1]]
# Add the pin's rotation to the pad's rotation
shape_dict['rotate'] = (shape_dict.get('rotate') or 0) + pin_rotate
# Determine if and which label to show
show_name = pins[pin]['layout'].get('show-label') or True
if show_name == True:
pin_label = pins[pin]['layout'].get('label') or pin
for layer in layers:
shape = Shape(shape_dict)
style = Style(shape_dict, 'conductor')
shape.setStyle(style)
try:
self._shapes['conductor'][layer].append(shape)
except:
self._shapes['conductor'][layer] = []
self._shapes['conductor'][layer].append(shape)
for stype in ['soldermask','solderpaste']:
# Get a custom shape specification if it exists
sdict_list = shape_dict.get(stype)
# Not defined; default
if sdict_list == None:
# Use default settings for shape based on
# the pad shape
sdict = shape_dict.copy()
# Which shape type is the pad?
shape_type = shape.getType()
# Apply modifier based on shape type
if shape_type == 'path':
sdict['scale'] = shape.getScale()*config.brd['distances'][stype]['path-scale']
elif shape_type in ['rect', 'rectangle']:
sdict['width'] += config.brd['distances'][stype]['rect-buffer']
sdict['height'] += config.brd['distances'][stype]['rect-buffer']
elif shape_type in ['circ', 'circle']:
sdict['diameter'] += config.brd['distances'][stype]['circle-buffer']
else:
pass
# Create shape based on new dictionary
sshape = Shape(sdict)
# Define style
sstyle = Style(sdict, stype)
# Apply style
sshape.setStyle(sstyle)
# Add shape to footprint's shape dictionary
#self._shapes[stype][layer].append(sshape)
try:
self._shapes[stype][layer].append(sshape)
except:
self._shapes[stype][layer] = []
self._shapes[stype][layer].append(sshape)
# Do not place shape
elif (sdict_list == {}) or (sdict_list == []):
pass
# Custom shape definition
else:
# If dict (as before support of multiple
# shapes) then append to a single element
# list
if type(sdict_list) is dict:
sdict_list = [sdict_list]
# Process list of shapes
for sdict_ in sdict_list:
sdict = sdict_.copy()
shape_loc = utils.toPoint(sdict.get('location') or [0, 0])
# Apply rotation
sdict['rotate'] = (sdict.get('rotate') or 0) + pin_rotate
# Rotate location
shape_loc.rotate(pin_rotate, Point())
sdict['location'] = [shape_loc.x + pin_location[0],
shape_loc.y + pin_location[1]]
# Create new shape
sshape = Shape(sdict)
# Create new style
sstyle = Style(sdict, stype)
# Apply style
sshape.setStyle(sstyle)
# Add shape to footprint's shape dictionary
#self._shapes[stype][layer].append(sshape)
try:
self._shapes[stype][layer].append(sshape)
except:
self._shapes[stype][layer] = []
self._shapes[stype][layer].append(sshape)
# Add pin label
if (pin_label != None):
shape.setLabel(pin_label)
drills = pad_dict.get('drills') or []
for drill_dict in drills:
drill_dict = drill_dict.copy()
drill_dict['type'] = drill_dict.get('type') or 'drill'
drill_location = drill_dict.get('location') or [0, 0]
drill_dict['location'] = [drill_location[0] + pin_location[0],
drill_location[1] + pin_location[1]]
shape = Shape(drill_dict)
style = Style(drill_dict, 'drills')
shape.setStyle(style)
try:
self._shapes['drills']['top'].append(shape)
except:
self._shapes['drills']['top'] = []
self._shapes['drills']['top'].append(shape)
def _processPours(self):
    """Add pour shapes defined in the footprint to the shape dictionary.

    Reads ``layout.pours.shapes`` from the footprint definition and
    appends a styled Shape per requested layer. Returns silently when
    the footprint defines no pours.
    """
    try:
        shapes = self._footprint['layout']['pours']['shapes']
    except (KeyError, TypeError):
        # Footprint defines no pours -- nothing to do.
        return
    for shape_dict in shapes:
        # Default to the top layer when the shape names no layers.
        layers = utils.getExtendedLayerList(shape_dict.get('layers') or ['top'])
        for layer in layers:
            shape = Shape(shape_dict)
            style = Style(shape_dict, 'conductor', 'pours')
            shape.setStyle(style)
            # Create the per-layer list on first use instead of
            # relying on a bare except to initialise it.
            self._shapes['pours'].setdefault(layer, []).append(shape)
def _processShapes(self):
    """Add conductor, silkscreen and soldermask shapes to the shape dict.

    For each sheet, reads ``layout.<sheet>.shapes`` from the footprint
    (missing sheets are treated as empty) and appends a styled Shape
    per requested layer.
    """
    sheets = ['conductor', 'silkscreen', 'soldermask']
    for sheet in sheets:
        try:
            shapes = self._footprint['layout'][sheet]['shapes']
        except (KeyError, TypeError):
            shapes = []
        for shape_dict in shapes:
            # Default to the top layer when the shape names no layers.
            layers = utils.getExtendedLayerList(shape_dict.get('layers') or ['top'])
            for layer in layers:
                # Mirror the shape if it's text and on the bottom layer,
                # but let an explicit 'mirror' setting override.
                # NOTE(review): this mutates shape_dict, so the mirror
                # setting persists for later layers of the same shape --
                # preserved from the original behavior.
                if layer == 'bottom':
                    if shape_dict['type'] == 'text':
                        shape_dict['mirror'] = shape_dict.get('mirror') or 'True'
                shape = Shape(shape_dict)
                style = Style(shape_dict, sheet)
                shape.setStyle(style)
                # Create the per-layer list on first use instead of
                # relying on a bare except to initialise it.
                self._shapes[sheet].setdefault(layer, []).append(shape)
def _processAssemblyShapes(self):
    """Add assembly-layer shapes to the shape dictionary.

    Reads ``layout.assembly.shapes`` from the footprint definition;
    returns silently when no assembly shapes are defined.
    """
    try:
        shapes = self._footprint['layout']['assembly']['shapes']
    except (KeyError, TypeError):
        return
    for shape_dict in shapes:
        # NOTE(review): this reads the singular key 'layer' while the
        # other processors read 'layers' -- presumably the assembly
        # schema differs; kept as-is, confirm against footprint data.
        layers = utils.getExtendedLayerList(shape_dict.get('layer') or ['top'])
        for layer in layers:
            shape = Shape(shape_dict)
            style = Style(shape_dict, 'assembly')
            shape.setStyle(style)
            # Create the per-layer list on first use instead of
            # relying on a bare except to initialise it.
            self._shapes['assembly'].setdefault(layer, []).append(shape)
|
|
# Copyright 2014 Violin Memory, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Violin Memory 6000 Series All-Flash Array Fibrechannel Driver
"""
import mock
from oslo_utils import units
from cinder import context
from cinder.db.sqlalchemy import models
from cinder import exception
from cinder import test
from cinder.tests.unit import fake_vmem_client as vmemclient
from cinder.volume import configuration as conf
from cinder.volume.drivers.violin import v6000_common
from cinder.volume.drivers.violin import v6000_fcp
# Fake UUID-style ids and database-style records shared by the tests below.
VOLUME_ID = "abcdabcd-1234-abcd-1234-abcdeffedcba"
# Fake volume record (dict form, as passed to driver entry points).
VOLUME = {
    "name": "volume-" + VOLUME_ID,
    "id": VOLUME_ID,
    "display_name": "fake_volume",
    "size": 2,
    "host": "irrelevant",
    "volume_type": None,
    "volume_type_id": None,
}
SNAPSHOT_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbb"
# Fake snapshot record taken from VOLUME.
SNAPSHOT = {
    "name": "snapshot-" + SNAPSHOT_ID,
    "id": SNAPSHOT_ID,
    "volume_id": VOLUME_ID,
    "volume_name": "volume-" + VOLUME_ID,
    "volume_size": 2,
    "display_name": "fake_snapshot",
    "volume": VOLUME,
}
SRC_VOL_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbc"
# Fake source volume used by the clone tests.
SRC_VOL = {
    "name": "volume-" + SRC_VOL_ID,
    "id": SRC_VOL_ID,
    "display_name": "fake_src_vol",
    "size": 2,
    "host": "irrelevant",
    "volume_type": None,
    "volume_type_id": None,
}
INITIATOR_IQN = "iqn.1111-22.org.debian:11:222"
# Fake connector info as supplied by the compute host during attach.
CONNECTOR = {
    "initiator": INITIATOR_IQN,
    "host": "irrelevant",
    'wwpns': [u'50014380186b3f65', u'50014380186b3f67'],
}
# Fibre Channel WWPNs of the array-side targets.
FC_TARGET_WWPNS = [
    '31000024ff45fb22', '21000024ff45fb23',
    '51000024ff45f1be', '41000024ff45f1bf'
]
# Fibre Channel WWPNs of the host-side initiators (match CONNECTOR['wwpns']).
FC_INITIATOR_WWPNS = [
    '50014380186b3f65', '50014380186b3f67'
]
# Fabric map in the form returned by an FC SAN lookup service
# (used by the zoning-enabled initiator/target map test).
FC_FABRIC_MAP = {
    'fabricA':
    {'target_port_wwn_list': [FC_TARGET_WWPNS[0], FC_TARGET_WWPNS[1]],
     'initiator_port_wwn_list': [FC_INITIATOR_WWPNS[0]]},
    'fabricB':
    {'target_port_wwn_list': [FC_TARGET_WWPNS[2], FC_TARGET_WWPNS[3]],
     'initiator_port_wwn_list': [FC_INITIATOR_WWPNS[1]]}
}
# Expected initiator -> target mapping derived from FC_FABRIC_MAP.
FC_INITIATOR_TARGET_MAP = {
    FC_INITIATOR_WWPNS[0]: [FC_TARGET_WWPNS[0], FC_TARGET_WWPNS[1]],
    FC_INITIATOR_WWPNS[1]: [FC_TARGET_WWPNS[2], FC_TARGET_WWPNS[3]]
}
class V6000FCPDriverTestCase(test.TestCase):
    """Test cases for VMEM FCP driver."""

    def setUp(self):
        super(V6000FCPDriverTestCase, self).setUp()
        self.conf = self.setup_configuration()
        self.driver = v6000_fcp.V6000FCDriver(configuration=self.conf)
        self.driver.common.container = 'myContainer'
        self.driver.device_id = 'ata-VIOLIN_MEMORY_ARRAY_23109R00000022'
        self.driver.gateway_fc_wwns = FC_TARGET_WWPNS
        self.stats = {}
        self.driver.set_initialized()

    def tearDown(self):
        super(V6000FCPDriverTestCase, self).tearDown()

    def setup_configuration(self):
        """Build a mock driver configuration with fixed test values."""
        config = mock.Mock(spec=conf.Configuration)
        config.volume_backend_name = 'v6000_fcp'
        config.san_ip = '1.1.1.1'
        config.san_login = 'admin'
        config.san_password = ''
        config.san_thin_provision = False
        config.san_is_local = False
        config.gateway_mga = '2.2.2.2'
        config.gateway_mgb = '3.3.3.3'
        config.use_igroups = False
        config.request_timeout = 300
        config.container = 'myContainer'
        return config

    def setup_mock_vshare(self, m_conf=None):
        """Create a fake VShare communication object."""
        _m_vshare = mock.Mock(name='VShare',
                              version='1.1.1',
                              spec=vmemclient.mock_client_conf)
        if m_conf:
            _m_vshare.configure_mock(**m_conf)
        return _m_vshare

    @mock.patch.object(v6000_common.V6000Common, 'check_for_setup_error')
    def test_check_for_setup_error(self, m_setup_func):
        """No setup errors are found."""
        result = self.driver.check_for_setup_error()
        m_setup_func.assert_called_with()
        self.assertIsNone(result)

    @mock.patch.object(v6000_common.V6000Common, 'check_for_setup_error')
    def test_check_for_setup_error_no_wwn_config(self, m_setup_func):
        """No wwns were found during setup."""
        self.driver.gateway_fc_wwns = []
        self.assertRaises(exception.ViolinInvalidBackendConfig,
                          self.driver.check_for_setup_error)

    def test_create_volume(self):
        """Volume created successfully."""
        self.driver.common._create_lun = mock.Mock()
        result = self.driver.create_volume(VOLUME)
        self.driver.common._create_lun.assert_called_with(VOLUME)
        self.assertIsNone(result)

    def test_delete_volume(self):
        """Volume deleted successfully."""
        self.driver.common._delete_lun = mock.Mock()
        result = self.driver.delete_volume(VOLUME)
        self.driver.common._delete_lun.assert_called_with(VOLUME)
        self.assertIsNone(result)

    def test_create_snapshot(self):
        """Snapshot created successfully."""
        self.driver.common._create_lun_snapshot = mock.Mock()
        result = self.driver.create_snapshot(SNAPSHOT)
        self.driver.common._create_lun_snapshot.assert_called_with(SNAPSHOT)
        self.assertIsNone(result)

    def test_delete_snapshot(self):
        """Snapshot deleted successfully."""
        self.driver.common._delete_lun_snapshot = mock.Mock()
        result = self.driver.delete_snapshot(SNAPSHOT)
        self.driver.common._delete_lun_snapshot.assert_called_with(SNAPSHOT)
        self.assertIsNone(result)

    @mock.patch.object(context, 'get_admin_context')
    def test_create_volume_from_snapshot(self, m_context_func):
        """Volume created from a snapshot successfully."""
        m_context_func.return_value = None
        self.driver.common._create_lun = mock.Mock()
        self.driver.copy_volume_data = mock.Mock()
        result = self.driver.create_volume_from_snapshot(VOLUME, SNAPSHOT)
        m_context_func.assert_called_with()
        self.driver.common._create_lun.assert_called_with(VOLUME)
        self.driver.copy_volume_data.assert_called_with(None, SNAPSHOT, VOLUME)
        self.assertIsNone(result)

    @mock.patch.object(context, 'get_admin_context')
    def test_create_cloned_volume(self, m_context_func):
        """Volume clone created successfully."""
        m_context_func.return_value = None
        self.driver.common._create_lun = mock.Mock()
        self.driver.copy_volume_data = mock.Mock()
        result = self.driver.create_cloned_volume(VOLUME, SRC_VOL)
        m_context_func.assert_called_with()
        self.driver.common._create_lun.assert_called_with(VOLUME)
        self.driver.copy_volume_data.assert_called_with(None, SRC_VOL, VOLUME)
        self.assertIsNone(result)

    def test_initialize_connection(self):
        """FC connection properties are built for a volume export."""
        lun_id = 1
        igroup = None
        target_wwns = self.driver.gateway_fc_wwns
        init_targ_map = {}
        volume = mock.Mock(spec=models.Volume)
        self.driver.common.vip = self.setup_mock_vshare()
        self.driver._export_lun = mock.Mock(return_value=lun_id)
        self.driver._build_initiator_target_map = mock.Mock(
            return_value=(target_wwns, init_targ_map))
        props = self.driver.initialize_connection(volume, CONNECTOR)
        self.driver._export_lun.assert_called_with(volume, CONNECTOR, igroup)
        self.driver.common.vip.basic.save_config.assert_called_with()
        self.driver._build_initiator_target_map.assert_called_with(
            CONNECTOR)
        self.assertEqual("fibre_channel", props['driver_volume_type'])
        self.assertTrue(props['data']['target_discovered'])
        self.assertEqual(target_wwns, props['data']['target_wwn'])
        self.assertEqual(lun_id, props['data']['target_lun'])
        self.assertEqual(init_targ_map, props['data']['initiator_target_map'])

    def test_initialize_connection_with_snapshot_object(self):
        """FC connection properties are built for a snapshot export."""
        lun_id = 1
        igroup = None
        target_wwns = self.driver.gateway_fc_wwns
        init_targ_map = {}
        snapshot = mock.Mock(spec=models.Snapshot)
        self.driver.common.vip = self.setup_mock_vshare()
        self.driver._export_snapshot = mock.Mock(return_value=lun_id)
        self.driver._build_initiator_target_map = mock.Mock(
            return_value=(target_wwns, init_targ_map))
        props = self.driver.initialize_connection(snapshot, CONNECTOR)
        self.driver._export_snapshot.assert_called_with(
            snapshot, CONNECTOR, igroup)
        self.driver.common.vip.basic.save_config.assert_called_with()
        self.driver._build_initiator_target_map.assert_called_with(
            CONNECTOR)
        self.assertEqual("fibre_channel", props['driver_volume_type'])
        self.assertTrue(props['data']['target_discovered'])
        self.assertEqual(target_wwns, props['data']['target_wwn'])
        self.assertEqual(lun_id, props['data']['target_lun'])
        self.assertEqual(init_targ_map, props['data']['initiator_target_map'])

    def test_terminate_connection(self):
        """Volume is unexported and zoning info returned."""
        target_wwns = self.driver.gateway_fc_wwns
        init_targ_map = {}
        volume = mock.Mock(spec=models.Volume)
        self.driver.common.vip = self.setup_mock_vshare()
        self.driver._unexport_lun = mock.Mock()
        self.driver._is_initiator_connected_to_array = mock.Mock(
            return_value=False)
        self.driver._build_initiator_target_map = mock.Mock(
            return_value=(target_wwns, init_targ_map))
        props = self.driver.terminate_connection(volume, CONNECTOR)
        self.driver._unexport_lun.assert_called_with(volume)
        self.driver.common.vip.basic.save_config.assert_called_with()
        self.driver._is_initiator_connected_to_array.assert_called_with(
            CONNECTOR)
        self.driver._build_initiator_target_map.assert_called_with(
            CONNECTOR)
        self.assertEqual("fibre_channel", props['driver_volume_type'])
        self.assertEqual(target_wwns, props['data']['target_wwn'])
        self.assertEqual(init_targ_map, props['data']['initiator_target_map'])

    def test_terminate_connection_snapshot_object(self):
        """Snapshot is unexported and zoning info returned."""
        target_wwns = self.driver.gateway_fc_wwns
        init_targ_map = {}
        snapshot = mock.Mock(spec=models.Snapshot)
        self.driver.common.vip = self.setup_mock_vshare()
        self.driver._unexport_snapshot = mock.Mock()
        self.driver._is_initiator_connected_to_array = mock.Mock(
            return_value=False)
        self.driver._build_initiator_target_map = mock.Mock(
            return_value=(target_wwns, init_targ_map))
        props = self.driver.terminate_connection(snapshot, CONNECTOR)
        self.assertEqual("fibre_channel", props['driver_volume_type'])
        self.assertEqual(target_wwns, props['data']['target_wwn'])
        self.assertEqual(init_targ_map, props['data']['initiator_target_map'])

    def test_get_volume_stats(self):
        """Stats query delegates to _update_stats and returns cached stats."""
        self.driver._update_stats = mock.Mock()
        self.driver._update_stats()
        result = self.driver.get_volume_stats(True)
        self.driver._update_stats.assert_called_with()
        self.assertEqual(self.driver.stats, result)

    def test_export_lun(self):
        """LUN is exported and its lun id returned."""
        lun_id = '1'
        igroup = 'test-igroup-1'
        response = {'code': 0, 'message': ''}
        self.driver.common.vip = self.setup_mock_vshare()
        self.driver.common._send_cmd_and_verify = mock.Mock(
            return_value=response)
        self.driver.common._get_lun_id = mock.Mock(return_value=lun_id)
        result = self.driver._export_lun(VOLUME, CONNECTOR, igroup)
        self.driver.common._send_cmd_and_verify.assert_called_with(
            self.driver.common.vip.lun.export_lun,
            self.driver.common._wait_for_export_config, '',
            [self.driver.common.container, VOLUME['id'], 'all',
             igroup, 'auto'], [VOLUME['id'], 'state=True'])
        self.driver.common._get_lun_id.assert_called_with(VOLUME['id'])
        self.assertEqual(lun_id, result)

    def test_export_lun_fails_with_exception(self):
        """Backend error during LUN export is propagated."""
        lun_id = '1'
        igroup = 'test-igroup-1'
        response = {'code': 14000, 'message': 'Generic error'}
        failure = exception.ViolinBackendErr
        self.driver.common.vip = self.setup_mock_vshare()
        self.driver.common._send_cmd_and_verify = mock.Mock(
            side_effect=failure(response['message']))
        self.driver.common._get_lun_id = mock.Mock(return_value=lun_id)
        self.assertRaises(failure, self.driver._export_lun,
                          VOLUME, CONNECTOR, igroup)

    def test_unexport_lun(self):
        """LUN is unexported successfully."""
        response = {'code': 0, 'message': ''}
        self.driver.common.vip = self.setup_mock_vshare()
        self.driver.common._send_cmd_and_verify = mock.Mock(
            return_value=response)
        result = self.driver._unexport_lun(VOLUME)
        self.driver.common._send_cmd_and_verify.assert_called_with(
            self.driver.common.vip.lun.unexport_lun,
            self.driver.common._wait_for_export_config, '',
            [self.driver.common.container, VOLUME['id'], 'all', 'all', 'auto'],
            [VOLUME['id'], 'state=False'])
        self.assertIsNone(result)

    def test_unexport_lun_fails_with_exception(self):
        """Backend error during LUN unexport is propagated."""
        response = {'code': 14000, 'message': 'Generic error'}
        failure = exception.ViolinBackendErr
        self.driver.common.vip = self.setup_mock_vshare()
        self.driver.common._send_cmd_and_verify = mock.Mock(
            side_effect=failure(response['message']))
        self.assertRaises(failure, self.driver._unexport_lun, VOLUME)

    def test_export_snapshot(self):
        """Snapshot LUN is exported and its lun id returned."""
        lun_id = '1'
        igroup = 'test-igroup-1'
        response = {'code': 0, 'message': ''}
        self.driver.common.vip = self.setup_mock_vshare()
        self.driver.common._send_cmd = mock.Mock(return_value=response)
        self.driver.common._wait_for_export_config = mock.Mock()
        self.driver.common._get_snapshot_id = mock.Mock(return_value=lun_id)
        result = self.driver._export_snapshot(SNAPSHOT, CONNECTOR, igroup)
        self.driver.common._send_cmd.assert_called_with(
            self.driver.common.vip.snapshot.export_lun_snapshot, '',
            self.driver.common.container, SNAPSHOT['volume_id'],
            SNAPSHOT['id'], igroup, 'all', 'auto')
        self.driver.common._wait_for_export_config.assert_called_with(
            SNAPSHOT['volume_id'], SNAPSHOT['id'], state=True)
        self.driver.common._get_snapshot_id.assert_called_once_with(
            SNAPSHOT['volume_id'], SNAPSHOT['id'])
        self.assertEqual(lun_id, result)

    def test_unexport_snapshot(self):
        """Snapshot LUN is unexported successfully."""
        response = {'code': 0, 'message': ''}
        self.driver.common.vip = self.setup_mock_vshare()
        self.driver.common._send_cmd = mock.Mock(return_value=response)
        self.driver.common._wait_for_export_config = mock.Mock()
        result = self.driver._unexport_snapshot(SNAPSHOT)
        self.driver.common._send_cmd.assert_called_with(
            self.driver.common.vip.snapshot.unexport_lun_snapshot, '',
            self.driver.common.container, SNAPSHOT['volume_id'],
            SNAPSHOT['id'], 'all', 'all', 'auto', False)
        self.driver.common._wait_for_export_config.assert_called_with(
            SNAPSHOT['volume_id'], SNAPSHOT['id'], state=False)
        self.assertIsNone(result)

    def test_add_igroup_member(self):
        """Initiator WWPNs are added to the igroup."""
        igroup = 'test-group-1'
        response = {'code': 0, 'message': 'success'}
        wwpns = ['wwn.50:01:43:80:18:6b:3f:65', 'wwn.50:01:43:80:18:6b:3f:67']
        conf = {
            'igroup.add_initiators.return_value': response,
        }
        self.driver.common.vip = self.setup_mock_vshare(m_conf=conf)
        self.driver._convert_wwns_openstack_to_vmem = mock.Mock(
            return_value=wwpns)
        result = self.driver._add_igroup_member(CONNECTOR, igroup)
        self.driver._convert_wwns_openstack_to_vmem.assert_called_with(
            CONNECTOR['wwpns'])
        self.driver.common.vip.igroup.add_initiators.assert_called_with(
            igroup, wwpns)
        self.assertIsNone(result)

    def test_build_initiator_target_map(self):
        """Successfully build a map when zoning is enabled."""
        expected_targ_wwns = FC_TARGET_WWPNS
        self.driver.lookup_service = mock.Mock()
        self.driver.lookup_service.get_device_mapping_from_network.\
            return_value = FC_FABRIC_MAP
        (targ_wwns, init_targ_map) = \
            self.driver._build_initiator_target_map(CONNECTOR)
        self.driver.lookup_service.get_device_mapping_from_network.\
            assert_called_with(CONNECTOR['wwpns'], self.driver.gateway_fc_wwns)
        self.assertEqual(set(expected_targ_wwns), set(targ_wwns))
        i = FC_INITIATOR_WWPNS[0]
        self.assertIn(FC_TARGET_WWPNS[0], init_targ_map[i])
        self.assertIn(FC_TARGET_WWPNS[1], init_targ_map[i])
        self.assertEqual(2, len(init_targ_map[i]))
        i = FC_INITIATOR_WWPNS[1]
        self.assertIn(FC_TARGET_WWPNS[2], init_targ_map[i])
        self.assertIn(FC_TARGET_WWPNS[3], init_targ_map[i])
        self.assertEqual(2, len(init_targ_map[i]))
        self.assertEqual(2, len(init_targ_map))

    def test_build_initiator_target_map_no_lookup_service(self):
        """Successfully build a map when zoning is disabled."""
        expected_targ_wwns = FC_TARGET_WWPNS
        expected_init_targ_map = {
            CONNECTOR['wwpns'][0]: FC_TARGET_WWPNS,
            CONNECTOR['wwpns'][1]: FC_TARGET_WWPNS
        }
        self.driver.lookup_service = None
        targ_wwns, init_targ_map = self.driver._build_initiator_target_map(
            CONNECTOR)
        self.assertEqual(expected_targ_wwns, targ_wwns)
        self.assertEqual(expected_init_targ_map, init_targ_map)

    def test_is_initiator_connected_to_array(self):
        """Successfully finds an initiator with remaining active session."""
        converted_wwpns = ['50:01:43:80:18:6b:3f:65',
                           '50:01:43:80:18:6b:3f:67']
        prefix = "/vshare/config/export/container"
        bn = "%s/%s/lun/**" % (prefix, self.driver.common.container)
        resp_binding0 = "%s/%s/lun/%s/target/hba-a1/initiator/%s" \
            % (prefix, self.driver.common.container, VOLUME['id'],
               converted_wwpns[0])
        resp_binding1 = "%s/%s/lun/%s/target/hba-a1/initiator/%s" \
            % (prefix, self.driver.common.container, VOLUME['id'],
               converted_wwpns[1])
        response = {
            resp_binding0: converted_wwpns[0],
            resp_binding1: converted_wwpns[1]
        }
        conf = {
            'basic.get_node_values.return_value': response,
        }
        self.driver.common.vip = self.setup_mock_vshare(m_conf=conf)
        self.driver._convert_wwns_openstack_to_vmem = mock.Mock(
            return_value=converted_wwpns)
        self.assertTrue(self.driver._is_initiator_connected_to_array(
            CONNECTOR))
        self.driver.common.vip.basic.get_node_values.assert_called_with(bn)

    def test_is_initiator_connected_to_array_empty_response(self):
        """Successfully finds no initiators with remaining active sessions."""
        converted_wwpns = ['50:01:43:80:18:6b:3f:65',
                           '50:01:43:80:18:6b:3f:67']
        response = {}
        conf = {
            'basic.get_node_values.return_value': response,
        }
        self.driver.common.vip = self.setup_mock_vshare(m_conf=conf)
        self.driver._convert_wwns_openstack_to_vmem = mock.Mock(
            return_value=converted_wwpns)
        self.assertFalse(self.driver._is_initiator_connected_to_array(
            CONNECTOR))

    def test_update_stats(self):
        """Capacity stats are fetched from the backend and converted to GB."""
        backend_name = self.conf.volume_backend_name
        vendor_name = "Violin Memory, Inc."
        tot_bytes = 100 * units.Gi
        free_bytes = 50 * units.Gi
        bn0 = '/cluster/state/master_id'
        bn1 = "/vshare/state/global/1/container/myContainer/total_bytes"
        bn2 = "/vshare/state/global/1/container/myContainer/free_bytes"
        response1 = {bn0: '1'}
        response2 = {bn1: tot_bytes, bn2: free_bytes}
        conf = {
            'basic.get_node_values.side_effect': [response1, response2],
        }
        self.driver.common.vip = self.setup_mock_vshare(m_conf=conf)
        result = self.driver._update_stats()
        calls = [mock.call(bn0), mock.call([bn1, bn2])]
        self.driver.common.vip.basic.get_node_values.assert_has_calls(calls)
        self.assertEqual(100, self.driver.stats['total_capacity_gb'])
        self.assertEqual(50, self.driver.stats['free_capacity_gb'])
        self.assertEqual(backend_name,
                         self.driver.stats['volume_backend_name'])
        self.assertEqual(vendor_name, self.driver.stats['vendor_name'])
        self.assertIsNone(result)

    def test_update_stats_fails_data_query(self):
        """Stats query to the backend fails; capacities fall back to zero."""
        backend_name = self.conf.volume_backend_name
        vendor_name = "Violin Memory, Inc."
        bn0 = '/cluster/state/master_id'
        response1 = {bn0: '1'}
        response2 = {}
        conf = {
            'basic.get_node_values.side_effect': [response1, response2],
        }
        self.driver.common.vip = self.setup_mock_vshare(m_conf=conf)
        self.assertIsNone(self.driver._update_stats())
        self.assertEqual(0, self.driver.stats['total_capacity_gb'])
        self.assertEqual(0, self.driver.stats['free_capacity_gb'])
        self.assertEqual(backend_name,
                         self.driver.stats['volume_backend_name'])
        self.assertEqual(vendor_name, self.driver.stats['vendor_name'])

    def test_update_stats_fails_data_query_but_has_cached_stats(self):
        """Stats query to backend fails, but cached stats are available. """
        backend_name = self.conf.volume_backend_name
        vendor_name = "Violin Memory, Inc."
        bn0 = '/cluster/state/master_id'
        response1 = {bn0: '1'}
        response2 = {}
        # fake cached stats, from a previous stats query
        self.driver.stats = {'free_capacity_gb': 50, 'total_capacity_gb': 100}
        conf = {
            'basic.get_node_values.side_effect': [response1, response2],
        }
        self.driver.common.vip = self.setup_mock_vshare(m_conf=conf)
        self.assertIsNone(self.driver._update_stats())
        self.assertEqual(100, self.driver.stats['total_capacity_gb'])
        self.assertEqual(50, self.driver.stats['free_capacity_gb'])
        self.assertEqual(backend_name,
                         self.driver.stats['volume_backend_name'])
        self.assertEqual(vendor_name, self.driver.stats['vendor_name'])

    def test_get_active_fc_targets(self):
        """Active FC target WWPNs are collected across all cluster nodes."""
        bn0 = '/vshare/state/global/*'
        response0 = {'/vshare/state/global/1': 1,
                     '/vshare/state/global/2': 2}
        bn1 = '/vshare/state/global/1/target/fc/**'
        response1 = {'/vshare/state/global/1/target/fc/hba-a1/wwn':
                     'wwn.21:00:00:24:ff:45:fb:22'}
        bn2 = '/vshare/state/global/2/target/fc/**'
        response2 = {'/vshare/state/global/2/target/fc/hba-a1/wwn':
                     'wwn.21:00:00:24:ff:45:e2:30'}
        wwpns = ['21000024ff45fb22', '21000024ff45e230']
        conf = {
            'basic.get_node_values.side_effect':
            [response0, response1, response2],
        }
        self.driver.common.vip = self.setup_mock_vshare(m_conf=conf)
        result = self.driver._get_active_fc_targets()
        calls = [mock.call(bn0), mock.call(bn1), mock.call(bn2)]
        self.driver.common.vip.basic.get_node_values.assert_has_calls(
            calls, any_order=True)
        self.assertEqual(wwpns, result)

    def test_convert_wwns_openstack_to_vmem(self):
        """Bare WWNs are converted to the array's 'wwn.xx:...' notation."""
        vmem_wwns = ['wwn.50:01:43:80:18:6b:3f:65']
        openstack_wwns = ['50014380186b3f65']
        result = self.driver._convert_wwns_openstack_to_vmem(openstack_wwns)
        self.assertEqual(vmem_wwns, result)

    def test_convert_wwns_vmem_to_openstack(self):
        """Array 'wwn.xx:...' notation is converted back to bare WWNs."""
        vmem_wwns = ['wwn.50:01:43:80:18:6b:3f:65']
        openstack_wwns = ['50014380186b3f65']
        result = self.driver._convert_wwns_vmem_to_openstack(vmem_wwns)
        self.assertEqual(openstack_wwns, result)
|
|
import h2o
from h2o.estimators import H2OGradientBoostingEstimator, H2OGeneralizedLinearEstimator
from tests import pyunit_utils
import os
import sys
from pandas.testing import assert_frame_equal
# Java source template for the combined POJO. Expanded with six %s values,
# in order: class name (twice), GLM sub-model class name, GBM sub-model
# class name, embedded GLM POJO source, embedded GBM POJO source.
# The string content is Java, not Python -- do not reformat.
TEMPLATE = """
import java.util.HashMap;
import java.util.Map;
import hex.genmodel.GenModel;
import hex.genmodel.annotations.ModelPojo;
public class %s extends GenModel {
public hex.ModelCategory getModelCategory() { return hex.ModelCategory.Regression; }
public boolean isSupervised() { return true; }
public int nfeatures() { return 19; }
public int nclasses() { return 1; } // use "1" for regression
// Names of columns used by model
public static final String[] NAMES = new String[] {
"Bias",
"MaxWindPeriod",
"ChangeWindDirect",
"PressureChange",
"ChangeTempMag",
"EvapMM",
"MaxWindSpeed",
"Temp9am",
"RelHumid9am",
"Cloud9am",
"WindSpeed9am",
"Pressure9am",
"Temp3pm",
"RelHumid3pm",
"Cloud3pm",
"WindSpeed3pm",
"Pressure3pm",
"RainToday",
"TempRange"
};
// Derived features (we calculate ourselves in score0 implementation)
private static final String[] CALCULATED = new String[] {
"ChangeTemp",
"ChangeTempDir"
};
// Column domains, null means column is numerical
public static final String[][] DOMAINS = new String[][] {
/* Bias */ null,
/* MaxWindPeriod */ {"NA", "earlyAM", "earlyPM", "lateAM", "latePM"},
/* ChangeWindDirect */ {"c", "l", "n", "s"},
/* PressureChange */ {"down", "steady", "up"},
/* ChangeTempMag */ {"large", "small"},
/* EvapMM */ null,
/* MaxWindSpeed */ null,
/* Temp9am */ null,
/* RelHumid9am */ null,
/* Cloud9am */ null,
/* WindSpeed9am */ null,
/* Pressure9am */ null,
/* Temp3pm */ null,
/* RelHumid3pm */ null,
/* Cloud3pm */ null,
/* WindSpeed3pm */ null,
/* Pressure3pm */ null,
/* RainToday */ null,
/* TempRange */ null,
/* RISK_MM */ null
};
private final GenModel glm;
private final GenModel gbm;
// for each sub-model, mapping of the main model input and of the calculated columns to the sub-model input
private final Map<String, int[]> mappings;
// map of feature names to feature indices in the input array
private final Map<String, Integer> featureMap;
/**
* POJO constructor, creates instances of the sub-models and initializes
* helper structures for mapping input schema to the submodel schemas (mapping)
* and creates a map of feature names to indices to make value-lookups in code more readable.
*/
public %s() {
super(NAMES, DOMAINS, "RISK_MM"); // response name goes here
glm = new %s();
gbm = new %s();
mappings = makeMappings(glm, gbm);
featureMap = new HashMap<>(NAMES.length);
for (int i = 0; i < NAMES.length; i++) {
featureMap.put(NAMES[i], i);
}
}
@Override
public String getUUID() { return "MyComplexPojo1"; } // just to show there can be anything here
// Important to override - BUG in POJO import for regression, will not work without this - FIXME
@Override
public int getNumResponseClasses() {
return 1;
}
@Override
public final double[] score0(double[] data, double[] preds) {
// (1) Show how to create derived feature (one numerical, the other one categorical)
// ChangeTemp = Temp3pm - Temp9am
double changeTemp = fNum("Temp3pm", data) - fNum("Temp9am", data);
double changeTempDir = changeTemp >= 0 ? 1 : 0; // changeTempDir is categorical: 0 == "down", 1 == "up"
double[] calculated = {
changeTemp,
changeTempDir
};
// (2) Show how to score multiple models
double[] glmPreds = score0SubModel(glm, data, calculated);
double[] gbmPreds = score0SubModel(gbm, data, calculated);
// (3) Show how to make decisions based on availability of an input (NA handling)
double bias = fNum("Bias", data);
if (!isNA(bias)) { // defined
// (4) Show to plug in a custom formula
preds[0] = glmPreds[0] * bias + (1 - bias) * gbmPreds[0];
} else {
String changeWindDirect = fCat("ChangeWindDirect", data);
// (5) Show how to return default values
if (isNA(changeWindDirect)) { // NA case, use default prediction
preds[0] = 1;
} else { // non-NA case, plug-in a formula based on categorical value
// (6) Show how to handle decisions based on categorical variable (different segments)
switch (changeWindDirect) {
case "c":
case "l":
preds[0] = glmPreds[0] * 2;
break;
case "n":
preds[0] = (glmPreds[0] + gbmPreds[0]) / 2;
break;
case "s":
preds[0] = gbmPreds[0];
break;
default:
preds[0] = -1;
}
}
}
return preds;
}
private static boolean isNA(double val) {
return Double.isNaN(val);
}
private static boolean isNA(String val) {
return val == null;
}
private double fNum(String feature, double[] data) {
Integer idx = featureMap.get(feature);
if (idx == null)
throw new IllegalArgumentException("Column '" + feature + "' is not part of model features.");
return data[idx];
}
private String fCat(String feature, double[] data) {
Integer idx = featureMap.get(feature);
if (idx == null)
throw new IllegalArgumentException("Column '" + feature + "' is not part of model features.");
if (Double.isNaN(data[idx]))
return null;
int level = (int) data[idx];
return DOMAINS[idx][level];
}
/**
* Scores a given sub-model - input is the original input row and also the calculated features.
* Input and calculated feature are mapped to the input of the sub-model.
*/
private double[] score0SubModel(GenModel model, double[] data, double[] calculated) {
int[] mapping = mappings.get(model.getUUID());
double[] subModelData = makeModelInput(data, calculated, mapping);
double[] subModelPreds = new double[model.getPredsSize()];
return model.score0(subModelData, subModelPreds);
}
private Map<String, int[]> makeMappings(GenModel... models) {
Map<String, int[]> mappings = new HashMap<>();
for (GenModel model : models) {
int[] mapping = mapInputNamesToModelNames(model);
mappings.put(model.getUUID(), mapping);
}
return mappings;
}
private static double[] makeModelInput(double[] data, double[] calculated, int[] mapping) {
double[] input = new double[mapping.length];
for (int i = 0; i < input.length; i++) {
int p = mapping[i];
if (p >= 0) {
input[i] = data[p];
} else {
input[i] = calculated[-p - 1];
}
}
return input;
}
private int[] mapInputNamesToModelNames(GenModel subModel) {
int[] map = new int[subModel.nfeatures()];
for (int i = 0; i < map.length; i++) {
String name = subModel._names[i];
int p = indexOf(NAMES, name);
if (p < 0) {
p = indexOf(CALCULATED, name);
assert p >= 0 : "'" + name + "' needs to be one of the sub-model features or be a calculated feature.";
p = -p - 1;
}
map[i] = p;
}
return map;
}
private static int indexOf(String[] ar, String element) {
for (int i = 0; i < ar.length; i++) {
if (ar[i].equals(element))
return i;
}
return -1;
}
}
// ===GLM===
%s
// ===GBM===
%s
"""
# Expand the template and embed POJOs for the submodels in a single java file
def generate_combined_pojo(glm_model, gbm_model):
    """Expand TEMPLATE into a single java file embedding both sub-model POJOs.

    Returns the path of the generated .java file in the results directory.
    """
    glm_pojo_src = get_embeddable_pojo_source(glm_model)
    gbm_pojo_src = get_embeddable_pojo_source(gbm_model)
    results_dir = pyunit_utils.locate("results")
    combined_pojo_name = "Combined_%s_%s" % (glm_model.model_id, gbm_model.model_id)
    combined_pojo_path = os.path.join(results_dir, "%s.java" % combined_pojo_name)
    substitutions = (
        combined_pojo_name, combined_pojo_name,
        glm_model.model_id, gbm_model.model_id,
        glm_pojo_src, gbm_pojo_src,
    )
    with open(combined_pojo_path, "w") as combined_file:
        combined_file.write(TEMPLATE % substitutions)
    return combined_pojo_path
def get_embeddable_pojo_source(model):
    """Download the model's POJO and return its source in embeddable form."""
    results_dir = pyunit_utils.locate("results")
    java_path = os.path.join(results_dir, model.model_id + ".java")
    downloaded_path = model.download_pojo(path=java_path)
    return make_pojo_embeddable(downloaded_path)
# To simplify the workflow we are embedding all models (POJO) in the same Java file
# There can be only one "public" class in a Java file, this method will make the POJO package private
# so that we can put it in the same Java file as the main POJO
def make_pojo_embeddable(pojo_path):
    """Make a downloaded POJO embeddable in another Java file.

    There can be only one public class per Java file, so the POJO's class
    is made package-private. The returned source starts one line before
    the class declaration (keeping the annotation line that precedes it).

    :param pojo_path: path to the downloaded .java POJO source
    :returns: the adjusted POJO source as a single string
    :raises ValueError: if the file contains no ``public class`` line
    """
    with open(pojo_path, 'r') as pojo_file:
        pojo_lines = pojo_file.readlines()
    # Find the class declaration explicitly instead of next(filter(...)),
    # which would raise an opaque StopIteration on malformed input.
    for class_idx, line in enumerate(pojo_lines):
        if line.startswith("public class"):
            break
    else:
        raise ValueError("no 'public class' declaration in %s" % pojo_path)
    # Drop the 'public' modifier to make the class package private.
    pojo_lines[class_idx] = pojo_lines[class_idx].replace(
        "public class", "class", 1)
    return "".join(pojo_lines[class_idx - 1:])
def generate_and_import_combined_pojo():
    """End-to-end check of a hand-combined GLM+GBM POJO.

    Trains a GLM and a GBM on the weather dataset, embeds both POJOs into a
    single wrapper POJO (see TEMPLATE), imports the wrapper back into H2O,
    and verifies its routing logic: the synthetic "Bias" column selects GLM
    (1), GBM (0), or NaN for per-segment blending keyed on ChangeWindDirect.
    """
    if sys.version_info[0] < 3:  # Python 2
        print("This example needs Python 3.x+")
        return
    weather_orig = h2o.import_file(path=pyunit_utils.locate("smalldata/junit/weather.csv"))
    weather = weather_orig  # working copy
    # NOTE(review): features exclude Date/RainTomorrow/Sunshine but not the
    # response RISK_MM — presumably H2O drops y from x; confirm intended.
    features = list(set(weather.names) - {"Date", "RainTomorrow", "Sunshine"})
    features.sort()
    response = "RISK_MM"
    glm_model = H2OGeneralizedLinearEstimator()
    glm_model.train(x=features, y=response, training_frame=weather)
    glm_preds = glm_model.predict(weather)
    gbm_model = H2OGradientBoostingEstimator(ntrees=5)
    gbm_model.train(x=features, y=response, training_frame=weather)
    gbm_preds = gbm_model.predict(weather)
    # Drop columns that we will calculate in POJO manually (we will recreate them in POJO to be the exact same)
    weather = weather.drop("ChangeTemp")
    weather = weather.drop("ChangeTempDir")
    combined_pojo_path = generate_combined_pojo(glm_model, gbm_model)
    print("Combined POJO was stored in: " + combined_pojo_path)
    # FIXME: https://h2oai.atlassian.net/browse/PUBDEV-8561 We need to make this work for upload_mojo as well
    pojo_model = h2o.import_mojo(combined_pojo_path)
    # Testing begins
    # Sanity test - test parameterization that delegates to GLM
    weather["Bias"] = 1  # behave like GLM
    pojo_glm_preds = pojo_model.predict(weather)
    assert_frame_equal(pojo_glm_preds.as_data_frame(), glm_preds.as_data_frame())
    # Sanity test - test parameterization that delegates to GBM
    weather["Bias"] = 0  # behave like GBM
    pojo_gbm_preds = pojo_model.predict(weather)
    assert_frame_equal(pojo_gbm_preds.as_data_frame(), gbm_preds.as_data_frame())
    # Test per-segment specific behavior, segments are defined by ChangeWindDirect
    weather["Bias"] = float("NaN")
    for change_wind_dir in weather["ChangeWindDirect"].levels()[0]:
        weather_cwd = weather[weather["ChangeWindDirect"] == change_wind_dir]
        weather_orig_cwd = weather_orig[weather_orig["ChangeWindDirect"] == change_wind_dir]
        pojo_weather_cwd_preds = pojo_model.predict(weather_cwd)
        if change_wind_dir == "c" or change_wind_dir == "l":
            # segments "c"/"l": wrapper returns 2x the GLM prediction
            expected = glm_model.predict(weather_orig_cwd) * 2
            assert_frame_equal(pojo_weather_cwd_preds.as_data_frame(), expected.as_data_frame())
        elif change_wind_dir == "n":
            # segment "n": wrapper averages GLM and GBM predictions
            expected = (glm_model.predict(weather_orig_cwd) + gbm_model.predict(weather_orig_cwd)) / 2
            assert_frame_equal(pojo_weather_cwd_preds.as_data_frame(), expected.as_data_frame())
        elif change_wind_dir == "s":
            # segment "s": wrapper delegates to GBM unchanged
            expected = gbm_model.predict(weather_orig_cwd)
            assert_frame_equal(pojo_weather_cwd_preds.as_data_frame(), expected.as_data_frame())
# Run under the pyunit harness when executed standalone; the H2O test runner
# imports this module and calls the test function directly.
if __name__ == "__main__":
    pyunit_utils.standalone_test(generate_and_import_combined_pojo)
else:
    generate_and_import_combined_pojo()
|
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import webnotes, json
from webnotes.utils import cint, now, cstr
from webnotes import _
#sql = webnotes.conn.sql
class DocType:
    """Controller for the Profile doctype — a webnotes/ERPNext user account.

    Handles naming (profile name == email), validation, welcome and
    password mails, System Manager role management, and the cleanup done
    when a profile is renamed or deleted.
    NOTE(review): Python-2-era webnotes code (this module uses Py2 raise
    syntax elsewhere); keep changes Py2-compatible.
    """
    def __init__(self, doc, doclist):
        # doc: the Profile document; doclist: doc plus its child records
        # (e.g. the "user_roles" UserRole children)
        self.doc = doc
        self.doclist = doclist

    def autoname(self):
        """set name as email id"""
        if self.doc.name not in ('Guest','Administrator'):
            self.doc.email = self.doc.email.strip()
            self.doc.name = self.doc.email
            if webnotes.conn.exists("Profile", self.doc.name):
                webnotes.msgprint("Name Exists", raise_exception=True)

    def validate(self):
        """Run all profile checks; mail welcome/new-password as appropriate."""
        # remember insert-vs-update before the __islocal flag is cleared on save
        self.in_insert = self.doc.fields.get("__islocal")
        if self.doc.name not in ('Guest','Administrator'):
            self.validate_email_type(self.doc.email)
        self.validate_max_users()
        self.add_system_manager_role()
        self.check_enable_disable()
        if self.in_insert:
            if self.doc.name not in ("Guest", "Administrator"):
                self.send_welcome_mail()
                webnotes.msgprint(_("Welcome Email Sent"))
        else:
            self.email_new_password()
        # never persist the plaintext password on the document
        self.doc.new_password = ""

    def check_enable_disable(self):
        # do not allow disabling administrator/guest
        if not cint(self.doc.enabled) and self.doc.name in ["Administrator", "Guest"]:
            webnotes.msgprint("Hey! You cannot disable user: %s" % self.doc.name,
                raise_exception=1)
        if not cint(self.doc.enabled):
            self.a_system_manager_should_exist()
        # clear sessions if disabled
        # NOTE(review): the guard tests webnotes.login_manager while the call
        # goes through webnotes.local.login_manager — confirm both resolve to
        # the same object in this framework version.
        if not cint(self.doc.enabled) and getattr(webnotes, "login_manager", None):
            webnotes.local.login_manager.logout(user=self.doc.name)

    def validate_max_users(self):
        """don't allow more than max users if set in conf"""
        from webnotes import conf
        # check only when enabling a user
        webnotes.errprint("profile...")  # NOTE(review): debug print leftover?
        if 'max_users' in conf and self.doc.enabled and \
            self.doc.name not in ["Administrator", "Guest"] and \
            cstr(self.doc.user_type).strip() in ("", "System User"):
            # count other currently-enabled system users
            active_users = webnotes.conn.sql("""select count(*) from tabProfile
                where ifnull(enabled, 0)=1 and docstatus<2
                and ifnull(user_type, "System User") = "System User"
                and name not in ('Administrator', 'Guest', %s)""", (self.doc.name,))[0][0]
            if active_users >= conf.max_users and conf.max_users:
                webnotes.msgprint("""
You already have <b>%(active_users)s</b> active users, \
which is the maximum number that you are currently allowed to add. <br /><br /> \
So, to add more users, you can:<br /> \
1. <b>Upgrade to the unlimited users plan</b>, or<br /> \
2. <b>Disable one or more of your existing users and try again</b>""" \
                    % {'active_users': active_users}, raise_exception=1)

    def add_system_manager_role(self):
        # if adding system manager, do nothing
        if not cint(self.doc.enabled) or ("System Manager" in [user_role.role for user_role in
                self.doclist.get({"parentfield": "user_roles"})]):
            return
        # an enabled System User must leave at least one System Manager behind
        if self.doc.user_type == "System User" and not self.get_other_system_managers():
            webnotes.msgprint("""Adding System Manager Role as there must
be atleast one 'System Manager'.""")
            self.doclist.append({
                "doctype": "UserRole",
                "parentfield": "user_roles",
                "role": "System Manager"
            })

    def email_new_password(self):
        # only on update — a fresh insert sends the welcome mail instead
        if self.doc.new_password and not self.in_insert:
            from webnotes.auth import _update_password
            _update_password(self.doc.name, self.doc.new_password)
            self.password_update_mail(self.doc.new_password)
            webnotes.msgprint("New Password Emailed.")

    def on_update(self):
        # owner is always name
        webnotes.conn.set(self.doc, 'owner', self.doc.name)
        webnotes.clear_cache(user=self.doc.name)
        self.doc.account_id=self.doc.account_id.lower()
        # NOTE(review): a large commented-out block that synced this profile
        # into an external `Account` table (built with raw string
        # concatenation, i.e. SQL-injection-prone) used to live here;
        # removed as dead code.

    def reset_password(self):
        """Generate a one-shot reset key and mail the update-password link."""
        from webnotes.utils import random_string, get_url
        key = random_string(32)
        webnotes.conn.set_value("Profile", self.doc.name, "reset_password_key", key)
        self.password_reset_mail(get_url("/update-password?key=" + key))

    def get_other_system_managers(self):
        # enabled System Managers other than Administrator and this profile
        return webnotes.conn.sql("""select distinct parent from tabUserRole user_role
            where role='System Manager' and docstatus<2
            and parent not in ('Administrator', %s) and exists
                (select * from `tabProfile` profile
                where profile.name=user_role.parent and enabled=1)""", (self.doc.name,))

    def get_fullname(self):
        """get first_name space last_name"""
        return (self.doc.first_name or '') + \
            (self.doc.first_name and " " or '') + (self.doc.last_name or '')

    def password_reset_mail(self, link):
        """reset password"""
        txt = """
## Password Reset
Dear %(first_name)s,
Please click on the following link to update your new password:
<a href="%(link)s">%(link)s</a>
Thank you,<br>
%(user_fullname)s
"""
        self.send_login_mail("Your " + webnotes.get_config().get("app_name") + " password has been reset",
            txt, {"link": link})

    def password_update_mail(self, password):
        # NOTE(review): the subject says "has been reset" though this is a
        # password *update* mail — likely a copy-paste; confirm intent before
        # changing the user-facing string.
        txt = """
## Password Update Notification
Dear %(first_name)s,
Your password has been updated. Here is your new password: %(new_password)s
Thank you,<br>
%(user_fullname)s
"""
        self.send_login_mail("Your " + webnotes.get_config().get("app_name") + " password has been reset",
            txt, {"new_password": password})

    def send_welcome_mail(self):
        """send welcome mail to user with password and login url"""
        from webnotes.utils import random_string, get_url
        self.doc.reset_password_key = random_string(32)
        link = get_url("/update-password?key=" + self.doc.reset_password_key)
        txt = """
## %(company)s
Dear %(first_name)s,
A new account has been created for you.
Your login id is: %(user)s
To complete your registration, please click on the link below:
<a href="%(link)s">%(link)s</a>
Thank you,<br>
%(user_fullname)s
"""
        self.send_login_mail("Welcome to " + webnotes.get_config().get("app_name"), txt,
            { "link": link })

    def send_login_mail(self, subject, txt, add_args):
        """send mail with login details"""
        import os
        from webnotes.utils.email_lib import sendmail_md
        from webnotes.profile import get_user_fullname
        from webnotes.utils import get_url
        full_name = get_user_fullname(webnotes.session['user'])
        if full_name == "Guest":
            full_name = "Administrator"
        args = {
            'first_name': self.doc.first_name or self.doc.last_name or "user",
            'user': self.doc.name,
            'company': webnotes.conn.get_default('company') or webnotes.get_config().get("app_name"),
            'login_url': get_url(),
            'product': webnotes.get_config().get("app_name"),
            'user_fullname': full_name
        }
        args.update(add_args)
        # mails triggered by Administrator/Guest fall back to the default sender
        sender = webnotes.session.user not in ("Administrator", "Guest") and webnotes.session.user or None
        sendmail_md(recipients=self.doc.email, sender=sender, subject=subject, msg=txt % args)

    def a_system_manager_should_exist(self):
        if not self.get_other_system_managers():
            webnotes.msgprint(_("""Hey! There should remain at least one System Manager"""),
                raise_exception=True)

    def on_trash(self):
        webnotes.clear_cache(user=self.doc.name)
        if self.doc.name in ["Administrator", "Guest"]:
            # NOTE(review): `self.name` below looks like a bug — the controller
            # has no `name` attribute; this should almost certainly be
            # `self.doc.name` (it would raise AttributeError as written).
            webnotes.msgprint("""Hey! You cannot delete user: %s""" % (self.name, ),
                raise_exception=1)
        self.a_system_manager_should_exist()
        # disable the user and log him/her out
        self.doc.enabled = 0
        if getattr(webnotes.local, "login_manager", None):
            webnotes.local.login_manager.logout(user=self.doc.name)
        # delete their password
        webnotes.conn.sql("""delete from __Auth where user=%s""", self.doc.name)
        # delete todos
        webnotes.conn.sql("""delete from `tabToDo` where owner=%s""", self.doc.name)
        webnotes.conn.sql("""update tabToDo set assigned_by=null where assigned_by=%s""",
            self.doc.name)
        # delete events
        webnotes.conn.sql("""delete from `tabEvent` where owner=%s
            and event_type='Private'""", self.doc.name)
        webnotes.conn.sql("""delete from `tabEvent User` where person=%s""", self.doc.name)
        # delete messages
        webnotes.conn.sql("""delete from `tabComment` where comment_doctype='Message'
            and (comment_docname=%s or owner=%s)""", (self.doc.name, self.doc.name))

    def before_rename(self, olddn, newdn, merge=False):
        webnotes.clear_cache(user=olddn)
        self.validate_rename(olddn, newdn)

    def validate_rename(self, olddn, newdn):
        # do not allow renaming administrator and guest
        if olddn in ["Administrator", "Guest"]:
            webnotes.msgprint("""Hey! You are restricted from renaming the user: %s""" % \
                (olddn, ), raise_exception=1)
        self.validate_email_type(newdn)

    def validate_email_type(self, email):
        from webnotes.utils import validate_email_add
        email = email.strip()
        if not validate_email_add(email):
            webnotes.msgprint("%s is not a valid email id" % email)
            raise Exception

    def after_rename(self, olddn, newdn, merge=False):
        # rewrite owner/modified_by in every table, then fix email/__Auth rows
        tables = webnotes.conn.sql("show tables")
        for tab in tables:
            desc = webnotes.conn.sql("desc `%s`" % tab[0], as_dict=1)
            has_fields = []
            for d in desc:
                if d.get('Field') in ['owner', 'modified_by']:
                    has_fields.append(d.get('Field'))
            for field in has_fields:
                webnotes.conn.sql("""\
                    update `%s` set `%s`=%s
                    where `%s`=%s""" % \
                    (tab[0], field, '%s', field, '%s'), (newdn, olddn))
        # set email
        webnotes.conn.sql("""\
            update `tabProfile` set email=%s
            where name=%s""", (newdn, newdn))
        # update __Auth table
        if not merge:
            webnotes.conn.sql("""update __Auth set user=%s where user=%s""", (newdn, olddn))

    def add_roles(self, *roles):
        # NOTE(review): appends/saves via `self.bean` — confirm the controller
        # actually carries a `bean` attribute in this framework version.
        for role in roles:
            if role in [d.role for d in self.doclist.get({"doctype":"UserRole"})]:
                continue
            self.bean.doclist.append({
                "doctype": "UserRole",
                "parentfield": "user_roles",
                "role": role
            })
        self.bean.save()
@webnotes.whitelist()
def get_languages():
    """Return all known language names, sorted, with a blank first entry."""
    from webnotes.translate import get_lang_dict
    # sorted() works for both a Py2 list and a Py3 dict view; the previous
    # `.keys()` + `.sort()` pattern breaks on Python 3 (views have no .sort())
    return [""] + sorted(get_lang_dict().keys())
@webnotes.whitelist()
def get_all_roles(arg=None):
    """return all roles"""
    rows = webnotes.conn.sql("""select name from tabRole
        where name not in ('Administrator', 'Guest', 'All') order by name""")
    return [row[0] for row in rows]
@webnotes.whitelist()
def get_user_roles(arg=None):
    """get roles for a user"""
    uid = webnotes.form_dict['uid']
    return webnotes.get_roles(uid)
@webnotes.whitelist()
def get_perm_info(arg=None):
    """get permission info"""
    role = webnotes.form_dict['role']
    query = """select parent, permlevel, `read`, `write`, submit,
        cancel, amend from tabDocPerm where role=%s
        and docstatus<2 order by parent, permlevel"""
    return webnotes.conn.sql(query, role, as_dict=1)
@webnotes.whitelist(allow_guest=True)
def update_password(new_password, key=None, old_password=None):
    """Change a user's password, authorised by a reset key or the old password."""
    # verify old password
    if key:
        user = webnotes.conn.get_value("Profile", {"reset_password_key":key})
        if not user:
            return _("Cannot Update: Incorrect / Expired Link.")
    elif old_password:
        user = webnotes.session.user
        credentials_ok = webnotes.conn.sql("""select user from __Auth where password=password(%s)
            and user=%s""", (old_password, user))
        if not credentials_ok:
            return _("Cannot Update: Incorrect Password")
    from webnotes.auth import _update_password
    _update_password(user, new_password)
    # the reset key is single-use: clear it once the password is set
    webnotes.conn.set_value("Profile", user, "reset_password_key", "")
    return _("Password Updated")
@webnotes.whitelist(allow_guest=True)
def sign_up(email, full_name):
    """Guest signup endpoint: create an enabled Website User profile.

    Returns a translated status string; the generated random password is
    delivered via the profile's welcome mail on insert.
    """
    profile = webnotes.conn.get("Profile", {"email": email})
    if profile:
        if profile.disabled:
            return _("Registered but disabled.")
        else:
            return _("Already Registered")
    else:
        # crude rate limit on new signups
        # NOTE(review): TIMEDIFF(now, modified) > '1:00:00' counts profiles
        # modified MORE than an hour ago — looks inverted; confirm intent.
        if webnotes.conn.sql("""select count(*) from tabProfile where
            TIMEDIFF(%s, modified) > '1:00:00' """, now())[0][0] > 200:
            # parenthesized raise is valid on both Python 2 and 3
            # (was the Py2-only `raise Exception, "..."` statement form)
            raise Exception("Too Many New Profiles")
        from webnotes.utils import random_string
        profile = webnotes.bean({
            "doctype":"Profile",
            "email": email,
            "first_name": full_name,
            "enabled": 1,
            "new_password": random_string(10),
            "user_type": "Website User"
        })
        profile.ignore_permissions = True
        profile.insert()
        return _("Registration Details Emailed.")
@webnotes.whitelist(allow_guest=True)
def reset_password(user):
    """Guest-accessible endpoint to mail a password-reset link to a user."""
    # NOTE(review): the `user` parameter is immediately overwritten from
    # form_dict, so the argument itself is never used — confirm callers.
    user = webnotes.form_dict.get('user', '')
    if user in ["demo@erpnext.com", "Administrator"]:
        return "Not allowed"
    if webnotes.conn.sql("""select name from tabProfile where name=%s""", user):
        # Hack! — temporarily elevate the session so the controller may set
        # the reset key on another user's profile
        webnotes.session["user"] = "Administrator"
        profile = webnotes.bean("Profile", user)
        profile.get_controller().reset_password()
        return "Password reset details sent to your email."
    else:
        return "No such user (%s)" % user
def profile_query(doctype, txt, searchfield, start, page_len, filters):
    """Link-field search for Profile: match name or full name against txt.

    NOTE(review): `searchfield`, `txt`, `start` and `page_len` are
    interpolated straight into the SQL via %-formatting — SQL-injection
    prone if any of them carries untrusted input; this should move to
    parameterized queries.
    """
    from webnotes.widgets.reportview import get_match_cond
    return webnotes.conn.sql("""select name, concat_ws(' ', first_name, middle_name, last_name)
        from `tabProfile`
        where ifnull(enabled, 0)=1
            and docstatus < 2
            and name not in ('Administrator', 'Guest')
            and user_type != 'Website User'
            and (%(key)s like "%(txt)s"
                or concat_ws(' ', first_name, middle_name, last_name) like "%(txt)s")
            %(mcond)s
        order by
            case when name like "%(txt)s" then 0 else 1 end,
            case when concat_ws(' ', first_name, middle_name, last_name) like "%(txt)s"
                then 0 else 1 end,
            name asc
        limit %(start)s, %(page_len)s""" % {'key': searchfield, 'txt': "%%%s%%" % txt,
        'mcond':get_match_cond(doctype, searchfield), 'start': start, 'page_len': page_len})
def get_total_users():
    """Returns total no. of system users"""
    rows = webnotes.conn.sql("""select count(*) from `tabProfile`
        where enabled = 1 and user_type != 'Website User'
        and name not in ('Administrator', 'Guest')""")
    return rows[0][0]
def get_active_users():
    """Returns No. of system users who logged in, in the last 3 days"""
    rows = webnotes.conn.sql("""select count(*) from `tabProfile`
        where enabled = 1 and user_type != 'Website User'
        and name not in ('Administrator', 'Guest')
        and hour(timediff(now(), last_login)) < 72""")
    return rows[0][0]
def get_website_users():
    """Returns total no. of website users"""
    rows = webnotes.conn.sql("""select count(*) from `tabProfile`
        where enabled = 1 and user_type = 'Website User'""")
    return rows[0][0]
def get_active_website_users():
    """Returns No. of website users who logged in, in the last 3 days"""
    rows = webnotes.conn.sql("""select count(*) from `tabProfile`
        where enabled = 1 and user_type = 'Website User'
        and hour(timediff(now(), last_login)) < 72""")
    return rows[0][0]
|
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2015, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from mock import patch
import unittest
import pytest
from bokeh.plotting import figure
from bokeh.models import GlyphRenderer, Label, Plot, LinearAxis
from bokeh.models.ranges import FactorRange, DataRange1d, Range1d
from bokeh.models.scales import CategoricalScale, LinearScale, LogScale
from bokeh.models.tools import PanTool, Toolbar
class TestPlotSelect(unittest.TestCase):
    """Tests for figure.select(): argument handling and delegation to find()."""
    def setUp(self):
        # minimal figure with one named glyph and a pan tool to select against
        self._plot = figure(tools='pan')
        self._plot.circle([1,2,3], [3,2,1], name='foo')

    @patch('bokeh.models.plots.find')
    def test_string_arg(self, mock_find):
        # a bare string selects by name
        self._plot.select('foo')
        self.assertTrue(mock_find.called)
        self.assertEqual(mock_find.call_args[0][1], dict(name='foo'))

    @patch('bokeh.models.plots.find')
    def test_type_arg(self, mock_find):
        # a model class selects by type
        self._plot.select(PanTool)
        self.assertTrue(mock_find.called)
        self.assertEqual(mock_find.call_args[0][1], dict(type=PanTool))

    @patch('bokeh.models.plots.find')
    def test_kwargs(self, mock_find):
        # keyword arguments are forwarded to find() unchanged
        kw = dict(name='foo', type=GlyphRenderer)
        self._plot.select(**kw)
        self.assertTrue(mock_find.called)
        self.assertEqual(mock_find.call_args[0][1], kw)

    def test_too_many_args(self):
        # at most one positional selector is accepted
        with self.assertRaises(TypeError) as cm:
            self._plot.select('foo', 'bar')
        self.assertEqual(
            'select accepts at most ONE positional argument.',
            str(cm.exception)
        )

    def test_no_input(self):
        # calling with nothing at all is an error
        with self.assertRaises(TypeError) as cm:
            self._plot.select()
        self.assertEqual(
            'select requires EITHER a positional argument, OR keyword arguments.',
            str(cm.exception)
        )

    def test_arg_and_kwarg(self):
        # mixing a positional selector with keyword selectors is an error
        with self.assertRaises(TypeError) as cm:
            self._plot.select('foo', type=PanTool)
        self.assertEqual(
            'select accepts EITHER a positional argument, OR keyword arguments (not both).',
            str(cm.exception)
        )
def test_plot_add_layout_raises_error_if_not_render():
    """add_layout rejects objects that are not renderers."""
    plot = figure()
    with pytest.raises(ValueError):
        plot.add_layout(Range1d())

def test_plot_add_layout_raises_error_if_plot_already_on_annotation():
    """add_layout rejects an annotation already attached to a plot."""
    plot = figure()
    with pytest.raises(ValueError):
        plot.add_layout(Label(plot=plot))

def test_plot_add_layout_adds_label_to_plot_renderers():
    """A layout-added Label ends up in plot.renderers."""
    plot = figure()
    label = Label()
    plot.add_layout(label)
    assert label in plot.renderers

def test_plot_add_layout_adds_axis_to_renderers_and_side_renderers():
    """An axis added with a side lands in both renderers and that side list."""
    plot = figure()
    axis = LinearAxis()
    plot.add_layout(axis, 'left')
    assert axis in plot.renderers
    assert axis in plot.left
def test_sizing_mode_property_is_fixed_by_default():
    """sizing_mode should default to 'fixed'."""
    plot = figure()
    # was `is 'fixed'` — identity comparison against a str literal only
    # works via CPython interning accident; use equality instead
    assert plot.sizing_mode == 'fixed'
class BaseTwinAxis(object):
    """Base class for testing extra ranges"""
    def verify_axis(self, axis_name):
        # register a range under extra_{x,y}_ranges and check it sticks
        plot = Plot()  # no need for setUp()
        range_obj = getattr(plot, 'extra_{}_ranges'.format(axis_name))
        range_obj['foo_range'] = self.get_range_instance()
        self.assertTrue(range_obj['foo_range'])

    def test_x_range(self):
        self.verify_axis('x')

    def test_y_range(self):
        self.verify_axis('y')

    @staticmethod
    def get_range_instance():
        # subclasses supply the concrete Range instance to register
        raise NotImplementedError
class TestCategoricalTwinAxis(BaseTwinAxis, unittest.TestCase):
    """Test whether extra x and y ranges can be categorical"""
    @staticmethod
    def get_range_instance():
        # categorical range with two factors
        return FactorRange('foo', 'bar')
class TestLinearTwinAxis(BaseTwinAxis, unittest.TestCase):
    """Test whether extra x and y ranges can be Range1d"""
    @staticmethod
    def get_range_instance():
        # plain numeric range
        return Range1d(0, 42)
def test_setting_logo_on_plot_declaration_sets_them_on_toolbar():
    """Plot(logo=...) is forwarded to the implicit toolbar (deprecated path)."""
    plot = Plot(logo='grey')
    assert plot.toolbar.logo == 'grey', "Remove this test when deprecation cycle is over"

def test_setting_tools_on_plot_declaration_sets_them_on_toolbar():
    """Plot(tools=[...]) is forwarded to the implicit toolbar (deprecated path)."""
    pan = PanTool()
    plot = Plot(tools=[pan])
    assert plot.toolbar.tools == [pan], "Remove this test when deprecation cycle is over"

def test_plot_raises_error_if_toolbar_and_logo_are_set():
    # logo is toolbar-level config; combining with an explicit toolbar is invalid
    with pytest.raises(ValueError):
        Plot(logo='grey', toolbar=Toolbar())

def test_plot_raises_error_if_toolbar_and_tools_are_set():
    # tools belong to the toolbar; combining with an explicit toolbar is invalid
    with pytest.raises(ValueError):
        Plot(tools=[PanTool()], toolbar=Toolbar())

def test_plot_with_no_title_specified_creates_an_empty_title():
    plot = Plot()
    assert plot.title.text == ""
def test_plot__scale_classmethod():
    """Plot._scale maps each scale-type name to the right Scale class."""
    expected = {
        "auto": LinearScale,
        "linear": LinearScale,
        "log": LogScale,
        "categorical": CategoricalScale,
    }
    for scale_name, scale_cls in expected.items():
        assert isinstance(Plot._scale(scale_name), scale_cls)
    with pytest.raises(ValueError):
        Plot._scale("malformed_type")
def test_plot_x_mapper_type_kwarg_sets_x_scale():
    """Deprecated *_mapper_type kwargs still configure the matching scale."""
    plot = Plot(x_mapper_type="linear")
    assert isinstance(plot.x_scale, LinearScale)

def test_plot_y_mapper_type_kwarg_sets_y_scale():
    plot = Plot(y_mapper_type="log")
    assert isinstance(plot.y_scale, LogScale)

def test_plot_raises_error_if_x_mapper_type_and_x_scale_are_set():
    # mapper_type and scale are mutually exclusive ways to set the scale
    with pytest.raises(ValueError):
        Plot(x_mapper_type="linear", x_scale=LinearScale())

def test_plot_raises_error_if_y_mapper_type_and_y_scale_are_set():
    with pytest.raises(ValueError):
        Plot(y_mapper_type="log", y_scale=LogScale())

def test__check_required_scale_has_scales():
    # a default Plot carries both scales; validation has no complaints
    plot = Plot()
    check = plot._check_required_scale()
    assert check == []

def test__check_required_scale_missing_scales():
    # explicitly removing both scales must be flagged
    plot = Plot(x_scale=None, y_scale=None)
    check = plot._check_required_scale()
    assert check != []

def test__check_compatible_scale_and_ranges_compat_numeric():
    # numeric scales pair cleanly with numeric ranges
    plot = Plot(x_scale=LinearScale(), x_range=Range1d())
    check = plot._check_compatible_scale_and_ranges()
    assert check == []
    plot = Plot(y_scale=LogScale(), y_range=DataRange1d())
    check = plot._check_compatible_scale_and_ranges()
    assert check == []

def test__check_compatible_scale_and_ranges_compat_factor():
    # categorical scale pairs cleanly with a factor range
    plot = Plot(x_scale=CategoricalScale(), x_range=FactorRange())
    check = plot._check_compatible_scale_and_ranges()
    assert check == []

def test__check_compatible_scale_and_ranges_incompat_numeric_scale_and_factor_range():
    plot = Plot(x_scale=LinearScale(), x_range=FactorRange())
    check = plot._check_compatible_scale_and_ranges()
    assert check != []

def test__check_compatible_scale_and_ranges_incompat_factor_scale_and_numeric_range():
    plot = Plot(x_scale=CategoricalScale(), x_range=DataRange1d())
    check = plot._check_compatible_scale_and_ranges()
    assert check != []
|
|
#!/usr/bin/env python
# encoding: utf-8
"""
tesla.py
Copyright (c) 2015 Rob Mason
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Twitter: @Teslaliving
Blog: http://teslaliving.net
Description:
Monitor your Tesla via the unofficial Tesla API
Supply your myTesla login information via environment variables:
TESLA_EMAIL
TESLA_PASSWORD
Uses third party library:
pip install requests_oauthlib
https://github.com/gglockner/teslajson
Note: Use "requests" branch due to recent API changes
See also the unofficial Tesla API docs:
http://docs.timdorr.apiary.io/#
Examples:
./tesla.py --pluggedin - Check if your Tesla is plugged in
I use cron to run a bunch of these, example:
00 22 * * * source ~/.bashrc;cd "~/Documents/Data";/usr/bin/python \
~/Documents/Code/evtools/tesla.py --pluggedin
Use --help for all options
"""
import os
import json
import argparse
import fcntl
import logging
from logging.handlers import RotatingFileHandler
import traceback
import time
import random
from urllib.error import HTTPError
import datetime
from tl_tweets import tweet_string
from tl_email import email
from tl_weather import get_daytime_weather_data
import glob
import sys
basepath = os.path.dirname(sys.argv[0])
sys.path.append(os.path.join(basepath, 'teslajson'))
import teslajson
# Where logging output from this tool goes. Modify path as needed
LOGFILE = os.path.expanduser(os.environ['TESLA_LOGFILE'])
# Data file containing all the saved state information
DATA_FILE = os.path.expanduser(os.getenv('TESLA_DATA_FILE', "tesla.json"))
# Subdirectory where Tesla state dumps will be saved
DUMP_DIR = "tesla_state_dumps"
# Updated with your car name (API needs car name)
CAR_NAME = os.environ['TESLA_CAR_NAME']
# Some of the tweets attach pictures. They're randomly chosen from this path
PICTURES_PATH = os.path.expanduser(os.getenv('TESLA_PICTURES_PATH', "images/favorites"))
# Watermarked firmware-version images, used by version-related tweets
VERSION_IMAGES = glob.glob('images/versions/*-watermark*')
# Logging setup: rotating file log at DEBUG level, 5 MB per file, 8 backups
DEF_FRMT = "%(asctime)s : %(levelname)-8s : %(funcName)-25s:%(lineno)-4s: %(message)s"
loglevel = logging.DEBUG
logT = logging.getLogger("tesla")
loghandler = RotatingFileHandler(LOGFILE, maxBytes=5 * 1024 * 1024, backupCount=8)
loghandler.setFormatter(logging.Formatter(DEF_FRMT))
logT.addHandler(loghandler)
logT.setLevel(loglevel)
# Get the collection of pictures
def get_pics():
    """Return candidate tweet-picture paths, or [None] when none exist."""
    if not os.path.exists(PICTURES_PATH):
        return [None, ]
    return [os.path.join(PICTURES_PATH, entry)
            for entry in os.listdir(PICTURES_PATH) if not entry.startswith('.')]
# Set to true to disable tweets/data file updates
DEBUG_MODE = 'DEBUG_MODE' in os.environ
# Retry policy used by callers when the Tesla API is flaky
MAX_RETRIES = 3
RETRY_SLEEP = 10
# Get Teslamotors.com login information from environment
TESLA_EMAIL = None
TESLA_PASSWORD = None
if 'TESLA_EMAIL' in os.environ:
    TESLA_EMAIL = os.environ['TESLA_EMAIL']
if 'TESLA_PASSWORD' in os.environ:
    TESLA_PASSWORD = os.environ['TESLA_PASSWORD']
# fail fast: everything below requires valid credentials
if not TESLA_EMAIL or not TESLA_PASSWORD:
    raise Exception("Missing Tesla login information")
def mail_exception(e):
    """Log the current exception and email a failure report.

    :param e: the exception (or message) to include in the report body
    """
    logT.exception("Exception encountered")
    message = "There was a problem during tesla updates:\n\n"
    # str(e): concatenating the exception object itself to a str raises
    # TypeError on Python 3, losing the original error
    message += str(e)
    message += "\nPlease investigate."
    if DEBUG_MODE:
        raise Exception("email issues")
    else:
        email(email=TESLA_EMAIL, message=message, subject="Tesla script error")
def establish_connection(token=None):
    """Open an authenticated teslajson connection.

    :param token: unused — kept for backward compatibility (see TODO below)
    :returns: a teslajson.Connection
    """
    logT.debug("Connecting to Tesla")
    c = teslajson.Connection(email=TESLA_EMAIL, password=TESLA_PASSWORD)
    # TODO: He removed support for access_token, checking on future support
    # c = teslajson.Connection(email=TESLA_EMAIL, password=TESLA_PASSWORD, access_token=token)
    logT.debug(" connected. Token: %s", get_access_token(c))
    return c
def get_access_token(c):
    """Access tokens are not exposed by the current teslajson API; always None."""
    return None
def tweet_major_mileage(miles, get_tweet=False, model_desc="Model S 75D"):
    """Tweet (or return) a celebratory message for a mileage milestone.

    :param miles: milestone odometer value
    :param get_tweet: if True, return (message, pic) instead of tweeting
    :param model_desc: car description used in the message — generalized
        from the previously hard-coded "Model S 75D"; default preserves
        the old behavior
    """
    m = "{:,}".format(miles)
    a = random.choice(["an amazing", "an awesome", "a fantastic", "a wonderful"])
    message = "Just passed %s miles on my %s! It's been %s experience. " \
              "#Tesla @TeslaMotors @Teslarati #bot" % (m, model_desc, a)
    pic = random.choice(get_pics())
    if DEBUG_MODE:
        print("Would tweet:\n%s with pic: %s" % (message, pic))
        logT.debug("DEBUG mode, not tweeting: %s with pic: %s", message, pic)
    else:
        logT.info("Tweeting: %s with pic: %s", message, pic)
        if get_tweet:
            return message, pic
        else:
            tweet_string(message=message, log=logT, media=pic)
def dump_current_tesla_status(c):
    """Build a human-readable dump of every vehicle's full API state."""
    chunks = []
    sub_states = ["vehicle_state", "charge_state", "climate_state", "drive_state", "gui_settings"]
    for v in c.vehicles:
        chunks.append("%s status at %s\n" % (v["display_name"], datetime.datetime.today()))
        # top-level vehicle record fields (name already in the header line)
        for field in v:
            if field != 'display_name':
                chunks.append(" %s: %s\n" % (field, v[field]))
        # each sub-state gets its own indented section
        for s in sub_states:
            chunks.append(" %s:\n" % s)
            d = v.data_request("%s" % s)
            for field in d:
                chunks.append(" %s: %s\n" % (field, d[field]))
    return "".join(chunks)
def check_tesla_fields(c, data):
    """Detect fields newly added to the Tesla API and email a summary.

    Compares every vehicle's top-level record and its sub-state fields
    against data["known_fields"] (field name -> YYYYMMDD first seen),
    recording anything new.

    :param c: teslajson connection
    :param data: persisted state dict; mutated in place
    :returns: (data_changed, data) — whether the dict needs saving
    """
    data_changed = False
    new_fields = []
    t = datetime.date.today()
    ts = t.strftime("%Y%m%d")
    if not "known_fields" in data:
        data["known_fields"] = {}
        data_changed = True
    vehicles = c.vehicles
    for v in vehicles:
        logT.debug(" Processing %s" % v["display_name"])
        # top-level vehicle record fields
        for i in v:
            if i not in data["known_fields"]:
                logT.debug(" found new field %s. Value: %s", i, v[i])
                new_fields.append(i)
                data["known_fields"][i] = ts
                data_changed = True
        # each sub-state endpoint's fields
        for s in ["vehicle_state", "charge_state", "climate_state", "drive_state", "gui_settings"]:
            logT.debug(" Checking %s" % s)
            d = v.data_request("%s" % s)
            for i in d:
                if i not in data["known_fields"]:
                    logT.debug(" found new field %s. Value: %s", i, d[i])
                    new_fields.append(i)
                    data["known_fields"][i] = ts
                    data_changed = True
    if len(new_fields) > 0:
        m = "Found %s new Tesla API fields:\n" % "{:,}".format(len(new_fields))
        for i in new_fields:
            m += "\t%s\n" % i
        m += "\nRegards,\nRob"
        email(email=TESLA_EMAIL, message=m, subject="New Tesla API fields detected")
    else:
        logT.debug(" No new API fields detected.")
    return data_changed, data
def get_temps(c, car):
    """Return (inside_temp, outside_temp) in Fahrenheit for the named car.

    Side effect: briefly starts the AC (to get fresh climate readings)
    and stops it again. Returns (None, None) if the car is not found.
    """
    inside_temp = None
    outside_temp = None
    for v in c.vehicles:
        if v["display_name"] == car:
            res = v.command("auto_conditioning_start")
            logT.debug("AC start: %s", res)
            time.sleep(5)  # give the car a moment to report climate data
            d = v.data_request("climate_state")
            logT.debug("Climate: %s", d)
            # API reports Celsius; convert to Fahrenheit
            inside_temp = 9.0 / 5.0 * d["inside_temp"] + 32
            outside_temp = 9.0 / 5.0 * d["outside_temp"] + 32
            res = v.command("auto_conditioning_stop")
            logT.debug("AC stop: %s", res)
    return inside_temp, outside_temp
def trigger_garage_door(c, car):
    """Toggle homelink (garage door) for the named car."""
    logT.debug("Triggering garage door for %s", car)
    for vehicle in c.vehicles:
        if vehicle["display_name"] != car:
            continue
        res = vehicle.command("trigger_homelink")
        logT.debug("Garage door trigger: %s", res)
        return
def trigger_sunroof(c, car, state):
    """Set the sunroof state for the named car.

    :param state: sunroof control state string passed to the API
    """
    logT.debug("Setting sunroof to %s for %s", state, car)
    for v in c.vehicles:
        if v["display_name"] == car:
            cmd = {"state": state}
            res = v.command("sun_roof_control", data=cmd)
            # fixed copy-paste: this previously logged "Garage door trigger"
            logT.debug("Sunroof trigger: %s", res)
            return
def get_odometer(c, car):
    """Return the named car's odometer reading as an int (None if not found)."""
    reading = None
    for vehicle in c.vehicles:
        if vehicle["display_name"] == car:
            vstate = vehicle.data_request("vehicle_state")
            reading = int(vstate["odometer"])
            logT.debug("Mileage: %s", "{:,}".format(int(reading)))
    return reading
def is_plugged_in(c, car):
    """Return True when the named car appears to have the charge cable attached.

    charge_port_latch and charge_port_door_open have each been unreliable on
    their own, so both are combined with the reported charging state.
    """
    result = False
    for vehicle in c.vehicles:
        if vehicle["display_name"] != car:
            continue
        charge = vehicle.data_request("charge_state")
        door_open = charge["charge_port_latch"] == "Disengaged" or charge["charge_port_door_open"]
        charging_state = charge["charging_state"]
        result = door_open and charging_state != "Disconnected"
        logT.debug("Door unlatched: %s. State: %s", door_open, charging_state)
        logT.debug("Latch: %s Door open: %s", charge["charge_port_latch"], charge["charge_port_door_open"])
    return result
def is_charging(c, car):
    """Return True when the named car is actively charging or has finished a charge."""
    charging = False
    for vehicle in c.vehicles:
        if vehicle["display_name"] == car:
            charge = vehicle.data_request("charge_state")
            logT.debug(" Charging State: %s", charge["charging_state"])
            if charge["charging_state"] in ("Charging", "Complete"):
                charging = True
    return charging
def get_current_state(c, car, include_temps=False):
    """Collect a snapshot of odometer, firmware and charge data for *car*.

    Sleeping vehicles are skipped.  When include_temps is True the cabin and
    outside temperatures are sampled as well (which briefly runs the HVAC).
    Returns a dict, empty when no awake vehicle matched.
    """
    snapshot = {}
    for vehicle in c.vehicles:
        if vehicle["display_name"] != car or vehicle['state'] == 'asleep':
            continue
        vstate = vehicle.data_request("vehicle_state")
        snapshot["odometer"] = vstate["odometer"]
        snapshot["version"] = vstate["car_version"]
        if include_temps:
            snapshot["inside_temp"], snapshot["outside_temp"] = get_temps(c, car)
        charge = vehicle.data_request("charge_state")
        for out_key, api_key in (("soc", "battery_level"),
                                 ("ideal_range", "ideal_battery_range"),
                                 ("rated_range", "battery_range"),
                                 ("estimated_range", "est_battery_range"),
                                 ("charge_energy_added", "charge_energy_added"),
                                 ("charge_miles_added_ideal", "charge_miles_added_ideal"),
                                 ("charge_miles_added_rated", "charge_miles_added_rated")):
            snapshot[out_key] = charge[api_key]
        logT.debug(snapshot)
    return snapshot
def load_data():
    """Load the persisted state database, migrating older layouts.

    Returns a dict guaranteed to contain the keys daily_state_am,
    daily_state_pm, day_charges and charging.
    """
    if os.path.exists(DATA_FILE):
        logT.debug("Loading existing tesla database")
        # Use a context manager so the file handle is closed deterministically
        # (previously json.load(open(...)) leaked the handle).
        with open(DATA_FILE, "r") as fp:
            data = json.load(fp)
        logT.debug(" loaded")
    else:
        logT.debug("No existing tesla database found")
        data = {'daily_state': {}}
    # Migrate/repair older database layouts.
    if 'daily_state_pm' not in data:
        data['daily_state_pm'] = {}
    if 'daily_state_am' not in data:
        data['daily_state_am'] = {}
    if 'config' in data:
        del data['config']
    if "day_charges" not in data:
        data["day_charges"] = 0
    if "charging" not in data:
        data["charging"] = False
    return data
def save_data(data):
    """Persist the state database: write a temp file, then swap it into place.

    In DEBUG_MODE nothing is written.
    """
    logT.debug("Save tesla database")
    if DEBUG_MODE:
        logT.debug(" Skipped saving due to debug mode")
        return
    # Write to a temp file first so a crash mid-dump cannot corrupt DATA_FILE;
    # the context manager closes (and flushes) the handle before the rename
    # (previously the handle from open() was leaked).
    with open(DATA_FILE + ".tmp", "w") as fp:
        json.dump(data, fp)
    if os.path.exists(DATA_FILE):
        os.remove(DATA_FILE)
    os.rename(DATA_FILE + ".tmp", DATA_FILE)
_lock_fp = None  # holds the lock file handle so the flock stays alive


def get_lock():
    """Block until this process holds the exclusive /tmp/tesla.lock flock.

    Retries roughly every 30 seconds, giving up after 10 attempts.  The open
    file object is stored in a module-level global: if it stayed a local it
    would be garbage collected when this function returns, closing the file
    and silently releasing the lock (CPython closes files on refcount zero).
    """
    global _lock_fp
    attempts_left = 10
    while True:
        fp = open('/tmp/tesla.lock', 'w')
        try:
            fcntl.flock(fp.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
            _lock_fp = fp
            return
        except (IOError, OSError):
            # Lock is held elsewhere; close this handle (previously it leaked
            # one handle per retry) and wait before trying again.
            fp.close()
            attempts_left -= 1
            if attempts_left == 0:
                raise Exception("Lock file not getting released. Please investigate")
            logT.debug("Someone else is running this tool right now. Sleeping")
            time.sleep(30)
def remove_lock():
    """Best-effort removal of the lock file; a missing file is not an error."""
    try:
        os.remove('/tmp/tesla.lock')
    except OSError:
        # Only filesystem errors are expected here; the previous bare except
        # would also have swallowed things like KeyboardInterrupt.
        pass
def report_yesterday(data):
    """Build the daily tweet (message, picture path) from yesterday's driving.

    Compares this morning's snapshot against yesterday morning's.  Returns
    (None, None) when either snapshot is missing.  Efficiency (Wh/mi) is
    only reported when exactly one charge happened since the last report and
    the figure is plausible.
    """
    # Report on yesterdays mileage/efficiency
    t = datetime.date.today()
    today_ts = t.strftime("%Y%m%d")
    t = t + datetime.timedelta(days=-1)
    yesterday_ts = t.strftime("%Y%m%d")
    if today_ts not in data["daily_state_am"] or yesterday_ts not in data["daily_state_am"]:
        logT.debug("Skipping yesterday tweet due to missing items")
        m = None
        pic = None
    else:
        miles_driven = data["daily_state_am"][today_ts]["odometer"] - data["daily_state_am"][yesterday_ts][
            "odometer"]
        kw_used = data["daily_state_am"][today_ts]["charge_energy_added"]
        if miles_driven > 200:
            # Long distances get the "road trip" message, no efficiency math.
            m = "Yesterday I drove my #Tesla %s miles on a road trip! " \
                "@Teslamotors #bot" % ("{:,}".format(int(miles_driven)))
        elif miles_driven == 0:
            mileage = data["daily_state_am"][today_ts]["odometer"]
            today_ym = datetime.date.today()
            start_ym = datetime.date(2014, 4, 21)
            ownership_months = int((today_ym - start_ym).days / 30)
            m = "Yesterday my #Tesla had a day off. Current mileage is %s miles after %d months " \
                "@Teslamotors #bot" % ("{:,}".format(int(mileage)), ownership_months)
        elif data["day_charges"] == 0 or data["day_charges"] > 1:
            # Need to skip efficiency stuff here if car didnt charge last night or we charged more than once
            # TODO: Could save prior efficiency from last charge and use that
            day = yesterday_ts
            # 21:00 yesterday is used as the reference time for the weather lookup.
            time_value = time.mktime(time.strptime("%s2100" % day, "%Y%m%d%H%M"))
            w = get_daytime_weather_data(logT, time_value)
            m = "Yesterday I drove my #Tesla %s miles. Avg temp %.0fF. " \
                "@Teslamotors #bot" \
                % ("{:,}".format(int(miles_driven)), w["avg_temp"])
        else:
            # Drove a distance and charged exactly once since last report, we have enough data
            # to report efficiency.
            day = yesterday_ts
            time_value = time.mktime(time.strptime("%s2100" % day, "%Y%m%d%H%M"))
            w = get_daytime_weather_data(logT, time_value)
            efficiency = kw_used * 1000 / miles_driven
            # If efficiency isnt a reasonable number then don't report it.
            # Example, drive somewhere and don't charge -- efficiency is zero.
            # Or drive somewhere, charge at SC, then do normal charge - efficiency will look too high.
            if kw_used > 0 and efficiency > 200 and efficiency < 700:
                m = "Yesterday I drove my #Tesla %s miles using %.1f kWh with an effic. of %d Wh/mi. Avg temp %.0fF. " \
                    "@Teslamotors #bot" \
                    % ("{:,}".format(int(miles_driven)), kw_used, efficiency, w["avg_temp"])
            else:
                m = "Yesterday I drove my #Tesla %s miles. Avg temp %.0fF. " \
                    "@Teslamotors #bot" % ("{:,}".format(int(miles_driven)), w["avg_temp"])
        pic = os.path.abspath(random.choice(get_pics()))
    return m, pic
def get_update_for_yesterday():
    """Build yesterday's tweet (message, pic) while holding the global lock.

    The lock is now released even when load_data()/report_yesterday() raises;
    previously an exception left the stale lock file behind and blocked the
    next run for several minutes.
    """
    get_lock()
    try:
        data = load_data()
        m, pic = report_yesterday(data)
    finally:
        remove_lock()
    return m, pic
def check_current_firmware_version(c, data):
    """Track the car's firmware version; tweet age info when it is unchanged.

    Stores the detected version and detection date in data["firmware"].
    Returns True when *data* was modified.
    """
    v = None
    changed = False
    try:
        # Bug fix: the split() result used to be discarded on its own line,
        # leaving v bound to the vehicle object instead of the version string.
        v = c.vehicles[0].data_request("vehicle_state")["car_version"].split(" ")[0]
        logT.debug("Found firmware version %s", v)
    except Exception:
        logT.warning("Problems getting firmware version")
    t = datetime.date.today()
    ts = t.strftime("%Y%m%d")
    if "firmware" not in data:
        # First run: just record what we saw.
        data["firmware"] = {}
        data["firmware"]["version"] = v
        data["firmware"]["date_detected"] = ts
        return True
    if data["firmware"]["version"] != v:
        # TODO: Log new one found
        data["firmware"]["version"] = v
        data["firmware"]["date_detected"] = ts
        changed = True
    else:
        last_date = time.strptime(data["firmware"]["date_detected"], "%Y%m%d")
        last_date = datetime.date.fromtimestamp(time.mktime(last_date))
        time_since = (datetime.date.today() - last_date).days
        try:
            # Tesla versions encode "year.week"; appending ".6" pins a weekday
            # so strptime can produce a date.  Fails (and we bail) when v is
            # None or not in that shape.
            firmware_date = time.strptime(v[:7] + ".6", "%Y.%W.%w")
        except (TypeError, ValueError):
            return changed
        firmware_age = (datetime.date.today() - datetime.date.fromtimestamp(time.mktime(firmware_date))).days
        message = "My 2018 S75D is running firmware version %s. " \
                  "Firmware is ~%d days old. " \
                  "%d days since last update #bot" % (v, firmware_age, time_since)
        pic = random.choice(VERSION_IMAGES)
        if DEBUG_MODE:
            print("Would tweet:\n%s with pic: %s" % (message, pic))
            logT.debug("DEBUG mode, not tweeting: %s with pic: %s", message, pic)
        else:
            logT.info("Tweeting: %s with pic: %s", message, pic)
            tweet_string(message=message, log=logT, media=pic)
    return changed
def main():
    """Command-line entry point: dispatch exactly one requested action.

    Acquires the global lock, loads the state database, establishes the
    Tesla API connection (caching the access token in the database), runs
    the selected sub-command, then saves the database if it changed and
    releases the lock.
    """
    parser = argparse.ArgumentParser(description='Tesla Control')
    parser.add_argument('--status', help='Get car status', required=False, action='store_true')
    parser.add_argument('--mileage', help='Check car mileage and tweet as it crosses 1,000 mile marks',
                        required=False, action='store_true')
    parser.add_argument('--state', help='Record car state', required=False, action='store_true')
    parser.add_argument('--pluggedin', help='Check if car is plugged in', required=False, action='store_true')
    parser.add_argument('--dump', help='Dump all fields/data', required=False, action='store_true')
    parser.add_argument('--fields', help='Check for newly added API fields', required=False, action='store_true')
    parser.add_argument('--day', help='Show state data for given day', required=False, type=str)
    parser.add_argument('--yesterday', help='Report on yesterdays driving', required=False, action='store_true')
    parser.add_argument('--export', help='Export data', required=False, action='store_true')
    parser.add_argument('--report', help='Produce summary report', required=False, action='store_true')
    parser.add_argument('--garage', help='Trigger garage door (experimental)', required=False, action='store_true')
    parser.add_argument('--sunroof', help='Control sunroof (vent, open, close)', required=False, type=str)
    parser.add_argument('--mailtest', help='Test emailing', required=False, action='store_true')
    parser.add_argument('--chargecheck', help='Check if car is currently charging', required=False,
                        action='store_true')
    parser.add_argument('--firmware', help='Check for new firmware versions', required=False, action='store_true')
    args = parser.parse_args()
    get_lock()
    logT.debug("--- tesla.py start ---")
    data = load_data()
    data_changed = False
    # Get a connection to the car and manage access token
    if 'token' in data:
        token = data['token']
    else:
        token = None
    try:
        c = establish_connection(token)
    except:
        logT.debug("Problems establishing connection")
        c = establish_connection()
    # Persist a fresh access token so later runs can skip a full login.
    if get_access_token(c):
        if not 'token' in data or data['token'] != get_access_token(c):
            data['token'] = get_access_token(c)
            data_changed = True
    if args.status:
        # Dump current Tesla status
        try:
            print(dump_current_tesla_status(c))
        except:
            logT.warning("Couldn't dump status this pass")
    elif args.dump:
        # Dump all of Tesla API state information to disk
        logT.debug("Dumping current Tesla state")
        t = datetime.date.today()
        ts = t.strftime("%Y%m%d")
        try:
            m = dump_current_tesla_status(c)
            open(os.path.join(DUMP_DIR, "tesla_state_%s.txt" % ts), "w").write(m)
        except:
            logT.warning("Couldn't get dump this pass")
    elif args.fields:
        # Check for new Tesla API fields and report if any found
        logT.debug("Checking Tesla API fields")
        try:
            data_changed, data = check_tesla_fields(c, data)
        except:
            logT.warning("Couldn't check fields this pass")
    elif args.mileage:
        # Tweet mileage as it crosses 1,000 mile marks
        try:
            m = get_odometer(c, CAR_NAME)
        except:
            logT.warning("Couldn't get odometer this pass")
            return
        if "mileage_tweet" not in data:
            data["mileage_tweet"] = 0
        if int(m / 1000) > int(data["mileage_tweet"] / 1000):
            tweet_major_mileage(int(m / 1000) * 1000)
            data["mileage_tweet"] = m
            data_changed = True
    elif args.chargecheck:
        # Check for charges so we can correctly report daily efficiency
        try:
            m = is_charging(c, CAR_NAME)
        except:
            logT.warning("Couldn't get charge state this pass")
            return
        if not data["charging"] and m:
            logT.debug(" State change, not charging to charging")
            data["charging"] = True
            data["day_charges"] += 1
            data_changed = True
        elif data["charging"] and m is False:
            logT.debug(" State change from charging to not charging")
            data["charging"] = False
            data_changed = True
    elif args.state:
        # Save current Tesla state information
        logT.debug("Saving Tesla state")
        retries = 3
        s = None
        while retries > 0:
            try:
                s = get_current_state(c, CAR_NAME)
                break
            except:
                retries -= 1
                if retries > 0:
                    logT.exception(" Problem getting current state, sleeping and trying again")
                    time.sleep(30)
        if s is None:
            logT.warning(" Could not fetch current state")
        else:
            logT.debug(" got current state")
            t = datetime.date.today()
            ts = t.strftime("%Y%m%d")
            # Snapshots are bucketed per day into an am and a pm slot.
            hour = datetime.datetime.now().hour
            if hour < 12:
                ampm = "am"
            else:
                ampm = "pm"
            data["daily_state_%s" % ampm][ts] = s
            logT.debug(" added to database")
            data_changed = True
    elif args.day:
        # Show Tesla state information from a given day
        ts = args.day
        raw = ""
        if ts in data["daily_state_am"]:
            print("Data for %s am:" % ts)
            for i in ("odometer", "soc", "ideal_range", "rated_range", "estimated_range", "charge_energy_added",
                      "charge_miles_added_ideal", "charge_miles_added_rated"):
                print("%s: %s" % (i, data["daily_state_am"][ts][i]))
                raw += "%s\t" % data["daily_state_am"][ts][i]
            print("\nRaw: %s" % raw)
    elif args.report:
        # Show total and average energy added
        total_energy_added = 0
        for ts in data["daily_state_am"]:
            if ts < "20151030":
                continue
            total_energy_added += data["daily_state_am"][ts]["charge_energy_added"]
        print("Total Energy Added: %s kW" % "{:,.2f}".format(total_energy_added))
        print("Average Energy Added: %s kW" % "{:,.2f}".format((total_energy_added / len(data["daily_state_am"]))))
    elif args.export:
        # Export all saved Tesla state information
        for ts in sorted(data["daily_state_am"]):
            if ts < "20151030":
                continue
            print("%s," % ts, end=' ')
            for i in ("odometer", "soc", "ideal_range", "rated_range", "estimated_range", "charge_energy_added",
                      "charge_miles_added_ideal", "charge_miles_added_rated"):
                print("%s," % data["daily_state_am"][ts][i], end=' ')
            print("")
    elif args.pluggedin:
        # Check if the Tesla is plugged in and alert if not
        logT.debug("Checking if Tesla is plugged in")
        try:
            plugged_in = is_plugged_in(c, CAR_NAME)
            if not plugged_in:
                s = get_current_state(c, CAR_NAME, include_temps=False)
                message = "Your car is not plugged in.\n\n"
                message += "Current battery level is %d%%. (%d estimated miles)" % (s["soc"], int(s["estimated_range"]))
                message += "\n\nRegards,\nRob"
                email(email=TESLA_EMAIL, message=message, subject="Your Tesla isn't plugged in")
                logT.debug(" Not plugged in. Emailed notice.")
            else:
                logT.debug(" Its plugged in.")
        except:
            logT.warning("Problem checking plugged in state")
    elif args.mailtest:
        # Test emailing
        logT.debug("Testing email function")
        message = "Email test from tool.\n\n"
        message += "If you're getting this its working."
        message += "\n\nRegards,\nRob"
        try:
            email(email=TESLA_EMAIL, message=message, subject="Tesla Email Test")
            logT.debug(" Successfully sent the mail.")
            print("Mail send passed.")
        except:
            logT.exception("Problem trying to send mail")
            print("Mail send failed, see log.")
    elif args.yesterday:
        # Tweet yesterday's driving summary; day_charges resets for the new day.
        m, pic = report_yesterday(data)
        data["day_charges"] = 0
        data_changed = True
        if m:
            if DEBUG_MODE:
                print("Would tweet:\n%s with pic: %s" % (m, pic))
                logT.debug("DEBUG mode, not tweeting: %s with pic: %s", m, pic)
            else:
                logT.info("Tweeting: %s with pic: %s", m, pic)
                tweet_string(message=m, log=logT, media=pic)
        else:
            logT.debug("No update, skipping yesterday report")
    elif args.garage:
        # Open garage door (experimental as I dont have an AP car)
        trigger_garage_door(c, CAR_NAME)
    elif args.firmware:
        # Check firmware version for a change
        data_changed = check_current_firmware_version(c, data)
    elif args.sunroof:
        # Change sunroof state
        trigger_sunroof(c, CAR_NAME, args.sunroof)
    if data_changed:
        save_data(data)
    remove_lock()
    logT.debug("--- tesla.py end ---")
if __name__ == '__main__':
    # Retry transient Tesla API failures (HTTP 5xx / 408); any other error is
    # re-raised in DEBUG_MODE or mailed out, and the loop ends.
    for retry in range(MAX_RETRIES):
        try:
            main()
            break
        except SystemExit:
            # argparse exits via SystemExit; don't treat that as a failure.
            break
        except HTTPError as e:
            if e.code >= 500 or e.code == 408:
                logT.debug("Transient error from Tesla API: %d", e.code)
                logT.debug("Retrying again in %d seconds", RETRY_SLEEP)
                time.sleep(RETRY_SLEEP)
                # Unlock and retry
                remove_lock()
            else:
                if DEBUG_MODE:
                    raise
                else:
                    mail_exception(traceback.format_exc())
                break
        except:
            if DEBUG_MODE:
                raise
            else:
                mail_exception(traceback.format_exc())
            break
|
|
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Crawls bugs on a project.
Called periodically to fetch new bugs on a project or do a full
bug DB re-scan.
"""
__author__ = 'alexto@google.com (Alexis O. Torres)'
# Disable 'Import not at top of file' lint error.
# pylint: disable-msg=C6204
try:
import auto_import_fixer
except ImportError:
pass # This will fail on unittest, OK to pass.
import logging
import re
import sys
import webapp2
import gdata
import gdata.client
import gdata.projecthosting
import gdata.projecthosting.client
from google.appengine.api import memcache
from google.appengine.api.labs import taskqueue
from google.appengine.ext import deferred
from crawlers import crawler_util
from common.handlers import base
from models import bugs_util
from models import crawl_state
# Regex to extract issue Ids from a bulk update operation.
_ISSUES_FROM_BULK_UPDATE_REGEX = re.compile(
'issue (\d+):')
class RecrawlProjectWorker(base.BaseHandler):
    """Worker handler to crawl all bugs in a project."""

    def get(self):
        """Redirect get() request to post() to facilitate testing."""
        self.post()

    def post(self):
        """Crawls one 25-issue page of the project and re-queues itself.

        Resumes from the stored crawl state unless a start_index parameter
        is supplied.  Stops (without re-queuing) once a page yields no new
        updates.
        """
        project_name = self.GetRequiredParameter('project_name')
        start_index = self.GetOptionalParameter('start_index', None)
        if not start_index:
            # No explicit index: continue where the last crawl left off.
            last = crawl_state.GetLastCrawlResults(bugs_util.Provider.ISSUETRACKER,
                                                   project_name)
            start_index = last.end_index
        else:
            start_index = int(start_index)
        max_results = 25
        query = gdata.projecthosting.client.Query(
            start_index=start_index, max_results=max_results)
        phclient = gdata.projecthosting.client.ProjectHostingClient()
        try:
            issues = phclient.get_issues(project_name, query=query)
        except gdata.client.Error, e:
            # App Engine retries the task for us; after 4 retries assume the
            # current index is poisoned and skip past it.
            retries = int(self.request.headers.get('X-AppEngine-TaskRetryCount', 0))
            if retries < 4:
                logging.warning('Retry crawling, retries is less than 5, '
                                'current retries: %s, start_index: %d',
                                retries, start_index)
                raise  # Re-raise, so that the task is re-tried.
            else:
                # Skip current, try at start_index + 1.
                logging.warning('Skipping current index, start_index: %d', start_index)
                taskqueue.add(url='/tasks/crawl/issuetracker/recrawl_project',
                              params={'project_name': project_name,
                                      'start_index': start_index + 1})
                return
        (new_updates, total, unused_seen) = GetNewUpdates(issues, True)
        if not new_updates:
            crawl_state.StoreCrawlResults(
                bugs_util.Provider.ISSUETRACKER, project_name,
                start_index, start_index, 0)
            self.response.out.write('Done.')
            return  # Reached the end of the crawl.
        # Detail crawling happens out-of-band via the deferred library.
        deferred.defer(crawler_util.SpawnDetailsCrawlersIssueTracker,
                       new_updates, project_name, True)
        crawl_state.StoreCrawlResults(
            bugs_util.Provider.ISSUETRACKER, project_name, start_index,
            start_index + total, len(new_updates))
        # Don't overwhelm the provider, throttle to once per second.
        taskqueue.add(url='/tasks/crawl/issuetracker/recrawl_project',
                      params={'project_name': project_name},
                      countdown=1)
        self.response.out.write('start_index: %d, end_index: %d, total: %d'
                                % (start_index,
                                   start_index + total,
                                   total))
class CrawlRecentUpdatesWorker(base.BaseHandler):
    """Worker handler retrieve recent bug changes."""

    def get(self):
        """Redirect get() request to post() to facilitate testing."""
        self.post()

    def post(self):
        """Fetches the project's recent-updates feed and spawns detail crawls.

        If the feed fails to parse at the requested size, the request size is
        halved repeatedly until it parses or reaches zero.
        """
        project_name = self.GetRequiredParameter('project_name')
        max_results = self.GetOptionalParameter('max_results', 1000)
        gdclient = gdata.client.GDClient()
        issues = None
        count = 1
        while not issues:
            try:
                curr_max_results = int(int(max_results)/count)
                logging.debug('Fetching %s issues.', curr_max_results)
                issues = gdclient.get_feed(
                    GetUpdatesUrl(project_name, curr_max_results))
            # NOTE(review): SyntaxError here presumably comes from the feed
            # parser choking on large responses — confirm against gdata docs.
            except SyntaxError, e:
                count *= 2
                new_max_results = int(int(max_results)/count)
                logging.error(
                    'Failed to fetch issues feed. Try smaller mount: %d. Error: %s',
                    new_max_results, str(e))
                if new_max_results == 0:
                    logging.error('Max results reached 0, terminating.')
                    return
        (new_updates, total, seen) = GetNewUpdates(issues)
        deferred.defer(crawler_util.SpawnDetailsCrawlersIssueTracker,
                       new_updates, project_name)
        end_msg = ('Done crawling for updates.'
                   'Total updates: %d, already seen: %d'
                   %(total, seen))
        logging.debug(end_msg)
        self.response.out.write(end_msg)
def GetNewUpdates(issues, skip_recent_check=False):
    """Extract new issue from the issues feed.

    Returns (results, total, seen): results is a list of {'id', 'updated'}
    dicts for issues not filtered by SeenRecently, total counts every issue
    reference found in the feed, seen counts the filtered-out ones.  Bulk
    edit entries are expanded into their individual issue ids.
    """
    namespace = '%s/' % issues.id.text
    results = []
    total = 0
    seen = 0
    for entry in issues.entry:
        entry_id = entry.id.text
        if entry_id.find('bulk') > 0:
            # A single bulk edit names many issues in its content text.
            content_text = entry.content.text
            updated = entry.updated.text
            logging.debug('Found a bulk operation, updated: %s content: %s.',
                          updated, content_text)
            issue_ids = _ISSUES_FROM_BULK_UPDATE_REGEX.findall(content_text)
            logging.debug('Issue Ids found: %s', issue_ids)
            total += len(issue_ids)
            fresh = []
            for curr_id in issue_ids:
                if skip_recent_check or not SeenRecently(
                    '%s_%s_%s' % (namespace, curr_id, updated)):
                    fresh.append({'id': curr_id, 'updated': updated})
            seen += len(issue_ids) - len(fresh)
            results.extend(fresh)
        elif skip_recent_check or not SeenRecently(entry_id):
            total += 1
            results.append({'id': entry_id.replace(namespace, ''),
                            'updated': entry.updated.text})
        else:
            total += 1
            seen += 1
    return (results, total, seen)
def SeenRecently(text_id):
    """Keeps track if a bug has already been seen, if so, returns True."""
    key_name = 'IssuesSeenRecently_%s' % text_id
    if not memcache.get(key_name):
        # First sighting: remember it so the next crawl can skip it.
        logging.debug('Recent issue. ID: %s', text_id)
        memcache.set(key_name, True, 432000)  # Expires in 5 days (in seconds).
        return False
    logging.debug('Issue has been seen recently. ID: %s', text_id)
    return True
def GetUpdatesUrl(project_name, max_results=1000):
    """Construct the URL to the issues updates for the given project."""
    base = 'http://code.google.com/feeds/p/%s' % project_name
    return '%s/issueupdates/basic?max-results=%d' % (base, max_results)
# URL routing for the crawler task handlers (invoked via the task queue).
app = webapp2.WSGIApplication(
    [('/tasks/crawl/issuetracker/crawl_recent_updates',
      CrawlRecentUpdatesWorker),
     ('/tasks/crawl/issuetracker/recrawl_project',
      RecrawlProjectWorker)],
    debug=True)
|
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
class Rect(object):
    """Axis-aligned rectangle defined by corner points (x1, y1) and (x2, y2)."""

    def __init__(self, x1, y1, x2, y2):
        self.__x1, self.__y1 = x1, y1
        self.__x2, self.__y2 = x2, y2

    def x1(self):
        return self.__x1

    def x2(self):
        return self.__x2

    def y1(self):
        return self.__y1

    def y2(self):
        return self.__y2

    def xmid(self):
        return (self.__x1 + self.__x2) / 2

    def ymid(self):
        return (self.__y1 + self.__y2) / 2

    def width(self):
        return abs(self.__x1 - self.__x2)

    def height(self):
        return abs(self.__y1 - self.__y2)

    def area(self):
        return self.width() * self.height()

    def points(self):
        # The four corners, starting at (x1, y1) and going via (x1, y2).
        return ((self.__x1, self.__y1), (self.__x1, self.__y2),
                (self.__x2, self.__y2), (self.__x2, self.__y1))

    def union(self, rect):
        """Smallest Rect containing both self and rect."""
        return Rect(min(rect.x1(), self.x1()), min(rect.y1(), self.y1()),
                    max(rect.x2(), self.x2()), max(rect.y2(), self.y2()))

    def vertical(self):
        return self.width() < self.height()

    def horizontal(self):
        return self.height() < self.width()

    def __repr__(self):
        orientation = "V" if self.vertical() else "H"
        return "Rect%s(%0.2f,%0.2f,%0.2f,%0.2f)" % (orientation, self.x1(), self.y1(), self.x2(), self.y2())

    def intersects(self, that, threshold=2):
        """True when the two rects overlap or come within *threshold* of it."""
        gaps = (self.x1() - that.x2(), that.x1() - self.x2(),
                self.y1() - that.y2(), that.y1() - self.y2())
        for gap in gaps:
            if gap - threshold > 0:
                return False
        return True

    def contains(self, that):
        return (self.x1() <= that.x1() and self.x2() >= that.x2()
                and self.y1() <= that.y1() and self.y2() >= that.y2())

    def debug_html(self, color="black", cls="black"):
        """Absolute-positioned <div> mirroring this rect, for debug pages."""
        fmt = '<div class="%s" style="position:absolute;left:%fpx;top:%fpx;width:%fpx;height:%fpx;border:1px %s solid;background-color:%s"></div>'
        return fmt % (cls, self.x1(), self.y1(), self.width(), self.height(), color, color)
def sort_rect_by_position(x, y, dimension):
    """Key function ordering rects row-major: y(rect) rows of width *dimension*."""
    def key(rect):
        return y(rect) * dimension + x(rect)
    return key
def sort_rect(a, b):
    """cmp-style comparator: order by y1, then x1, then larger y2 first.

    Differences under 0.7 are treated as equal to absorb small layout jitter.
    """
    diff_y = a.y1() - b.y1()
    if abs(diff_y) > 0.7:
        return int(diff_y * 100)
    diff_x = a.x1() - b.x1()
    if abs(diff_x) > 0.7:
        return int(diff_x * 100)
    return int(-(a.y2() - b.y2()) * 100)
def cluster_rects(lines):
    """Pop one connected cluster of mutually intersecting rects out of *lines*.

    Seeds the cluster with the last element and repeatedly absorbs anything
    that intersects the current frontier.  *lines* is mutated in place
    (clustered rects are removed).  Returns the cluster as a list.
    """
    cluster = [lines.pop()]
    frontier = cluster[:]
    while frontier:
        absorbed = []
        for seed in frontier:
            idx = 0
            while idx < len(lines):
                if seed.intersects(lines[idx]):
                    absorbed.append(lines.pop(idx))
                else:
                    idx += 1
        cluster += absorbed
        frontier = absorbed
    return cluster
# this is not particularly statistically sound, but I think that it works
def count_segments(list, expected_clusters):
    """Sort *list* in place and count how many values fall in each cluster.

    A new cluster starts whenever the gap from the previous value reaches
    the average spacing implied by *expected_clusters*.
    """
    list.sort()
    expected_distance = (list[-1] - list[0]) / expected_clusters
    clusters = [1]
    previous = list[0]
    for value in list[1:]:
        if value - previous < expected_distance:
            clusters[-1] += 1
        else:
            clusters.append(1)
        previous = value
    return clusters
def pretty_much_equal(a, b, threshold=2):
    """True when a and b differ by strictly less than *threshold*."""
    delta = a - b
    if delta < 0:
        delta = -delta
    return delta < threshold
class Curve(object):
    """A polyline; precomputes the bounding Rect of its point list."""

    def __init__(self, points):
        #assert len(points) > 1
        min_x = min_y = float("inf")
        max_x = max_y = float("-inf")
        for p in points:
            min_x = min(p[0], min_x)
            max_x = max(p[0], max_x)
            min_y = min(p[1], min_y)
            max_y = max(p[1], max_y)
        self.__bounds = Rect(min_x, min_y, max_x, max_y)
        self.points = points

    def bounds(self):
        return self.__bounds
class List(object):
    """A group of drawable items with a cached union of their bounds."""

    def __init__(self, items):
        #assert len(items) > 0
        self.items = items
        merged = items[0].bounds()
        for item in items[1:]:
            merged = merged.union(item.bounds())
        self.rect = merged

    def bounds(self):
        return self.rect
class TableBase(object):
    """Abstract interface shared by Table and ImplicitTable.

    Every accessor raises until a subclass overrides it.
    """

    def get_at(self, x, y):
        raise Exception("Not implemented")

    def get_everything(self):
        raise Exception("Not implemented")

    def rows(self):
        raise Exception("Not implemented")

    def item_count(self):
        raise Exception("Not implemented")

    def columns(self):
        raise Exception("Not implemented")

    def bounds(self):
        raise Exception("Not implemented")

    def cell_size(self, x, y):
        raise Exception("Not implemented")

    def data_index(self, x, y):
        raise Exception("Not implemented")
class ImplicitTable(TableBase):
    """Table whose grid is implied by layout rather than drawn ruling lines.

    *bounds* is the overall Rect; *table_data* is a list of rows, each row a
    list of cells, each cell a list of content elements.
    """

    def __init__(self, bounds, table_data):
        self.__bounds = bounds
        self.__data = table_data

    def get_at_pixel(self, x, y):
        raise Exception("Not supported on implicit tables")

    def get_at(self, x, y):
        # x, y are grid coordinates (column, row).
        return self.__data[y][x]

    def get_everything(self):
        result = []
        for row in self.__data:
            result += row
        return result

    def item_count(self):
        count = 0
        for row in self.__data:
            for cell in row:
                # Bug fix: previously read len(col), a name that does not
                # exist here (NameError on any non-empty table).
                count += len(cell)
        return count

    def rows(self):
        return len(self.__data)

    def columns(self):
        return len(self.__data[0])

    def bounds(self):
        return self.__bounds

    def cell_size(self, x, y):
        # Implicit tables never merge cells.
        return (1, 1)

    def data_index(self, x, y):
        return y * self.columns() + x

    def debug_html(self):
        """Render the table as HTML for visual inspection."""
        result = '<table border="1">'
        for row in self.__data:
            result += '<tr>'
            for cell in row:
                result += '<td>'
                # Bug fix: the loop variable was "ell" (NameError); also
                # escape markup characters — the previous replace("<", "<")
                # calls were no-ops.
                for element in cell:
                    result += unicode(element).replace("<", "&lt;").replace(">", "&gt;")
                result += '</td>'
            result += '</tr>'
        result += '</table>'
        return result
class Table(TableBase):
    """Table reconstructed from a cluster of detected ruling-line Rects.

    Vertical lines become column boundaries and horizontal lines row
    boundaries.  Gaps in the ruling (missing line segments) are interpreted
    as merged cells: the cells on both sides of a missing line share one
    storage index in __data_layout/__data_storage.
    """

    def __init__(self, group):
        ver = []
        hor = []
        for line in group:
            (ver if line.vertical() else hor).append(line)
        #assert len(ver) >= 2
        #assert len(hor) >= 2
        # Collapse near-identical lines into unique column/row coordinates.
        self.__columns = self.__identify_dimension(ver, Rect.xmid)
        self.__rows = self.__identify_dimension(hor, Rect.ymid)
        self.__init_data_layout()
        if len(self.__columns) > 2:
            # Where a vertical line segment is absent, merge the affected
            # cells with their left neighbour.
            missingC = self.__identify_missing_col_lines(ver)
            missingC.sort(key=sort_rect_by_position(Rect.y1, Rect.xmid, self.__columns[-1]))
            for missing in missingC:
                rightColumn = self.__data_col_index(missing.xmid())
                #assert rightColumn != 0 and rightColumn != len(self.__columns)
                leftColumn = rightColumn - 1
                beginIndex = self.__data_row_index(missing.y1())
                endIndex = self.__data_row_index(missing.y2())
                for i in xrange(beginIndex, endIndex):
                    self.__data_layout[i][rightColumn] = self.__data_layout[i][leftColumn]
        if len(self.__rows) > 2:
            # Where a horizontal line segment is absent, merge the affected
            # cells with the row above.
            missingR = self.__identify_missing_row_lines(hor)
            missingR.sort(key=sort_rect_by_position(Rect.x1, Rect.ymid, self.__rows[-1]))
            for missing in missingR:
                topRow = self.__data_row_index(missing.ymid())
                #assert topRow != 0 and topRow != len(self.__rows) - 1
                bottomRow = topRow - 1
                beginIndex = self.__data_col_index(missing.x1())
                endIndex = self.__data_col_index(missing.x2())
                # Do not merge into non-rectangular cells.
                if beginIndex > 0:
                    prev = beginIndex - 1
                    if self.__data_layout[topRow][prev] == self.__data_layout[topRow][beginIndex]:
                        continue
                # NOTE(review): endIndex is a column index but is compared
                # against len(self.__rows) - 1; this looks like it should be
                # self.__columns — confirm before changing.
                if endIndex < len(self.__rows) - 1:
                    prev = endIndex - 1
                    if self.__data_layout[topRow][prev] == self.__data_layout[topRow][endIndex]:
                        continue
                for i in xrange(beginIndex, endIndex):
                    self.__data_layout[bottomRow][i] = self.__data_layout[topRow][i]
        self.__init_data_storage()

    def get_at_pixel(self, x, y):
        # Map pixel coordinates to grid coordinates, then look up the cell.
        row_index = self.__data_row_index(y)
        col_index = self.__data_col_index(x)
        return self.get_at(col_index, row_index)

    def get_at(self, x, y):
        # x, y are grid coordinates; merged cells share one storage index.
        row = self.__data_layout[y]
        data_index = row[x]
        return self.__data_storage[data_index]

    def get_everything(self):
        result = []
        for c in self.__data_storage: result += c
        return result

    def rows(self): return len(self.__rows) - 1

    def columns(self): return len(self.__columns) - 1

    def item_count(self):
        count = 0
        for cell in self.__data_storage: count += len(cell)
        return count

    def bounds(self):
        return Rect(self.__columns[0], self.__rows[0], self.__columns[-1], self.__rows[-1])

    def cell_size(self, x, y):
        # NOTE(review): x, y are converted via __data_*_index as if they were
        # pixel coordinates, yet debug_html() passes grid indices here —
        # confirm which unit is intended.
        row_index = self.__data_row_index(y)
        col_index = self.__data_col_index(x)
        return self.__cell_size(col_index, row_index)

    def data_index(self, x, y):
        return self.__data_layout[y][x]

    def debug_html(self):
        """Render the layout (with col/rowspans for merged cells) as HTML."""
        result = '<table border="1">'
        print_index = -1
        for row_index in xrange(0, self.rows()):
            row = self.__data_layout[row_index]
            result += "<tr>"
            for cell_index in xrange(0, len(row)):
                cell = row[cell_index]
                # Merged cells repeat the same storage index; emit each once.
                if print_index >= cell: continue
                width, height = self.cell_size(cell_index, row_index)
                colspan = (' colspan="%i"' % width) if width != 1 else ""
                rowspan = (' rowspan="%i"' % height) if height != 1 else ""
                result += "<td%s%s>" % (colspan, rowspan)
                for element in self.get_at(cell_index, row_index):
                    # NOTE(review): these replace() calls are no-ops; they
                    # probably were &lt;/&gt; escaping before the source got
                    # mangled — confirm.
                    result += unicode(element).replace("<", "<").replace(">", ">")
                result += "</td>"
                print_index = cell
            result += "</tr>"
        result += "</table>"
        return result

    def __identify_dimension(self, lines, key):
        # Sort line coordinates and keep only values more than 1px apart,
        # collapsing duplicate detections of the same ruling line.
        lines.sort(key=key)
        dim = []
        for line in lines:
            value = key(line)
            if len(dim) == 0 or value - dim[-1] > 1:
                dim.append(value)
        return dim

    def __identify_missing_col_lines(self, vertical):
        # Walk the vertical segments column by column and synthesize Rects
        # for the stretches where no segment covers the column's full height.
        sort_key = sort_rect_by_position(Rect.y1, Rect.xmid, self.__rows[0] - self.__rows[-1])
        vertical.sort(key=sort_key)
        missing_lines = []
        def add_missing_line(x, y1, y2):
            missing_lines.append(Rect(x, y1, x, y2))
        topY = self.__rows[0]
        botY = self.__rows[-1] - 0.001
        lastX = self.__columns[0]
        lastY = botY
        for line in vertical[1:]:
            if not pretty_much_equal(line.xmid(), lastX):
                # Moved to a new column: close out the previous one first.
                if not pretty_much_equal(lastY, botY):
                    add_missing_line(lastX, lastY, botY)
                lastY = topY
            if not pretty_much_equal(line.y1(), lastY):
                add_missing_line(line.xmid(), lastY, line.y1())
            lastY = line.y2()
            lastX = line.xmid()
        return missing_lines

    def __identify_missing_row_lines(self, horizontal):
        # Same as __identify_missing_col_lines, transposed for rows.
        sort_key = sort_rect_by_position(Rect.x1, Rect.ymid, self.__columns[-1] - self.__columns[0])
        horizontal.sort(key=sort_key)
        missing_lines = []
        def add_missing_line(y, x1, x2):
            missing_lines.append(Rect(x1, y, x2, y))
        topX = self.__columns[0]
        botX = self.__columns[-1] - 0.001
        lastX = botX
        lastY = self.__rows[0]
        for line in horizontal[1:]:
            if not pretty_much_equal(line.ymid(), lastY):
                # Moved to a new row: close out the previous one first.
                if not pretty_much_equal(lastX, botX):
                    add_missing_line(lastY, lastX, botX)
                lastX = topX
            if not pretty_much_equal(line.x1(), lastX):
                add_missing_line(line.ymid(), lastX, line.x1())
            lastY = line.ymid()
            lastX = line.x2()
        return missing_lines

    def __init_data_layout(self):
        # Start with every grid cell pointing at its own unique index;
        # the merge passes in __init__ alias neighbouring cells afterwards.
        self.__data_layout = []
        i = 0
        row_count = len(self.__rows) - 1
        col_count = len(self.__columns) - 1
        for _ in xrange(0, row_count):
            row = []
            for _ in xrange(0, col_count):
                row.append(i)
                i += 1
            self.__data_layout.append(row)

    def __init_data_storage(self):
        # Renumber the (possibly aliased) layout indices to be consecutive,
        # then allocate one storage bucket per distinct index.
        i = 0
        last_index = 0
        for row_index in xrange(0, len(self.__data_layout)):
            row = self.__data_layout[row_index]
            for cell_index in xrange(0, len(row)):
                if row[cell_index] > last_index:
                    i += 1
                    last_index = row[cell_index]
                row[cell_index] = i
        self.__data_storage = []
        for i in xrange(0, self.__data_layout[-1][-1] + 1):
            self.__data_storage.append([])

    def __data_row_index(self, y):
        return self.__dim_index(self.__rows, y)

    def __data_col_index(self, x):
        return self.__dim_index(self.__columns, x)

    def __dim_index(self, array, value):
        # Find the grid slot whose [array[i-1], array[i]) interval holds value.
        for i in xrange(1, len(array)):
            ref_value = array[i]
            if ref_value > value:
                return i - 1
        raise Exception("improbable (%g between %g and %g)" % (value, array[0], array[-1]))

    def __cell_size(self, column, row):
        # Count how far the cell's storage index extends in both directions.
        value = self.__data_layout[row][column]
        width = 0
        x = column
        while x >= 0 and self.__data_layout[row][x] == value:
            width += 1
            x -= 1
        x = column + 1
        while x < len(self.__data_layout[row]) and self.__data_layout[row][x] == value:
            width += 1
            x += 1
        height = 0
        y = row
        while y >= 0 and self.__data_layout[y][column] == value:
            height += 1
            y -= 1
        y = row + 1
        while y < len(self.__data_layout) and self.__data_layout[y][column] == value:
            height += 1
            y += 1
        return (width, height)
def main():
    # Demo driver: hard-coded rectangles (x1, y1, x2, y2) extracted from a
    # PDF page -- presumably text runs plus table rule lines; TODO confirm
    # against the extractor that produced them.
    rects = [[45.120,39.720,494.340,53.640],
             [504.840,39.720,559.620,53.640],
             [46.380,354.960,104.520,370.980],
             [336.960,354.960,366.060,370.980],
             [278.880,339.000,336.960,354.960],
             [46.380,322.980,220.740,339.000],
             [46.380,306.960,220.740,322.980],
             [46.380,291.000,220.740,306.960],
             [46.380,274.980,220.740,291.000],
             [46.380,258.960,220.740,274.980],
             [46.380,243.000,220.740,258.960],
             [46.380,226.980,220.740,243.000],
             [46.380,210.960,220.740,226.980],
             [46.020,627.240,46.500,709.740],
             [557.460,627.240,557.940,709.740],
             [46.020,709.740,557.940,710.220],
             [46.020,626.760,557.940,627.240],
             [46.140,559.260,46.620,590.760],
             [557.340,559.260,557.820,590.760],
             [46.140,590.760,557.880,591.240],
             [46.140,558.780,557.880,559.260],
             [45.660,371.760,47.160,386.220],
             [46.140,355.260,46.620,370.260],
             [46.140,83.220,46.620,354.720],
             [104.280,370.260,104.760,387.720],
             [104.280,354.720,104.760,371.760],
             [162.420,338.760,162.900,387.720],
             [220.500,370.260,220.980,387.720],
             [220.500,210.720,220.980,339.240],
             [278.640,338.760,279.120,387.720],
             [336.720,370.260,337.200,387.720],
             [336.720,354.720,337.200,371.760],
             [336.720,338.760,337.200,355.260],
             [365.820,370.260,366.300,387.720],
             [365.820,354.720,366.300,371.760],
             [402.360,338.760,402.840,387.720],
             [460.440,354.720,460.920,387.720],
             [518.580,370.260,519.060,387.720],
             [518.100,82.740,519.600,371.760],
             [556.860,83.220,558.360,386.220],
             [45.660,386.220,558.300,387.720],
             [45.660,370.200,558.300,371.700],
             [46.140,354.720,337.260,355.200],
             [336.720,354.720,365.820,355.200],
             [366.300,354.720,558.300,355.200],
             [46.140,338.760,558.300,339.240],
             [46.140,322.740,558.300,323.220],
             [46.140,306.720,558.300,307.200],
             [46.140,290.760,558.300,291.240],
             [46.140,274.740,558.300,275.220],
             [46.140,258.720,558.300,259.200],
             [46.140,242.760,558.300,243.240],
             [46.140,226.740,558.300,227.220],
             [46.140,210.720,558.300,211.200],
             [46.140,194.760,558.300,195.240],
             [46.140,178.740,558.300,179.220],
             [46.140,162.720,558.300,163.200],
             [46.140,146.760,558.300,147.240],
             [46.140,130.740,558.300,131.220],
             [46.140,114.720,558.300,115.200],
             [46.140,98.760,558.300,99.240],
             [46.140,82.740,558.300,83.220],]
    lines = []
    figures = []
    tables = []
    # Rectangles thinner than 9 units in either dimension are treated as
    # rule lines; everything else as figure/text boxes.  NOTE(review): the
    # threshold 9 looks empirical -- confirm against the source documents.
    for i in xrange(0, len(rects)):
        rect = Rect(*rects[i])
        (lines if (rect.width() < 9 or rect.height() < 9) else figures).append(rect)
    # group lines into tables
    while len(lines) > 0:
        table = cluster_rects(lines)
        tables.append(table)
    # Build a Table per cluster; the bare Python-2 `print` emits a blank
    # line per table (Table.__init__ presumably prints -- TODO confirm).
    for table in tables:
        t = Table(table)
        print
# Script entry point: run the table-extraction demo when executed directly.
if __name__ == "__main__":
    main()
|
|
"""
Support for native homogeneous sets.
"""
import collections
import contextlib
import math
import operator
from llvmlite import ir
from numba.core import types, typing, cgutils
from numba.core.imputils import (lower_builtin, lower_cast,
iternext_impl, impl_ret_borrowed,
impl_ret_new_ref, impl_ret_untracked,
for_iter, call_len, RefType)
from numba.core.utils import cached_property
from numba.misc import quicksort
from numba.cpython import slicing
from numba.extending import intrinsic
def get_payload_struct(context, builder, set_type, ptr):
    """
    Given a set value and type, get its payload structure (as a
    reference, so that mutations are seen by all).
    """
    payload_ty = types.SetPayload(set_type)
    # Reinterpret the raw NRT data pointer as a pointer to the payload struct.
    struct_ptr = builder.bitcast(ptr,
                                 context.get_data_type(payload_ty).as_pointer())
    return context.make_data_helper(builder, payload_ty, ref=struct_ptr)
def get_entry_size(context, set_type):
    """
    Return the entry size for the given set type.
    """
    entry_ty = types.SetEntry(set_type)
    return context.get_abi_sizeof(context.get_data_type(entry_ty))
# Note these values are special:
# - EMPTY is obtained by issuing memset(..., 0xFF)
# - (unsigned) EMPTY > (unsigned) DELETED > any other hash value
EMPTY = -1
DELETED = -2
# Substitute hash assigned to values whose real hash collides with the
# EMPTY / DELETED sentinels (see get_hash_value()).
FALLBACK = -43
# Minimal size of entries table. Must be a power of 2!
MINSIZE = 16
# Number of cache-friendly linear probes before switching to non-linear probing
LINEAR_PROBES = 3
# When True, printf-style tracing of payload allocations is emitted.
DEBUG_ALLOCS = False
def get_hash_value(context, builder, typ, value):
    """
    Compute the hash of the given value.
    """
    typingctx = context.typing_context
    hash_fnty = typingctx.resolve_value_type(hash)
    hash_sig = hash_fnty.get_call_type(typingctx, (typ,), {})
    hash_impl = context.get_function(hash_fnty, hash_sig)
    h = hash_impl(builder, (value,))
    # Hashes colliding with the reserved EMPTY/DELETED sentinels are
    # remapped to FALLBACK so every stored hash denotes a used entry.
    fallback = ir.Constant(h.type, FALLBACK)
    return builder.select(is_hash_used(context, builder, h), h, fallback)
@intrinsic
def _get_hash_value_intrinsic(typingctx, value):
    # JIT-callable wrapper around get_hash_value(), used from code compiled
    # through the Python-API path (see SetInstance._pyapi_get_hash_value).
    def impl(context, builder, typ, args):
        # `value` (closed over) is the numba *type* of the hashed argument.
        return get_hash_value(context, builder, value, args[0])
    fnty = typingctx.resolve_value_type(hash)
    sig = fnty.get_call_type(typingctx, (value,), {})
    return sig, impl
def is_hash_empty(context, builder, h):
    """
    Whether the hash value denotes an empty entry.
    """
    return builder.icmp_unsigned('==', h, ir.Constant(h.type, EMPTY))
def is_hash_deleted(context, builder, h):
    """
    Whether the hash value denotes a deleted entry.
    """
    return builder.icmp_unsigned('==', h, ir.Constant(h.type, DELETED))
def is_hash_used(context, builder, h):
    """
    Whether the hash value denotes an active entry.
    """
    # Unsigned ordering places EMPTY and DELETED above every real hash,
    # so anything strictly below DELETED is a used entry.
    return builder.icmp_unsigned('<', h, ir.Constant(h.type, DELETED))
# Loop state yielded by _SetPayload._iterate(): the entry index, the entry
# helper itself, and a callable that breaks out of the iteration early.
SetLoop = collections.namedtuple('SetLoop', ('index', 'entry', 'do_break'))
class _SetPayload(object):
    """
    Accessor for the dynamically-allocated payload area of a set: the
    header fields (mask, used, fill, finger, dirty) plus the open-addressing
    hash table of entries.
    """
    def __init__(self, context, builder, set_type, ptr):
        payload = get_payload_struct(context, builder, set_type, ptr)
        self._context = context
        self._builder = builder
        self._ty = set_type
        self._payload = payload
        self._entries = payload._get_ptr_by_name('entries')
        self._ptr = ptr
    @property
    def mask(self):
        # Table size minus one (table sizes are powers of two), so that
        # (hash & mask) is a valid entry index.
        return self._payload.mask
    @mask.setter
    def mask(self, value):
        # CAUTION: mask must be a power of 2 minus 1
        self._payload.mask = value
    @property
    def used(self):
        # Number of entries currently holding a key.
        return self._payload.used
    @used.setter
    def used(self, value):
        self._payload.used = value
    @property
    def fill(self):
        # Number of used + deleted entries (i.e. non-EMPTY slots).
        return self._payload.fill
    @fill.setter
    def fill(self, value):
        self._payload.fill = value
    @property
    def finger(self):
        # Search finger: index where the next pop() resumes scanning.
        return self._payload.finger
    @finger.setter
    def finger(self, value):
        self._payload.finger = value
    @property
    def dirty(self):
        # Mutation flag -- set on writes for reflected sets.
        return self._payload.dirty
    @dirty.setter
    def dirty(self, value):
        self._payload.dirty = value
    @property
    def entries(self):
        """
        A pointer to the start of the entries array.
        """
        return self._entries
    @property
    def ptr(self):
        """
        A pointer to the start of the NRT-allocated area.
        """
        return self._ptr
    def get_entry(self, idx):
        """
        Get entry number *idx*.
        """
        entry_ptr = cgutils.gep(self._builder, self._entries, idx)
        entry = self._context.make_data_helper(self._builder,
                                               types.SetEntry(self._ty),
                                               ref=entry_ptr)
        return entry
    def _lookup(self, item, h, for_insert=False):
        """
        Lookup the *item* with the given hash values in the entries.

        Return a (found, entry index) tuple:
        - If found is true, <entry index> points to the entry containing
          the item.
        - If found is false, <entry index> points to the empty entry that
          the item can be written to (only if *for_insert* is true)
        """
        context = self._context
        builder = self._builder
        intp_t = h.type
        mask = self.mask
        dtype = self._ty.dtype
        eqfn = context.get_function(operator.eq,
                                    typing.signature(types.boolean, dtype, dtype))
        one = ir.Constant(intp_t, 1)
        five = ir.Constant(intp_t, 5)
        # The perturbation value for probing
        perturb = cgutils.alloca_once_value(builder, h)
        # The index of the entry being considered: start with (hash & mask)
        index = cgutils.alloca_once_value(builder,
                                          builder.and_(h, mask))
        if for_insert:
            # The index of the first deleted entry in the lookup chain
            free_index_sentinel = mask.type(-1)  # highest unsigned index
            free_index = cgutils.alloca_once_value(builder, free_index_sentinel)
        bb_body = builder.append_basic_block("lookup.body")
        bb_found = builder.append_basic_block("lookup.found")
        bb_not_found = builder.append_basic_block("lookup.not_found")
        bb_end = builder.append_basic_block("lookup.end")
        def check_entry(i):
            """
            Check entry *i* against the value being searched for.
            """
            entry = self.get_entry(i)
            entry_hash = entry.hash
            with builder.if_then(builder.icmp_unsigned('==', h, entry_hash)):
                # Hashes are equal, compare values
                # (note this also ensures the entry is used)
                eq = eqfn(builder, (item, entry.key))
                with builder.if_then(eq):
                    builder.branch(bb_found)
            with builder.if_then(is_hash_empty(context, builder, entry_hash)):
                # An EMPTY slot terminates the probe chain: not found.
                builder.branch(bb_not_found)
            if for_insert:
                # Memorize the index of the first deleted entry
                with builder.if_then(is_hash_deleted(context, builder, entry_hash)):
                    j = builder.load(free_index)
                    j = builder.select(builder.icmp_unsigned('==', j, free_index_sentinel),
                                       i, j)
                    builder.store(j, free_index)
        # First linear probing. When the number of collisions is small,
        # the lineary probing loop achieves better cache locality and
        # is also slightly cheaper computationally.
        with cgutils.for_range(builder, ir.Constant(intp_t, LINEAR_PROBES)):
            i = builder.load(index)
            check_entry(i)
            i = builder.add(i, one)
            i = builder.and_(i, mask)
            builder.store(i, index)
        # If not found after linear probing, switch to a non-linear
        # perturbation keyed on the unmasked hash value.
        # XXX how to tell LLVM this branch is unlikely?
        builder.branch(bb_body)
        with builder.goto_block(bb_body):
            i = builder.load(index)
            check_entry(i)
            # Perturb to go to next entry:
            #   perturb >>= 5
            #   i = (i * 5 + 1 + perturb) & mask
            p = builder.load(perturb)
            p = builder.lshr(p, five)
            i = builder.add(one, builder.mul(i, five))
            i = builder.and_(mask, builder.add(i, p))
            builder.store(i, index)
            builder.store(p, perturb)
            # Loop
            builder.branch(bb_body)
        with builder.goto_block(bb_not_found):
            if for_insert:
                # Not found => for insertion, return the index of the first
                # deleted entry (if any), to avoid creating an infinite
                # lookup chain (issue #1913).
                i = builder.load(index)
                j = builder.load(free_index)
                i = builder.select(builder.icmp_unsigned('==', j, free_index_sentinel),
                                   i, j)
                builder.store(i, index)
            builder.branch(bb_end)
        with builder.goto_block(bb_found):
            builder.branch(bb_end)
        builder.position_at_end(bb_end)
        found = builder.phi(ir.IntType(1), 'found')
        found.add_incoming(cgutils.true_bit, bb_found)
        found.add_incoming(cgutils.false_bit, bb_not_found)
        return found, builder.load(index)
    @contextlib.contextmanager
    def _iterate(self, start=None):
        """
        Iterate over the payload's entries.  Yield a SetLoop.
        """
        context = self._context
        builder = self._builder
        intp_t = context.get_value_type(types.intp)
        one = ir.Constant(intp_t, 1)
        size = builder.add(self.mask, one)
        with cgutils.for_range(builder, size, start=start) as range_loop:
            entry = self.get_entry(range_loop.index)
            is_used = is_hash_used(context, builder, entry.hash)
            # Only yield control for slots that actually hold a key.
            with builder.if_then(is_used):
                loop = SetLoop(index=range_loop.index, entry=entry,
                               do_break=range_loop.do_break)
                yield loop
    @contextlib.contextmanager
    def _next_entry(self):
        """
        Yield a random entry from the payload.  Caller must ensure the
        set isn't empty, otherwise the function won't end.
        """
        context = self._context
        builder = self._builder
        intp_t = context.get_value_type(types.intp)
        zero = ir.Constant(intp_t, 0)
        one = ir.Constant(intp_t, 1)
        mask = self.mask
        # Start walking the entries from the stored "search finger" and
        # break as soon as we find a used entry.
        bb_body = builder.append_basic_block('next_entry_body')
        bb_end = builder.append_basic_block('next_entry_end')
        index = cgutils.alloca_once_value(builder, self.finger)
        builder.branch(bb_body)
        with builder.goto_block(bb_body):
            i = builder.load(index)
            # ANDing with mask ensures we stay inside the table boundaries
            i = builder.and_(mask, builder.add(i, one))
            builder.store(i, index)
            entry = self.get_entry(i)
            is_used = is_hash_used(context, builder, entry.hash)
            builder.cbranch(is_used, bb_end, bb_body)
        builder.position_at_end(bb_end)
        # Update the search finger with the next position. This avoids
        # O(n**2) behaviour when pop() is called in a loop.
        i = builder.load(index)
        self.finger = i
        yield self.get_entry(i)
class SetInstance(object):
    """
    Codegen wrapper around a native set value, providing helpers for all
    set operations (insertion, deletion, resizing, set algebra...).
    """
    def __init__(self, context, builder, set_type, set_val):
        self._context = context
        self._builder = builder
        self._ty = set_type
        self._entrysize = get_entry_size(context, set_type)
        self._set = context.make_helper(builder, set_type, set_val)
    @property
    def dtype(self):
        return self._ty.dtype
    @property
    def payload(self):
        """
        The _SetPayload for this set.
        """
        # This cannot be cached as the pointer can move around!
        context = self._context
        builder = self._builder
        ptr = self._context.nrt.meminfo_data(builder, self.meminfo)
        return _SetPayload(context, builder, self._ty, ptr)
    @property
    def value(self):
        return self._set._getvalue()
    @property
    def meminfo(self):
        return self._set.meminfo
    @property
    def parent(self):
        return self._set.parent
    @parent.setter
    def parent(self, value):
        self._set.parent = value
    def get_size(self):
        """
        Return the number of elements in the set.
        """
        return self.payload.used
    def set_dirty(self, val):
        # Only reflected sets track dirtiness (for write-back to Python).
        if self._ty.reflected:
            self.payload.dirty = cgutils.true_bit if val else cgutils.false_bit
    def _add_entry(self, payload, entry, item, h, do_resize=True):
        # Write *item* (with hash *h*) into a known-free *entry*.
        context = self._context
        builder = self._builder
        old_hash = entry.hash
        entry.hash = h
        entry.key = item
        # used++
        used = payload.used
        one = ir.Constant(used.type, 1)
        used = payload.used = builder.add(used, one)
        # fill++ if entry wasn't a deleted one
        with builder.if_then(is_hash_empty(context, builder, old_hash),
                             likely=True):
            payload.fill = builder.add(payload.fill, one)
        # Grow table if necessary
        if do_resize:
            self.upsize(used)
        self.set_dirty(True)
    def _add_key(self, payload, item, h, do_resize=True):
        # Insert *item* (with hash *h*) unless it is already present.
        context = self._context
        builder = self._builder
        found, i = payload._lookup(item, h, for_insert=True)
        not_found = builder.not_(found)
        with builder.if_then(not_found):
            # Not found => add it
            entry = payload.get_entry(i)
            old_hash = entry.hash
            entry.hash = h
            entry.key = item
            # used++
            used = payload.used
            one = ir.Constant(used.type, 1)
            used = payload.used = builder.add(used, one)
            # fill++ if entry wasn't a deleted one
            with builder.if_then(is_hash_empty(context, builder, old_hash),
                                 likely=True):
                payload.fill = builder.add(payload.fill, one)
            # Grow table if necessary
            if do_resize:
                self.upsize(used)
            self.set_dirty(True)
    def _remove_entry(self, payload, entry, do_resize=True):
        # Mark entry deleted
        entry.hash = ir.Constant(entry.hash.type, DELETED)
        # used--
        used = payload.used
        one = ir.Constant(used.type, 1)
        used = payload.used = self._builder.sub(used, one)
        # Shrink table if necessary
        if do_resize:
            self.downsize(used)
        self.set_dirty(True)
    def _remove_key(self, payload, item, h, do_resize=True):
        # Remove *item* if present; return an i1 telling whether it was found.
        context = self._context
        builder = self._builder
        found, i = payload._lookup(item, h)
        with builder.if_then(found):
            entry = payload.get_entry(i)
            self._remove_entry(payload, entry, do_resize)
        return found
    def add(self, item, do_resize=True):
        """Add *item* to the set."""
        context = self._context
        builder = self._builder
        payload = self.payload
        h = get_hash_value(context, builder, self._ty.dtype, item)
        self._add_key(payload, item, h, do_resize)
    def add_pyapi(self, pyapi, item, do_resize=True):
        """A version of .add for use inside functions following Python calling
        convention.
        """
        context = self._context
        builder = self._builder
        payload = self.payload
        h = self._pyapi_get_hash_value(pyapi, context, builder, item)
        self._add_key(payload, item, h, do_resize)
    def _pyapi_get_hash_value(self, pyapi, context, builder, item):
        """Python API compatible version of `get_hash_value()`.
        """
        argtypes = [self._ty.dtype]
        resty = types.intp
        def wrapper(val):
            return _get_hash_value_intrinsic(val)
        args = [item]
        sig = typing.signature(resty, *argtypes)
        is_error, retval = pyapi.call_jit_code(wrapper, sig, args)
        # Handle return status
        with builder.if_then(is_error, likely=False):
            # Raise nopython exception as a Python exception
            builder.ret(pyapi.get_null_object())
        return retval
    def contains(self, item):
        """Return an i1 telling whether *item* is in the set."""
        context = self._context
        builder = self._builder
        payload = self.payload
        h = get_hash_value(context, builder, self._ty.dtype, item)
        found, i = payload._lookup(item, h)
        return found
    def discard(self, item):
        """Remove *item* if present; return an i1 telling whether it was."""
        context = self._context
        builder = self._builder
        payload = self.payload
        h = get_hash_value(context, builder, self._ty.dtype, item)
        found = self._remove_key(payload, item, h)
        return found
    def pop(self):
        """Remove and return an arbitrary element.  The caller must ensure
        the set is non-empty (see _SetPayload._next_entry)."""
        context = self._context
        builder = self._builder
        lty = context.get_value_type(self._ty.dtype)
        key = cgutils.alloca_once(builder, lty)
        payload = self.payload
        with payload._next_entry() as entry:
            builder.store(entry.key, key)
            self._remove_entry(payload, entry)
        return builder.load(key)
    def clear(self):
        """Remove all elements, shrinking the table back to MINSIZE."""
        context = self._context
        builder = self._builder
        intp_t = context.get_value_type(types.intp)
        minsize = ir.Constant(intp_t, MINSIZE)
        self._replace_payload(minsize)
        self.set_dirty(True)
    def copy(self):
        """
        Return a copy of this set.
        """
        context = self._context
        builder = self._builder
        payload = self.payload
        used = payload.used
        fill = payload.fill
        other = type(self)(context, builder, self._ty, None)
        no_deleted_entries = builder.icmp_unsigned('==', used, fill)
        with builder.if_else(no_deleted_entries, likely=True) \
                as (if_no_deleted, if_deleted):
            with if_no_deleted:
                # No deleted entries => raw copy the payload
                ok = other._copy_payload(payload)
                with builder.if_then(builder.not_(ok), likely=False):
                    context.call_conv.return_user_exc(builder, MemoryError,
                                                      ("cannot copy set",))
            with if_deleted:
                # Deleted entries => re-insert entries one by one
                nentries = self.choose_alloc_size(context, builder, used)
                ok = other._allocate_payload(nentries)
                with builder.if_then(builder.not_(ok), likely=False):
                    context.call_conv.return_user_exc(builder, MemoryError,
                                                      ("cannot copy set",))
                other_payload = other.payload
                with payload._iterate() as loop:
                    entry = loop.entry
                    other._add_key(other_payload, entry.key, entry.hash,
                                   do_resize=False)
        return other
    def intersect(self, other):
        """
        In-place intersection with *other* set.
        """
        context = self._context
        builder = self._builder
        payload = self.payload
        other_payload = other.payload
        with payload._iterate() as loop:
            entry = loop.entry
            found, _ = other_payload._lookup(entry.key, entry.hash)
            with builder.if_then(builder.not_(found)):
                self._remove_entry(payload, entry, do_resize=False)
        # Final downsize
        self.downsize(payload.used)
    def difference(self, other):
        """
        In-place difference with *other* set.
        """
        context = self._context
        builder = self._builder
        payload = self.payload
        other_payload = other.payload
        with other_payload._iterate() as loop:
            entry = loop.entry
            self._remove_key(payload, entry.key, entry.hash, do_resize=False)
        # Final downsize
        self.downsize(payload.used)
    def symmetric_difference(self, other):
        """
        In-place symmetric difference with *other* set.
        """
        context = self._context
        builder = self._builder
        other_payload = other.payload
        with other_payload._iterate() as loop:
            key = loop.entry.key
            h = loop.entry.hash
            # We must reload our payload as it may be resized during the loop
            payload = self.payload
            found, i = payload._lookup(key, h, for_insert=True)
            entry = payload.get_entry(i)
            with builder.if_else(found) as (if_common, if_not_common):
                with if_common:
                    self._remove_entry(payload, entry, do_resize=False)
                with if_not_common:
                    self._add_entry(payload, entry, key, h)
        # Final downsize
        self.downsize(self.payload.used)
    def issubset(self, other, strict=False):
        """Return an i1: is self a (strict, if *strict*) subset of *other*?"""
        context = self._context
        builder = self._builder
        payload = self.payload
        other_payload = other.payload
        cmp_op = '<' if strict else '<='
        res = cgutils.alloca_once_value(builder, cgutils.true_bit)
        with builder.if_else(
            builder.icmp_unsigned(cmp_op, payload.used, other_payload.used)
        ) as (if_smaller, if_larger):
            with if_larger:
                # self larger than other => self cannot possibly be a subset
                builder.store(cgutils.false_bit, res)
            with if_smaller:
                # check whether each key of self is in other
                with payload._iterate() as loop:
                    entry = loop.entry
                    found, _ = other_payload._lookup(entry.key, entry.hash)
                    with builder.if_then(builder.not_(found)):
                        builder.store(cgutils.false_bit, res)
                        loop.do_break()
        return builder.load(res)
    def isdisjoint(self, other):
        """Return an i1: do self and *other* share no elements?"""
        context = self._context
        builder = self._builder
        payload = self.payload
        other_payload = other.payload
        res = cgutils.alloca_once_value(builder, cgutils.true_bit)
        def check(smaller, larger):
            # Loop over the smaller of the two, and search in the larger
            with smaller._iterate() as loop:
                entry = loop.entry
                found, _ = larger._lookup(entry.key, entry.hash)
                with builder.if_then(found):
                    builder.store(cgutils.false_bit, res)
                    loop.do_break()
        with builder.if_else(
            builder.icmp_unsigned('>', payload.used, other_payload.used)
        ) as (if_larger, otherwise):
            with if_larger:
                # len(self) > len(other)
                check(other_payload, payload)
            with otherwise:
                # len(self) <= len(other)
                check(payload, other_payload)
        return builder.load(res)
    def equals(self, other):
        """Return an i1: do self and *other* contain the same elements?"""
        context = self._context
        builder = self._builder
        payload = self.payload
        other_payload = other.payload
        res = cgutils.alloca_once_value(builder, cgutils.true_bit)
        with builder.if_else(
            builder.icmp_unsigned('==', payload.used, other_payload.used)
        ) as (if_same_size, otherwise):
            with if_same_size:
                # same sizes => check whether each key of self is in other
                with payload._iterate() as loop:
                    entry = loop.entry
                    found, _ = other_payload._lookup(entry.key, entry.hash)
                    with builder.if_then(builder.not_(found)):
                        builder.store(cgutils.false_bit, res)
                        loop.do_break()
            with otherwise:
                # different sizes => cannot possibly be equal
                builder.store(cgutils.false_bit, res)
        return builder.load(res)
    @classmethod
    def allocate_ex(cls, context, builder, set_type, nitems=None):
        """
        Allocate a SetInstance with its storage.
        Return a (ok, instance) tuple where *ok* is a LLVM boolean and
        *instance* is a SetInstance object (the object's contents are
        only valid when *ok* is true).
        """
        intp_t = context.get_value_type(types.intp)
        if nitems is None:
            nentries = ir.Constant(intp_t, MINSIZE)
        else:
            if isinstance(nitems, int):
                nitems = ir.Constant(intp_t, nitems)
            nentries = cls.choose_alloc_size(context, builder, nitems)
        self = cls(context, builder, set_type, None)
        ok = self._allocate_payload(nentries)
        return ok, self
    @classmethod
    def allocate(cls, context, builder, set_type, nitems=None):
        """
        Allocate a SetInstance with its storage.  Same as allocate_ex(),
        but return an initialized *instance*.  If allocation failed,
        control is transferred to the caller using the target's current
        call convention.
        """
        ok, self = cls.allocate_ex(context, builder, set_type, nitems)
        with builder.if_then(builder.not_(ok), likely=False):
            context.call_conv.return_user_exc(builder, MemoryError,
                                              ("cannot allocate set",))
        return self
    @classmethod
    def from_meminfo(cls, context, builder, set_type, meminfo):
        """
        Allocate a new set instance pointing to an existing payload
        (a meminfo pointer).
        Note the parent field has to be filled by the caller.
        """
        self = cls(context, builder, set_type, None)
        self._set.meminfo = meminfo
        self._set.parent = context.get_constant_null(types.pyobject)
        context.nrt.incref(builder, set_type, self.value)
        # Payload is part of the meminfo, no need to touch it
        return self
    @classmethod
    def choose_alloc_size(cls, context, builder, nitems):
        """
        Choose a suitable number of entries for the given number of items.
        """
        intp_t = nitems.type
        one = ir.Constant(intp_t, 1)
        minsize = ir.Constant(intp_t, MINSIZE)
        # Ensure number of entries >= 2 * used
        min_entries = builder.shl(nitems, one)
        # Find out first suitable power of 2, starting from MINSIZE
        size_p = cgutils.alloca_once_value(builder, minsize)
        bb_body = builder.append_basic_block("calcsize.body")
        bb_end = builder.append_basic_block("calcsize.end")
        builder.branch(bb_body)
        with builder.goto_block(bb_body):
            size = builder.load(size_p)
            is_large_enough = builder.icmp_unsigned('>=', size, min_entries)
            with builder.if_then(is_large_enough, likely=False):
                builder.branch(bb_end)
            next_size = builder.shl(size, one)
            builder.store(next_size, size_p)
            builder.branch(bb_body)
        builder.position_at_end(bb_end)
        return builder.load(size_p)
    def upsize(self, nitems):
        """
        When adding to the set, ensure it is properly sized for the given
        number of used entries.
        """
        context = self._context
        builder = self._builder
        intp_t = nitems.type
        one = ir.Constant(intp_t, 1)
        two = ir.Constant(intp_t, 2)
        payload = self.payload
        # Ensure number of entries >= 2 * used
        min_entries = builder.shl(nitems, one)
        size = builder.add(payload.mask, one)
        need_resize = builder.icmp_unsigned('>=', min_entries, size)
        with builder.if_then(need_resize, likely=False):
            # Find out next suitable size
            new_size_p = cgutils.alloca_once_value(builder, size)
            bb_body = builder.append_basic_block("calcsize.body")
            bb_end = builder.append_basic_block("calcsize.end")
            builder.branch(bb_body)
            with builder.goto_block(bb_body):
                # Multiply by 4 (ensuring size remains a power of two)
                new_size = builder.load(new_size_p)
                new_size = builder.shl(new_size, two)
                builder.store(new_size, new_size_p)
                is_too_small = builder.icmp_unsigned('>=', min_entries, new_size)
                builder.cbranch(is_too_small, bb_body, bb_end)
            builder.position_at_end(bb_end)
            new_size = builder.load(new_size_p)
            if DEBUG_ALLOCS:
                context.printf(builder,
                               "upsize to %zd items: current size = %zd, "
                               "min entries = %zd, new size = %zd\n",
                               nitems, size, min_entries, new_size)
            self._resize(payload, new_size, "cannot grow set")
    def downsize(self, nitems):
        """
        When removing from the set, ensure it is properly sized for the given
        number of used entries.
        """
        context = self._context
        builder = self._builder
        intp_t = nitems.type
        one = ir.Constant(intp_t, 1)
        two = ir.Constant(intp_t, 2)
        minsize = ir.Constant(intp_t, MINSIZE)
        payload = self.payload
        # Ensure entries >= max(2 * used, MINSIZE)
        min_entries = builder.shl(nitems, one)
        min_entries = builder.select(builder.icmp_unsigned('>=', min_entries, minsize),
                                     min_entries, minsize)
        # Shrink only if size >= 4 * min_entries && size > MINSIZE
        max_size = builder.shl(min_entries, two)
        size = builder.add(payload.mask, one)
        need_resize = builder.and_(
            builder.icmp_unsigned('<=', max_size, size),
            builder.icmp_unsigned('<', minsize, size))
        with builder.if_then(need_resize, likely=False):
            # Find out next suitable size
            new_size_p = cgutils.alloca_once_value(builder, size)
            bb_body = builder.append_basic_block("calcsize.body")
            bb_end = builder.append_basic_block("calcsize.end")
            builder.branch(bb_body)
            with builder.goto_block(bb_body):
                # Divide by 2 (ensuring size remains a power of two)
                new_size = builder.load(new_size_p)
                new_size = builder.lshr(new_size, one)
                # Keep current size if new size would be < min_entries
                is_too_small = builder.icmp_unsigned('>', min_entries, new_size)
                with builder.if_then(is_too_small):
                    builder.branch(bb_end)
                builder.store(new_size, new_size_p)
                builder.branch(bb_body)
            builder.position_at_end(bb_end)
            # Ensure new_size >= MINSIZE
            new_size = builder.load(new_size_p)
            # At this point, new_size should be < size if the factors
            # above were chosen carefully!
            if DEBUG_ALLOCS:
                context.printf(builder,
                               "downsize to %zd items: current size = %zd, "
                               "min entries = %zd, new size = %zd\n",
                               nitems, size, min_entries, new_size)
            self._resize(payload, new_size, "cannot shrink set")
    def _resize(self, payload, nentries, errmsg):
        """
        Resize the payload to the given number of entries.

        CAUTION: *nentries* must be a power of 2!
        """
        context = self._context
        builder = self._builder
        # Allocate new entries
        old_payload = payload
        ok = self._allocate_payload(nentries, realloc=True)
        with builder.if_then(builder.not_(ok), likely=False):
            context.call_conv.return_user_exc(builder, MemoryError,
                                              (errmsg,))
        # Re-insert old entries
        payload = self.payload
        with old_payload._iterate() as loop:
            entry = loop.entry
            self._add_key(payload, entry.key, entry.hash,
                          do_resize=False)
        self._free_payload(old_payload.ptr)
    def _replace_payload(self, nentries):
        """
        Replace the payload with a new empty payload with the given number
        of entries.

        CAUTION: *nentries* must be a power of 2!
        """
        context = self._context
        builder = self._builder
        # Free old payload
        self._free_payload(self.payload.ptr)
        ok = self._allocate_payload(nentries, realloc=True)
        with builder.if_then(builder.not_(ok), likely=False):
            context.call_conv.return_user_exc(builder, MemoryError,
                                              ("cannot reallocate set",))
    def _allocate_payload(self, nentries, realloc=False):
        """
        Allocate and initialize payload for the given number of entries.
        If *realloc* is True, the existing meminfo is reused.

        CAUTION: *nentries* must be a power of 2!
        """
        context = self._context
        builder = self._builder
        ok = cgutils.alloca_once_value(builder, cgutils.true_bit)
        intp_t = context.get_value_type(types.intp)
        zero = ir.Constant(intp_t, 0)
        one = ir.Constant(intp_t, 1)
        payload_type = context.get_data_type(types.SetPayload(self._ty))
        payload_size = context.get_abi_sizeof(payload_type)
        entry_size = self._entrysize
        # Account for the fact that the payload struct already contains an entry
        payload_size -= entry_size
        # Total allocation size = <payload header size> + nentries * entry_size
        allocsize, ovf = cgutils.muladd_with_overflow(builder, nentries,
                                                      ir.Constant(intp_t, entry_size),
                                                      ir.Constant(intp_t, payload_size))
        with builder.if_then(ovf, likely=False):
            builder.store(cgutils.false_bit, ok)
        with builder.if_then(builder.load(ok), likely=True):
            if realloc:
                meminfo = self._set.meminfo
                ptr = context.nrt.meminfo_varsize_alloc(builder, meminfo,
                                                        size=allocsize)
                alloc_ok = cgutils.is_null(builder, ptr)
            else:
                meminfo = context.nrt.meminfo_new_varsize(builder, size=allocsize)
                alloc_ok = cgutils.is_null(builder, meminfo)
            # BUGFIX: branch on *alloc_ok* (nullness of the allocation just
            # made).  The previous test of `meminfo` was wrong on the
            # realloc path, where `meminfo` is the pre-existing (non-null)
            # meminfo and the fresh allocation result is `ptr`, so a failed
            # reallocation went undetected.
            with builder.if_else(alloc_ok,
                                 likely=False) as (if_error, if_ok):
                with if_error:
                    builder.store(cgutils.false_bit, ok)
                with if_ok:
                    if not realloc:
                        self._set.meminfo = meminfo
                        self._set.parent = context.get_constant_null(types.pyobject)
                    payload = self.payload
                    # Initialize entries to 0xff (EMPTY)
                    cgutils.memset(builder, payload.ptr, allocsize, 0xFF)
                    payload.used = zero
                    payload.fill = zero
                    payload.finger = zero
                    new_mask = builder.sub(nentries, one)
                    payload.mask = new_mask
                    if DEBUG_ALLOCS:
                        context.printf(builder,
                                       "allocated %zd bytes for set at %p: mask = %zd\n",
                                       allocsize, payload.ptr, new_mask)
        return builder.load(ok)
    def _free_payload(self, ptr):
        """
        Free an allocated old payload at *ptr*.
        """
        self._context.nrt.meminfo_varsize_free(self._builder, self.meminfo, ptr)
    def _copy_payload(self, src_payload):
        """
        Raw-copy the given payload into self.
        """
        context = self._context
        builder = self._builder
        ok = cgutils.alloca_once_value(builder, cgutils.true_bit)
        intp_t = context.get_value_type(types.intp)
        zero = ir.Constant(intp_t, 0)
        one = ir.Constant(intp_t, 1)
        payload_type = context.get_data_type(types.SetPayload(self._ty))
        payload_size = context.get_abi_sizeof(payload_type)
        entry_size = self._entrysize
        # Account for the fact that the payload struct already contains an entry
        payload_size -= entry_size
        mask = src_payload.mask
        nentries = builder.add(one, mask)
        # Total allocation size = <payload header size> + nentries * entry_size
        # (note there can't be any overflow since we're reusing an existing
        #  payload's parameters)
        allocsize = builder.add(ir.Constant(intp_t, payload_size),
                                builder.mul(ir.Constant(intp_t, entry_size),
                                            nentries))
        with builder.if_then(builder.load(ok), likely=True):
            meminfo = context.nrt.meminfo_new_varsize(builder, size=allocsize)
            alloc_ok = cgutils.is_null(builder, meminfo)
            # Use the flag computed above instead of re-testing meminfo
            # (keeps this consistent with _allocate_payload).
            with builder.if_else(alloc_ok,
                                 likely=False) as (if_error, if_ok):
                with if_error:
                    builder.store(cgutils.false_bit, ok)
                with if_ok:
                    self._set.meminfo = meminfo
                    payload = self.payload
                    payload.used = src_payload.used
                    payload.fill = src_payload.fill
                    payload.finger = zero
                    payload.mask = mask
                    cgutils.raw_memcpy(builder, payload.entries,
                                       src_payload.entries, nentries,
                                       entry_size)
                    if DEBUG_ALLOCS:
                        context.printf(builder,
                                       "allocated %zd bytes for set at %p: mask = %zd\n",
                                       allocsize, payload.ptr, mask)
        return builder.load(ok)
class SetIterInstance(object):
    """
    Helper wrapping a native set-iterator value during lowering.
    """

    def __init__(self, context, builder, iter_type, iter_val):
        self._context = context
        self._builder = builder
        self._ty = iter_type
        self._iter = context.make_helper(builder, iter_type, iter_val)
        ptr = self._context.nrt.meminfo_data(builder, self.meminfo)
        self._payload = _SetPayload(context, builder, self._ty.container, ptr)

    @classmethod
    def from_set(cls, context, builder, iter_type, set_val):
        # Build a fresh iterator over *set_val*, positioned at index 0 and
        # sharing the set's meminfo.
        set_inst = SetInstance(context, builder, iter_type.container, set_val)
        self = cls(context, builder, iter_type, None)
        index = context.get_constant(types.intp, 0)
        self._iter.index = cgutils.alloca_once_value(builder, index)
        self._iter.meminfo = set_inst.meminfo
        return self

    @property
    def value(self):
        return self._iter._getvalue()

    @property
    def meminfo(self):
        return self._iter.meminfo

    @property
    def index(self):
        # The index lives behind an alloca; load it on every access.
        return self._builder.load(self._iter.index)

    @index.setter
    def index(self, value):
        self._builder.store(value, self._iter.index)

    def iternext(self, result):
        # Scan the payload from the saved index; yield the first occupied
        # entry and remember the position just past it for the next call.
        index = self.index
        payload = self._payload
        one = ir.Constant(index.type, 1)
        result.set_exhausted()
        with payload._iterate(start=index) as loop:
            # An entry was found
            entry = loop.entry
            result.set_valid()
            result.yield_(entry.key)
            self.index = self._builder.add(loop.index, one)
            loop.do_break()
#-------------------------------------------------------------------------------
# Constructors
def build_set(context, builder, set_type, items):
    """
    Build a set of the given type, containing the given items.
    """
    n = len(items)
    result = SetInstance.allocate(context, builder, set_type, n)
    # Inlining one insertion per item would bloat the generated code, so
    # spill the items into a stack array and insert them in a loop.
    packed = cgutils.pack_array(builder, items)
    packed_ptr = cgutils.alloca_once_value(builder, packed)
    total = context.get_constant(types.intp, n)
    with cgutils.for_range(builder, total) as loop:
        slot = cgutils.gep(builder, packed_ptr, 0, loop.index)
        result.add(builder.load(slot))
    return impl_ret_new_ref(context, builder, set_type, result.value)
@lower_builtin(set)
def set_empty_constructor(context, builder, sig, args):
    """Lower ``set()``: allocate a fresh, empty set."""
    ty = sig.return_type
    new_set = SetInstance.allocate(context, builder, ty)
    return impl_ret_new_ref(context, builder, ty, new_set.value)
@lower_builtin(set, types.IterableType)
def set_constructor(context, builder, sig, args):
    """Lower ``set(iterable)``."""
    ty = sig.return_type
    src_ty, = sig.args
    src, = args
    # If the argument supports len(), preallocate to avoid resizes.
    size_hint = call_len(context, builder, src_ty, src)
    new_set = SetInstance.allocate(context, builder, ty, size_hint)
    with for_iter(context, builder, src_ty, src) as loop:
        new_set.add(loop.value)
    return impl_ret_new_ref(context, builder, ty, new_set.value)
#-------------------------------------------------------------------------------
# Various operations
@lower_builtin(len, types.Set)
def set_len(context, builder, sig, args):
    """Lower ``len(set)``."""
    return SetInstance(context, builder, sig.args[0], args[0]).get_size()
@lower_builtin(operator.contains, types.Set, types.Any)
def in_set(context, builder, sig, args):
    """Lower ``item in set``."""
    haystack = SetInstance(context, builder, sig.args[0], args[0])
    return haystack.contains(args[1])
@lower_builtin('getiter', types.Set)
def getiter_set(context, builder, sig, args):
    """Lower ``iter(set)``: create a set iterator borrowing the set."""
    it = SetIterInstance.from_set(context, builder, sig.return_type, args[0])
    return impl_ret_borrowed(context, builder, sig.return_type, it.value)
@lower_builtin('iternext', types.SetIter)
@iternext_impl(RefType.BORROWED)
def iternext_listiter(context, builder, sig, args, result):
    # NOTE(review): despite the "listiter" name (apparent copy-paste), this
    # lowers iteration over a *set* iterator; the name is kept since it is
    # the registered identifier.
    it = SetIterInstance(context, builder, sig.args[0], args[0])
    it.iternext(result)
#-------------------------------------------------------------------------------
# Methods
# One-item-at-a-time operations
@lower_builtin("set.add", types.Set, types.Any)
def set_add(context, builder, sig, args):
inst = SetInstance(context, builder, sig.args[0], args[0])
item = args[1]
inst.add(item)
return context.get_dummy_value()
@lower_builtin("set.discard", types.Set, types.Any)
def set_discard(context, builder, sig, args):
inst = SetInstance(context, builder, sig.args[0], args[0])
item = args[1]
inst.discard(item)
return context.get_dummy_value()
@lower_builtin("set.pop", types.Set)
def set_pop(context, builder, sig, args):
inst = SetInstance(context, builder, sig.args[0], args[0])
used = inst.payload.used
with builder.if_then(cgutils.is_null(builder, used), likely=False):
context.call_conv.return_user_exc(builder, KeyError,
("set.pop(): empty set",))
return inst.pop()
@lower_builtin("set.remove", types.Set, types.Any)
def set_remove(context, builder, sig, args):
inst = SetInstance(context, builder, sig.args[0], args[0])
item = args[1]
found = inst.discard(item)
with builder.if_then(builder.not_(found), likely=False):
context.call_conv.return_user_exc(builder, KeyError,
("set.remove(): key not in set",))
return context.get_dummy_value()
# Mutating set operations
@lower_builtin("set.clear", types.Set)
def set_clear(context, builder, sig, args):
inst = SetInstance(context, builder, sig.args[0], args[0])
inst.clear()
return context.get_dummy_value()
@lower_builtin("set.copy", types.Set)
def set_copy(context, builder, sig, args):
inst = SetInstance(context, builder, sig.args[0], args[0])
other = inst.copy()
return impl_ret_new_ref(context, builder, sig.return_type, other.value)
@lower_builtin("set.difference_update", types.Set, types.IterableType)
def set_difference_update(context, builder, sig, args):
inst = SetInstance(context, builder, sig.args[0], args[0])
other = SetInstance(context, builder, sig.args[1], args[1])
inst.difference(other)
return context.get_dummy_value()
@lower_builtin("set.intersection_update", types.Set, types.Set)
def set_intersection_update(context, builder, sig, args):
inst = SetInstance(context, builder, sig.args[0], args[0])
other = SetInstance(context, builder, sig.args[1], args[1])
inst.intersect(other)
return context.get_dummy_value()
@lower_builtin("set.symmetric_difference_update", types.Set, types.Set)
def set_symmetric_difference_update(context, builder, sig, args):
inst = SetInstance(context, builder, sig.args[0], args[0])
other = SetInstance(context, builder, sig.args[1], args[1])
inst.symmetric_difference(other)
return context.get_dummy_value()
@lower_builtin("set.update", types.Set, types.IterableType)
def set_update(context, builder, sig, args):
inst = SetInstance(context, builder, sig.args[0], args[0])
items_type = sig.args[1]
items = args[1]
# If the argument has a len(), assume there are few collisions and
# presize to len(set) + len(items)
n = call_len(context, builder, items_type, items)
if n is not None:
new_size = builder.add(inst.payload.used, n)
inst.upsize(new_size)
with for_iter(context, builder, items_type, items) as loop:
inst.add(loop.value)
if n is not None:
# If we pre-grew the set, downsize in case there were many collisions
inst.downsize(inst.payload.used)
return context.get_dummy_value()
# Register the in-place set operators (&=, |=, -=, ^=) by delegating to
# the corresponding *_update implementations and returning the mutated
# left-hand operand.
for op_, op_impl in [
    (operator.iand, set_intersection_update),
    (operator.ior, set_update),
    (operator.isub, set_difference_update),
    (operator.ixor, set_symmetric_difference_update),
]:
    @lower_builtin(op_, types.Set, types.Set)
    def set_inplace(context, builder, sig, args, op_impl=op_impl):
        # `op_impl=op_impl` binds the current loop value at definition
        # time, avoiding the late-binding closure pitfall.
        assert sig.return_type == sig.args[0]
        op_impl(context, builder, sig, args)
        return impl_ret_borrowed(context, builder, sig.args[0], args[0])
# Set operations creating a new set
@lower_builtin(operator.sub, types.Set, types.Set)
@lower_builtin("set.difference", types.Set, types.Set)
def set_difference(context, builder, sig, args):
    """Lower ``a - b`` / ``a.difference(b)`` via a compiled Python impl."""
    def difference_impl(a, b):
        result = a.copy()
        result.difference_update(b)
        return result
    return context.compile_internal(builder, difference_impl, sig, args)
@lower_builtin(operator.and_, types.Set, types.Set)
@lower_builtin("set.intersection", types.Set, types.Set)
def set_intersection(context, builder, sig, args):
    """Lower ``a & b`` / ``a.intersection(b)``."""
    def intersection_impl(a, b):
        # Copy the smaller operand and prune it against the larger one.
        if len(a) < len(b):
            result = a.copy()
            result.intersection_update(b)
        else:
            result = b.copy()
            result.intersection_update(a)
        return result
    return context.compile_internal(builder, intersection_impl, sig, args)
@lower_builtin(operator.xor, types.Set, types.Set)
@lower_builtin("set.symmetric_difference", types.Set, types.Set)
def set_symmetric_difference(context, builder, sig, args):
    """Lower ``a ^ b`` / ``a.symmetric_difference(b)``."""
    def symmetric_difference_impl(a, b):
        # Copy the larger operand, then toggle against the smaller one.
        if len(a) > len(b):
            result = a.copy()
            result.symmetric_difference_update(b)
        else:
            result = b.copy()
            result.symmetric_difference_update(a)
        return result
    return context.compile_internal(builder, symmetric_difference_impl,
                                    sig, args)
@lower_builtin(operator.or_, types.Set, types.Set)
@lower_builtin("set.union", types.Set, types.Set)
def set_union(context, builder, sig, args):
    """Lower ``a | b`` / ``a.union(b)``."""
    def union_impl(a, b):
        # Copy the larger operand, then merge in the smaller one.
        if len(a) > len(b):
            result = a.copy()
            result.update(b)
        else:
            result = b.copy()
            result.update(a)
        return result
    return context.compile_internal(builder, union_impl, sig, args)
# Predicates
@lower_builtin("set.isdisjoint", types.Set, types.Set)
def set_isdisjoint(context, builder, sig, args):
inst = SetInstance(context, builder, sig.args[0], args[0])
other = SetInstance(context, builder, sig.args[1], args[1])
return inst.isdisjoint(other)
@lower_builtin(operator.le, types.Set, types.Set)
@lower_builtin("set.issubset", types.Set, types.Set)
def set_issubset(context, builder, sig, args):
    """Lower ``a <= b`` / ``a.issubset(b)``."""
    lhs = SetInstance(context, builder, sig.args[0], args[0])
    rhs = SetInstance(context, builder, sig.args[1], args[1])
    return lhs.issubset(rhs)
@lower_builtin(operator.ge, types.Set, types.Set)
@lower_builtin("set.issuperset", types.Set, types.Set)
def set_issuperset(context, builder, sig, args):
    """Lower ``a >= b`` / ``a.issuperset(b)`` by delegating to issubset."""
    def _superset(a, b):
        return b.issubset(a)
    return context.compile_internal(builder, _superset, sig, args)
@lower_builtin(operator.eq, types.Set, types.Set)
def set_eq(context, builder, sig, args):
    """
    Lower ``a == b`` for sets.

    Fix: this function was mistakenly named ``set_isdisjoint`` (copy-paste),
    shadowing the real ``set_isdisjoint`` defined above at module level.
    The registration is done by the decorator, not by name, so renaming it
    to ``set_eq`` changes no behavior and restores the disjointness
    implementation's module-level name.
    """
    inst = SetInstance(context, builder, sig.args[0], args[0])
    other = SetInstance(context, builder, sig.args[1], args[1])
    return inst.equals(other)
@lower_builtin(operator.ne, types.Set, types.Set)
def set_ne(context, builder, sig, args):
    """Lower ``a != b`` as the negation of ``==``."""
    def _ne(a, b):
        return not a == b
    return context.compile_internal(builder, _ne, sig, args)
@lower_builtin(operator.lt, types.Set, types.Set)
def set_lt(context, builder, sig, args):
    """Lower ``a < b``: the strict-subset test."""
    lhs = SetInstance(context, builder, sig.args[0], args[0])
    rhs = SetInstance(context, builder, sig.args[1], args[1])
    return lhs.issubset(rhs, strict=True)
@lower_builtin(operator.gt, types.Set, types.Set)
def set_gt(context, builder, sig, args):
    """Lower ``a > b`` by flipping the operands of ``<``."""
    def _gt(a, b):
        return b < a
    return context.compile_internal(builder, _gt, sig, args)
@lower_builtin(operator.is_, types.Set, types.Set)
def set_is(context, builder, sig, args):
    """Lower ``a is b`` as identity of the underlying meminfo pointers."""
    lhs = SetInstance(context, builder, sig.args[0], args[0])
    rhs = SetInstance(context, builder, sig.args[1], args[1])
    lhs_addr = builder.ptrtoint(lhs.meminfo, cgutils.intp_t)
    rhs_addr = builder.ptrtoint(rhs.meminfo, cgutils.intp_t)
    return builder.icmp_signed('==', lhs_addr, rhs_addr)
# -----------------------------------------------------------------------------
# Implicit casting
@lower_cast(types.Set, types.Set)
def set_to_set(context, builder, fromty, toty, val):
    # Casting from non-reflected to reflected
    # The value is returned unchanged: only the dtype must agree for the
    # cast to be a no-op at the data level.
    assert fromty.dtype == toty.dtype
    return val
|
|
#!/usr/bin/env python
# encoding: utf-8
"""Contains the SnippetManager facade used by all Vim Functions."""
from collections import defaultdict
from functools import wraps
import os
import platform
import traceback
from UltiSnips import _vim
from UltiSnips._diff import diff, guess_edit
from UltiSnips.compatibility import as_unicode
from UltiSnips.position import Position
from UltiSnips.snippet.definition import UltiSnipsSnippetDefinition
from UltiSnips.snippet.source import UltiSnipsFileSource, SnipMateFileSource, \
find_all_snippet_files, find_snippet_files, AddedSnippetsSource
from UltiSnips.text import escape
from UltiSnips.vim_state import VimState, VisualContentPreserver
def _ask_user(a, formatted):
    """Asks the user using inputlist() and returns the selected element or
    None."""
    try:
        answer = _vim.eval('inputlist(%s)' % _vim.escape(formatted))
        if answer is None or answer == '0':
            return None
        # Clamp out-of-range answers to the last element.
        chosen = min(int(answer), len(a))
        return a[chosen - 1]
    except _vim.error:
        # Likely "invalid expression", but might be translated. We have no way
        # of knowing the exact error, therefore, we ignore all errors silently.
        return None
    except KeyboardInterrupt:
        return None
def _ask_snippets(snippets):
    """Given a list of snippets, ask the user which one they want to use, and
    return it."""
    display = [
        as_unicode('%i: %s (%s)') % (idx + 1,
                                     escape(snip.description, '\\'),
                                     escape(snip.location, '\\'))
        for idx, snip in enumerate(snippets)
    ]
    return _ask_user(snippets, display)
def err_to_scratch_buffer(func):
    """Decorator that will catch any Exception that 'func' throws and displays
    it in a new Vim scratch buffer.

    Fix: corrected the user-facing typo "occured" -> "occurred".
    """
    @wraps(func)
    def wrapper(self, *args, **kwds):
        try:
            return func(self, *args, **kwds)
        except:  # pylint: disable=bare-except
            # Deliberately broad: any failure inside UltiSnips must be
            # surfaced to the user rather than abort the Vim mapping.
            msg = \
"""An error occurred. This is either a bug in UltiSnips or a bug in a
snippet definition. If you think this is a bug, please report it to
https://github.com/SirVer/ultisnips/issues/new.

Following is the full stack trace:
"""
            msg += traceback.format_exc()
            # Vim sends no WinLeave msg here.
            self._leaving_buffer()  # pylint:disable=protected-access
            _vim.new_scratch_buffer(msg)
    return wrapper
# TODO(sirver): This class is still too long. It should only contain public
# facing methods, most of the private methods should be moved outside of it.
# TODO(sirver): This class is still too long. It should only contain public
# facing methods, most of the private methods should be moved outside of it.
class SnippetManager(object):
    """The main entry point for all UltiSnips functionality.

    All Vim functions call methods in this class.

    Fixes applied in review:
      * ``__init__``: the SnipMate default was ``True`` but is compared
        against the string ``'1'``, so SnipMate support was disabled when
        ``g:UltiSnipsEnableSnipMate`` was unset; default is now ``'1'``.
      * ``list_snippets``: removed a dead duplicate emptiness check.
      * ``add_buffer_filetypes``: loop variable no longer shadows the
        ``ft`` parameter.
      * ``_do_snippet``: hoisted duplicated start/end computation.
    """

    def __init__(self, expand_trigger, forward_trigger, backward_trigger):
        self.expand_trigger = expand_trigger
        self.forward_trigger = forward_trigger
        self.backward_trigger = backward_trigger
        self._inner_mappings_in_place = False
        self._supertab_keys = None
        self._csnippets = []
        # Maps buffer number -> ordered list of filetypes to consider.
        self._buffer_filetypes = defaultdict(lambda: ['all'])
        self._vstate = VimState()
        self._visual_content = VisualContentPreserver()
        self._snippet_sources = []
        self._added_snippets_source = AddedSnippetsSource()
        self.register_snippet_source('ultisnips_files', UltiSnipsFileSource())
        self.register_snippet_source('added', self._added_snippets_source)
        # SnipMate support defaults to on; the sentinel must be the string
        # '1' since Vim variables evaluate to strings.
        enable_snipmate = '1'
        if _vim.eval("exists('g:UltiSnipsEnableSnipMate')") == '1':
            enable_snipmate = _vim.eval('g:UltiSnipsEnableSnipMate')
        if enable_snipmate == '1':
            self.register_snippet_source('snipmate_files',
                                         SnipMateFileSource())
        self._reinit()

    @err_to_scratch_buffer
    def jump_forwards(self):
        """Jumps to the next tabstop."""
        _vim.command('let g:ulti_jump_forwards_res = 1')
        _vim.command('let &undolevels = &undolevels')
        if not self._jump():
            _vim.command('let g:ulti_jump_forwards_res = 0')
            return self._handle_failure(self.forward_trigger)

    @err_to_scratch_buffer
    def jump_backwards(self):
        """Jumps to the previous tabstop."""
        _vim.command('let g:ulti_jump_backwards_res = 1')
        _vim.command('let &undolevels = &undolevels')
        if not self._jump(True):
            _vim.command('let g:ulti_jump_backwards_res = 0')
            return self._handle_failure(self.backward_trigger)

    @err_to_scratch_buffer
    def expand(self):
        """Try to expand a snippet at the current position."""
        _vim.command('let g:ulti_expand_res = 1')
        if not self._try_expand():
            _vim.command('let g:ulti_expand_res = 0')
            self._handle_failure(self.expand_trigger)

    @err_to_scratch_buffer
    def expand_or_jump(self):
        """This function is used for people who want to have the same trigger
        for expansion and forward jumping.

        It first tries to expand a snippet, if this fails, it tries to
        jump forward.
        """
        _vim.command('let g:ulti_expand_or_jump_res = 1')
        rv = self._try_expand()
        if not rv:
            _vim.command('let g:ulti_expand_or_jump_res = 2')
            rv = self._jump()
        if not rv:
            _vim.command('let g:ulti_expand_or_jump_res = 0')
            self._handle_failure(self.expand_trigger)

    @err_to_scratch_buffer
    def snippets_in_current_scope(self):
        """Returns the snippets that could be expanded to Vim as a global
        variable."""
        before = _vim.buf.line_till_cursor
        snippets = self._snips(before, True)
        # Sort snippets alphabetically
        snippets.sort(key=lambda x: x.trigger)
        for snip in snippets:
            description = snip.description[snip.description.find(snip.trigger) +
                                           len(snip.trigger) + 2:]
            key = as_unicode(snip.trigger)
            description = as_unicode(description)
            # remove surrounding "" or '' in snippet description if it exists
            if len(description) > 2:
                if (description[0] == description[-1] and
                        description[0] in "'\""):
                    description = description[1:-1]
            _vim.command(as_unicode(
                "let g:current_ulti_dict['{key}'] = '{val}'").format(
                    key=key.replace("'", "''"),
                    val=description.replace("'", "''")))

    @err_to_scratch_buffer
    def list_snippets(self):
        """Shows the snippets that could be expanded to the User and let her
        select one."""
        before = _vim.buf.line_till_cursor
        snippets = self._snips(before, True)
        if not snippets:
            self._handle_failure(self.backward_trigger)
            return True
        # Sort snippets alphabetically
        snippets.sort(key=lambda x: x.trigger)
        snippet = _ask_snippets(snippets)
        if not snippet:
            return True
        self._do_snippet(snippet, before)
        return True

    @err_to_scratch_buffer
    def add_snippet(self, trigger, value, description,
                    options, ft='all', priority=0, context=None):
        """Add a snippet to the list of known snippets of the given 'ft'."""
        self._added_snippets_source.add_snippet(
            ft,
            UltiSnipsSnippetDefinition(priority, trigger, value,
                                       description, options, {}, 'added',
                                       context))

    @err_to_scratch_buffer
    def expand_anon(self, value, trigger='', description='', options='',
                    context=None):
        """Expand an anonymous snippet right here."""
        before = _vim.buf.line_till_cursor
        snip = UltiSnipsSnippetDefinition(0, trigger, value, description,
                                          options, {}, '', context)
        if not trigger or snip.matches(before):
            self._do_snippet(snip, before)
            return True
        else:
            return False

    def register_snippet_source(self, name, snippet_source):
        """Registers a new 'snippet_source' with the given 'name'.

        The given class must be an instance of SnippetSource. This
        source will be queried for snippets.
        """
        self._snippet_sources.append((name, snippet_source))

    def unregister_snippet_source(self, name):
        """Unregister the source with the given 'name'.

        Does nothing if it is not registered.
        """
        for index, (source_name, _) in enumerate(self._snippet_sources):
            if name == source_name:
                self._snippet_sources = self._snippet_sources[:index] + \
                    self._snippet_sources[index + 1:]
                break

    def reset_buffer_filetypes(self):
        """Reset the filetypes for the current buffer."""
        if _vim.buf.number in self._buffer_filetypes:
            del self._buffer_filetypes[_vim.buf.number]

    def add_buffer_filetypes(self, ft):
        """Adds the dotted filetype list 'ft' to the known filetypes for the
        current buffer, keeping order and skipping duplicates."""
        buf_fts = self._buffer_filetypes[_vim.buf.number]
        idx = -1
        # Use a distinct loop variable; the original shadowed the `ft`
        # parameter.
        for filetype in ft.split('.'):
            filetype = filetype.strip()
            if not filetype:
                continue
            try:
                idx = buf_fts.index(filetype)
            except ValueError:
                self._buffer_filetypes[_vim.buf.number].insert(
                    idx + 1, filetype)
                idx += 1

    @err_to_scratch_buffer
    def _cursor_moved(self):
        """Called whenever the cursor moved."""
        if not self._csnippets and self._inner_mappings_in_place:
            self._unmap_inner_keys()
        self._vstate.remember_position()
        if _vim.eval('mode()') not in 'in':
            return
        if self._ignore_movements:
            self._ignore_movements = False
            return
        if self._csnippets:
            cstart = self._csnippets[0].start.line
            cend = self._csnippets[0].end.line + \
                self._vstate.diff_in_buffer_length
            ct = _vim.buf[cstart:cend + 1]
            lt = self._vstate.remembered_buffer
            pos = _vim.buf.cursor
            lt_span = [0, len(lt)]
            ct_span = [0, len(ct)]
            initial_line = cstart
            # Cut down on lines searched for changes. Start from behind and
            # remove all equal lines. Then do the same from the front.
            if lt and ct:
                while (lt[lt_span[1] - 1] == ct[ct_span[1] - 1] and
                        self._vstate.ppos.line < initial_line + lt_span[1] - 1 and
                        pos.line < initial_line + ct_span[1] - 1 and
                        (lt_span[0] < lt_span[1]) and
                        (ct_span[0] < ct_span[1])):
                    ct_span[1] -= 1
                    lt_span[1] -= 1
                while (lt_span[0] < lt_span[1] and
                        ct_span[0] < ct_span[1] and
                        lt[lt_span[0]] == ct[ct_span[0]] and
                        self._vstate.ppos.line >= initial_line and
                        pos.line >= initial_line):
                    ct_span[0] += 1
                    lt_span[0] += 1
                    initial_line += 1
                ct_span[0] = max(0, ct_span[0] - 1)
                lt_span[0] = max(0, lt_span[0] - 1)
                initial_line = max(cstart, initial_line - 1)
            lt = lt[lt_span[0]:lt_span[1]]
            ct = ct[ct_span[0]:ct_span[1]]
            try:
                rv, es = guess_edit(initial_line, lt, ct, self._vstate)
                if not rv:
                    lt = '\n'.join(lt)
                    ct = '\n'.join(ct)
                    es = diff(lt, ct, initial_line)
                self._csnippets[0].replay_user_edits(es, self._ctab)
            except IndexError:
                # Rather do nothing than throwing an error. It will be correct
                # most of the time
                pass
        self._check_if_still_inside_snippet()
        if self._csnippets:
            self._csnippets[0].update_textobjects()
            self._vstate.remember_buffer(self._csnippets[0])

    def _map_inner_keys(self):
        """Map keys that should only be defined when a snippet is active."""
        if self.expand_trigger != self.forward_trigger:
            _vim.command('inoremap <buffer> <silent> ' + self.forward_trigger +
                         ' <C-R>=UltiSnips#JumpForwards()<cr>')
            _vim.command('snoremap <buffer> <silent> ' + self.forward_trigger +
                         ' <Esc>:call UltiSnips#JumpForwards()<cr>')
        _vim.command('inoremap <buffer> <silent> ' + self.backward_trigger +
                     ' <C-R>=UltiSnips#JumpBackwards()<cr>')
        _vim.command('snoremap <buffer> <silent> ' + self.backward_trigger +
                     ' <Esc>:call UltiSnips#JumpBackwards()<cr>')
        self._inner_mappings_in_place = True

    def _unmap_inner_keys(self):
        """Unmap keys that should not be active when no snippet is active."""
        if not self._inner_mappings_in_place:
            return
        try:
            if self.expand_trigger != self.forward_trigger:
                _vim.command('iunmap <buffer> %s' % self.forward_trigger)
                _vim.command('sunmap <buffer> %s' % self.forward_trigger)
            _vim.command('iunmap <buffer> %s' % self.backward_trigger)
            _vim.command('sunmap <buffer> %s' % self.backward_trigger)
            self._inner_mappings_in_place = False
        except _vim.error:
            # This happens when a preview window was opened. This issues
            # CursorMoved, but not BufLeave. We have no way to unmap, until we
            # are back in our buffer
            pass

    @err_to_scratch_buffer
    def _save_last_visual_selection(self):
        """This is called when the expand trigger is pressed in visual mode.

        Our job is to remember everything between '< and '> and pass it on to
        ${VISUAL} in case it will be needed.
        """
        self._visual_content.conserve()

    def _leaving_buffer(self):
        """Called when the user switches tabs/windows/buffers.

        It basically means that all snippets must be properly
        terminated.
        """
        while len(self._csnippets):
            self._current_snippet_is_done()
        self._reinit()

    def _reinit(self):
        """Resets transient state."""
        self._ctab = None
        self._ignore_movements = False

    def _check_if_still_inside_snippet(self):
        """Checks if the cursor is outside of the current snippet."""
        if self._cs and (
            not self._cs.start <= _vim.buf.cursor <= self._cs.end
        ):
            self._current_snippet_is_done()
            self._reinit()
            # Recurse: the cursor may have left several nested snippets.
            self._check_if_still_inside_snippet()

    def _current_snippet_is_done(self):
        """The current snippet should be terminated."""
        self._csnippets.pop()
        if not self._csnippets:
            self._unmap_inner_keys()

    def _jump(self, backwards=False):
        """Helper method that does the actual jump."""
        jumped = False
        # If next tab has length 1 and the distance between itself and
        # self._ctab is 1 then there is 1 less CursorMove events. We
        # cannot ignore next movement in such case.
        ntab_short_and_near = False
        if self._cs:
            ntab = self._cs.select_next_tab(backwards)
            if ntab:
                if self._cs.snippet.has_option('s'):
                    lineno = _vim.buf.cursor.line
                    _vim.buf[lineno] = _vim.buf[lineno].rstrip()
                _vim.select(ntab.start, ntab.end)
                jumped = True
                if (self._ctab is not None
                        and ntab.start - self._ctab.end == Position(0, 1)
                        and ntab.end - ntab.start == Position(0, 1)):
                    ntab_short_and_near = True
                if ntab.number == 0:
                    self._current_snippet_is_done()
            else:
                # This really shouldn't happen, because a snippet should
                # have been popped when its final tabstop was used.
                # Cleanup by removing current snippet and recursing.
                self._current_snippet_is_done()
                jumped = self._jump(backwards)
            self._ctab = ntab
        if jumped:
            self._vstate.remember_position()
            self._vstate.remember_unnamed_register(self._ctab.current_text)
            if not ntab_short_and_near:
                self._ignore_movements = True
        return jumped

    def _leaving_insert_mode(self):
        """Called whenever we leave the insert mode."""
        self._vstate.restore_unnamed_register()

    def _handle_failure(self, trigger):
        """Mainly make sure that we play well with SuperTab."""
        if trigger.lower() == '<tab>':
            feedkey = '\\' + trigger
        elif trigger.lower() == '<s-tab>':
            feedkey = '\\' + trigger
        else:
            feedkey = None
        mode = 'n'
        if not self._supertab_keys:
            if _vim.eval("exists('g:SuperTabMappingForward')") != '0':
                self._supertab_keys = (
                    _vim.eval('g:SuperTabMappingForward'),
                    _vim.eval('g:SuperTabMappingBackward'),
                )
            else:
                self._supertab_keys = ['', '']
        for idx, sttrig in enumerate(self._supertab_keys):
            if trigger.lower() == sttrig.lower():
                if idx == 0:
                    feedkey = r"\<Plug>SuperTabForward"
                    mode = 'n'
                elif idx == 1:
                    feedkey = r"\<Plug>SuperTabBackward"
                    mode = 'p'
                # Use remap mode so SuperTab mappings will be invoked.
                break
        if (feedkey == r"\<Plug>SuperTabForward" or
                feedkey == r"\<Plug>SuperTabBackward"):
            _vim.command('return SuperTab(%s)' % _vim.escape(mode))
        elif feedkey:
            _vim.command('return %s' % _vim.escape(feedkey))

    def _snips(self, before, partial):
        """Returns all the snippets for the given text before the cursor.

        If partial is True, then also return partial matches.
        """
        filetypes = self._buffer_filetypes[_vim.buf.number][::-1]
        matching_snippets = defaultdict(list)
        clear_priority = None
        cleared = {}
        for _, source in self._snippet_sources:
            source.ensure(filetypes)
        # Collect cleared information from sources.
        for _, source in self._snippet_sources:
            sclear_priority = source.get_clear_priority(filetypes)
            if sclear_priority is not None and (clear_priority is None
                    or sclear_priority > clear_priority):
                clear_priority = sclear_priority
            for key, value in source.get_cleared(filetypes).items():
                if key not in cleared or value > cleared[key]:
                    cleared[key] = value
        for _, source in self._snippet_sources:
            for snippet in source.get_snippets(filetypes, before, partial):
                if ((clear_priority is None or
                        snippet.priority > clear_priority)
                        and (snippet.trigger not in cleared or
                             snippet.priority > cleared[snippet.trigger])):
                    matching_snippets[snippet.trigger].append(snippet)
        if not matching_snippets:
            return []
        # Now filter duplicates and only keep the one with the highest
        # priority.
        snippets = []
        for snippets_with_trigger in matching_snippets.values():
            highest_priority = max(s.priority for s in snippets_with_trigger)
            snippets.extend(s for s in snippets_with_trigger
                            if s.priority == highest_priority)
        # For partial matches we are done, but if we want to expand a snippet,
        # we have to go over them again and only keep those with the maximum
        # priority.
        if partial:
            return snippets
        highest_priority = max(s.priority for s in snippets)
        return [s for s in snippets if s.priority == highest_priority]

    def _do_snippet(self, snippet, before):
        """Expands the given snippet, and handles everything that needs to be
        done with it."""
        self._map_inner_keys()
        # Adjust before, maybe the trigger is not the complete word
        text_before = before
        if snippet.matched:
            text_before = before[:-len(snippet.matched)]
        # The expansion span is the same in both branches below.
        start = Position(_vim.buf.cursor.line, len(text_before))
        end = Position(_vim.buf.cursor.line, len(before))
        if self._cs:
            # It could be that our trigger contains the content of TextObjects
            # in our containing snippet. If this is indeed the case, we have to
            # make sure that those are properly killed. We do this by
            # pretending that the user deleted and retyped the text that our
            # trigger matched.
            edit_actions = [
                ('D', start.line, start.col, snippet.matched),
                ('I', start.line, start.col, snippet.matched),
            ]
            self._csnippets[0].replay_user_edits(edit_actions)
            si = snippet.launch(text_before, self._visual_content,
                                self._cs.find_parent_for_new_to(start),
                                start, end)
        else:
            si = snippet.launch(text_before, self._visual_content,
                                None, start, end)
        self._visual_content.reset()
        self._csnippets.append(si)
        si.update_textobjects()
        self._vstate.remember_buffer(self._csnippets[0])
        self._jump()

    def _try_expand(self):
        """Try to expand a snippet in the current place."""
        before = _vim.buf.line_till_cursor
        if not before:
            return False
        snippets = self._snips(before, False)
        if snippets:
            # Prefer snippets with a context over context-free ones.
            snippets_with_context = [s for s in snippets if s.context]
            if snippets_with_context:
                snippets = snippets_with_context
        if not snippets:
            # No snippet found
            return False
        _vim.command('let &undolevels = &undolevels')
        if len(snippets) == 1:
            snippet = snippets[0]
        else:
            snippet = _ask_snippets(snippets)
            if not snippet:
                return True
        self._do_snippet(snippet, before)
        _vim.command('let &undolevels = &undolevels')
        return True

    @property
    def _cs(self):
        """The current snippet or None."""
        if not len(self._csnippets):
            return None
        return self._csnippets[-1]

    def _file_to_edit(self, requested_ft, bang):  # pylint: disable=no-self-use
        """Returns a file to be edited for the given requested_ft.

        If 'bang' is empty only private files in g:UltiSnipsSnippetsDir
        are considered, otherwise all files are considered and the user
        gets to choose.
        """
        # This method is not using self, but is called by UltiSnips.vim and is
        # therefore in this class because it is the facade to Vim.
        potentials = set()
        if _vim.eval("exists('g:UltiSnipsSnippetsDir')") == '1':
            snippet_dir = _vim.eval('g:UltiSnipsSnippetsDir')
        else:
            if platform.system() == 'Windows':
                snippet_dir = os.path.join(_vim.eval('$HOME'),
                                           'vimfiles', 'UltiSnips')
            elif _vim.eval("has('nvim')") == '1':
                snippet_dir = os.path.join(_vim.eval('$HOME'),
                                           '.nvim', 'UltiSnips')
            else:
                snippet_dir = os.path.join(_vim.eval('$HOME'),
                                           '.vim', 'UltiSnips')
        filetypes = []
        if requested_ft:
            filetypes.append(requested_ft)
        else:
            if bang:
                filetypes.extend(self._buffer_filetypes[_vim.buf.number])
            else:
                filetypes.append(self._buffer_filetypes[_vim.buf.number][0])
        for ft in filetypes:
            potentials.update(find_snippet_files(ft, snippet_dir))
            potentials.add(os.path.join(snippet_dir, ft + '.snippets'))
            if bang:
                potentials.update(find_all_snippet_files(ft))
        potentials = set(os.path.realpath(os.path.expanduser(p))
                         for p in potentials)
        if len(potentials) > 1:
            files = sorted(potentials)
            formatted = [as_unicode('%i: %s') % (i, escape(fn, '\\')) for
                         i, fn in enumerate(files, 1)]
            file_to_edit = _ask_user(files, formatted)
            if file_to_edit is None:
                return ''
        else:
            file_to_edit = potentials.pop()
        dirname = os.path.dirname(file_to_edit)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        return file_to_edit
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from math import exp
import warnings
import numpy
from numpy import array
from pyspark import RDD, since
from pyspark.streaming import DStream
from pyspark.mllib.common import callMLlibFunc, _py2java, _java2py
from pyspark.mllib.linalg import DenseVector, SparseVector, _convert_to_vector
from pyspark.mllib.regression import (
LabeledPoint, LinearModel, _regression_train_wrapper,
StreamingLinearAlgorithm)
from pyspark.mllib.util import Saveable, Loader, inherit_doc
# Public API of this module: the classification models, their trainers,
# and the streaming SGD variant.
__all__ = ['LogisticRegressionModel', 'LogisticRegressionWithSGD', 'LogisticRegressionWithLBFGS',
           'SVMModel', 'SVMWithSGD', 'NaiveBayesModel', 'NaiveBayes',
           'StreamingLogisticRegressionWithSGD']
class LinearClassificationModel(LinearModel):
    """
    A private abstract class representing a multiclass classification
    model. The categories are represented by int values: 0, 1, 2, etc.
    """
    def __init__(self, weights, intercept):
        super(LinearClassificationModel, self).__init__(weights, intercept)
        # None means "no thresholding": predict() returns raw scores.
        # Binary subclasses install a concrete default in their __init__.
        self._threshold = None

    @since('1.4.0')
    def setThreshold(self, value):
        """
        Sets the threshold separating positive from negative predictions.
        An example whose prediction score is greater than or equal to
        this threshold is classified as positive, and negative otherwise.
        Only meaningful for binary classification.
        """
        self._threshold = value

    @property
    @since('1.4.0')
    def threshold(self):
        """
        Returns the threshold (if any) used to convert raw prediction
        scores into 0/1 predictions. Only meaningful for binary
        classification.
        """
        return self._threshold

    @since('1.4.0')
    def clearThreshold(self):
        """
        Clears the threshold so that `predict` will output raw
        prediction scores. Only meaningful for binary classification.
        """
        self._threshold = None

    @since('1.4.0')
    def predict(self, test):
        """
        Predict values for a single data point or an RDD of points
        using the model trained.
        """
        # Abstract: concrete models (logistic regression, SVM, ...) override.
        raise NotImplementedError
class LogisticRegressionModel(LinearClassificationModel):

    """
    Classification model trained using Multinomial/Binary Logistic
    Regression.

    :param weights:
      Weights computed for every feature.
    :param intercept:
      Intercept computed for this model. (Only used in Binary Logistic
      Regression. In Multinomial Logistic Regression, the intercepts will
      not be a single value, so the intercepts will be part of the
      weights.)
    :param numFeatures:
      The dimension of the features.
    :param numClasses:
      The number of possible outcomes for k classes classification problem
      in Multinomial Logistic Regression. By default, it is binary
      logistic regression so numClasses will be set to 2.

    >>> data = [
    ...     LabeledPoint(0.0, [0.0, 1.0]),
    ...     LabeledPoint(1.0, [1.0, 0.0]),
    ... ]
    >>> lrm = LogisticRegressionWithSGD.train(sc.parallelize(data), iterations=10)
    >>> lrm.predict([1.0, 0.0])
    1
    >>> lrm.predict([0.0, 1.0])
    0
    >>> lrm.predict(sc.parallelize([[1.0, 0.0], [0.0, 1.0]])).collect()
    [1, 0]
    >>> lrm.clearThreshold()
    >>> lrm.predict([0.0, 1.0])
    0.279...

    >>> sparse_data = [
    ...     LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
    ...     LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
    ...     LabeledPoint(0.0, SparseVector(2, {0: 1.0})),
    ...     LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
    ... ]
    >>> lrm = LogisticRegressionWithSGD.train(sc.parallelize(sparse_data), iterations=10)
    >>> lrm.predict(array([0.0, 1.0]))
    1
    >>> lrm.predict(array([1.0, 0.0]))
    0
    >>> lrm.predict(SparseVector(2, {1: 1.0}))
    1
    >>> lrm.predict(SparseVector(2, {0: 1.0}))
    0
    >>> import os, tempfile
    >>> path = tempfile.mkdtemp()
    >>> lrm.save(sc, path)
    >>> sameModel = LogisticRegressionModel.load(sc, path)
    >>> sameModel.predict(array([0.0, 1.0]))
    1
    >>> sameModel.predict(SparseVector(2, {0: 1.0}))
    0
    >>> from shutil import rmtree
    >>> try:
    ...     rmtree(path)
    ... except:
    ...     pass
    >>> multi_class_data = [
    ...     LabeledPoint(0.0, [0.0, 1.0, 0.0]),
    ...     LabeledPoint(1.0, [1.0, 0.0, 0.0]),
    ...     LabeledPoint(2.0, [0.0, 0.0, 1.0])
    ... ]
    >>> data = sc.parallelize(multi_class_data)
    >>> mcm = LogisticRegressionWithLBFGS.train(data, iterations=10, numClasses=3)
    >>> mcm.predict([0.0, 0.5, 0.0])
    0
    >>> mcm.predict([0.8, 0.0, 0.0])
    1
    >>> mcm.predict([0.0, 0.0, 0.3])
    2

    .. versionadded:: 0.9.0
    """
    def __init__(self, weights, intercept, numFeatures, numClasses):
        super(LogisticRegressionModel, self).__init__(weights, intercept)
        self._numFeatures = int(numFeatures)
        self._numClasses = int(numClasses)
        self._threshold = 0.5
        if self._numClasses == 2:
            # Binary case: scores come straight from self.weights/intercept.
            self._dataWithBiasSize = None
            self._weightsMatrix = None
        else:
            # Multinomial case: the flat coefficient vector packs
            # (numClasses - 1) rows of (features + bias) each.
            # NOTE: use integer (floor) division — true division returns a
            # float under Python 3, which ndarray.reshape rejects.
            self._dataWithBiasSize = self._coeff.size // (self._numClasses - 1)
            self._weightsMatrix = self._coeff.toArray().reshape(self._numClasses - 1,
                                                                self._dataWithBiasSize)

    @property
    @since('1.4.0')
    def numFeatures(self):
        """
        Dimension of the features.
        """
        return self._numFeatures

    @property
    @since('1.4.0')
    def numClasses(self):
        """
        Number of possible outcomes for k classes classification problem
        in Multinomial Logistic Regression.
        """
        return self._numClasses

    @since('0.9.0')
    def predict(self, x):
        """
        Predict values for a single data point or an RDD of points
        using the model trained.
        """
        if isinstance(x, RDD):
            return x.map(lambda v: self.predict(v))

        x = _convert_to_vector(x)
        if self.numClasses == 2:
            margin = self.weights.dot(x) + self._intercept
            if margin > 0:
                prob = 1 / (1 + exp(-margin))
            else:
                # Numerically stable form for large negative margins
                # (avoids overflow in exp(-margin)).
                exp_margin = exp(margin)
                prob = exp_margin / (1 + exp_margin)
            if self._threshold is None:
                return prob
            else:
                return 1 if prob > self._threshold else 0
        else:
            # One-vs-reference multinomial prediction: class 0 is the
            # reference with margin 0; pick the class with the best margin.
            best_class = 0
            max_margin = 0.0
            if x.size + 1 == self._dataWithBiasSize:
                # Model was trained with an intercept: last column is bias.
                for i in range(0, self._numClasses - 1):
                    margin = x.dot(self._weightsMatrix[i][0:x.size]) + \
                        self._weightsMatrix[i][x.size]
                    if margin > max_margin:
                        max_margin = margin
                        best_class = i + 1
            else:
                for i in range(0, self._numClasses - 1):
                    margin = x.dot(self._weightsMatrix[i])
                    if margin > max_margin:
                        max_margin = margin
                        best_class = i + 1
            return best_class

    @since('1.4.0')
    def save(self, sc, path):
        """
        Save this model to the given path.
        """
        java_model = sc._jvm.org.apache.spark.mllib.classification.LogisticRegressionModel(
            _py2java(sc, self._coeff), self.intercept, self.numFeatures, self.numClasses)
        java_model.save(sc._jsc.sc(), path)

    @classmethod
    @since('1.4.0')
    def load(cls, sc, path):
        """
        Load a model from the given path.
        """
        java_model = sc._jvm.org.apache.spark.mllib.classification.LogisticRegressionModel.load(
            sc._jsc.sc(), path)
        weights = _java2py(sc, java_model.weights())
        intercept = java_model.intercept()
        numFeatures = java_model.numFeatures()
        numClasses = java_model.numClasses()
        threshold = java_model.getThreshold().get()
        model = LogisticRegressionModel(weights, intercept, numFeatures, numClasses)
        model.setThreshold(threshold)
        return model
class LogisticRegressionWithSGD(object):
    """
    .. versionadded:: 0.9.0
    .. note:: Deprecated in 2.0.0. Use ml.classification.LogisticRegression or
        LogisticRegressionWithLBFGS.
    """
    @classmethod
    @since('0.9.0')
    def train(cls, data, iterations=100, step=1.0, miniBatchFraction=1.0,
              initialWeights=None, regParam=0.01, regType="l2", intercept=False,
              validateData=True, convergenceTol=0.001):
        """
        Train a logistic regression model on the given data.

        :param data:
          The training data, an RDD of LabeledPoint.
        :param iterations:
          The number of iterations.
          (default: 100)
        :param step:
          The step parameter used in SGD.
          (default: 1.0)
        :param miniBatchFraction:
          Fraction of data to be used for each SGD iteration.
          (default: 1.0)
        :param initialWeights:
          The initial weights.
          (default: None)
        :param regParam:
          The regularizer parameter.
          (default: 0.01)
        :param regType:
          The type of regularizer used for training our model.
          Supported values:

            - "l1" for using L1 regularization
            - "l2" for using L2 regularization (default)
            - None for no regularization
        :param intercept:
          Boolean parameter which indicates the use or not of the
          augmented representation for training data (i.e., whether bias
          features are activated or not).
          (default: False)
        :param validateData:
          Boolean parameter which indicates if the algorithm should
          validate data before training.
          (default: True)
        :param convergenceTol:
          A condition which decides iteration termination.
          (default: 0.001)
        """
        # Emit the deprecation with the proper category so that callers
        # filtering on DeprecationWarning actually see it (the original
        # call defaulted to UserWarning).
        warnings.warn(
            "Deprecated in 2.0.0. Use ml.classification.LogisticRegression or "
            "LogisticRegressionWithLBFGS.", DeprecationWarning)

        def train(rdd, i):
            return callMLlibFunc("trainLogisticRegressionModelWithSGD", rdd, int(iterations),
                                 float(step), float(miniBatchFraction), i, float(regParam), regType,
                                 bool(intercept), bool(validateData), float(convergenceTol))

        return _regression_train_wrapper(train, LogisticRegressionModel, data, initialWeights)
class LogisticRegressionWithLBFGS(object):
    """
    .. versionadded:: 1.2.0
    """
    @classmethod
    @since('1.2.0')
    def train(cls, data, iterations=100, initialWeights=None, regParam=0.0, regType="l2",
              intercept=False, corrections=10, tolerance=1e-6, validateData=True, numClasses=2):
        """
        Train a logistic regression model on the given data.

        :param data:
          The training data, an RDD of LabeledPoint.
        :param iterations:
          The number of iterations.
          (default: 100)
        :param initialWeights:
          The initial weights.
          (default: None)
        :param regParam:
          The regularizer parameter.
          (default: 0.0)
        :param regType:
          The type of regularizer used for training our model.
          Supported values:

            - "l1" for using L1 regularization
            - "l2" for using L2 regularization (default)
            - None for no regularization
        :param intercept:
          Boolean parameter which indicates the use or not of the
          augmented representation for training data (i.e., whether bias
          features are activated or not).
          (default: False)
        :param corrections:
          The number of corrections used in the LBFGS update.
          If a known updater is used for binary classification,
          it calls the ml implementation and this parameter will
          have no effect. (default: 10)
        :param tolerance:
          The convergence tolerance of iterations for L-BFGS.
          (default: 1e-6)
        :param validateData:
          Boolean parameter which indicates if the algorithm should
          validate data before training.
          (default: True)
        :param numClasses:
          The number of classes (i.e., outcomes) a label can take in
          Multinomial Logistic Regression.
          (default: 2)

        >>> data = [
        ...     LabeledPoint(0.0, [0.0, 1.0]),
        ...     LabeledPoint(1.0, [1.0, 0.0]),
        ... ]
        >>> lrm = LogisticRegressionWithLBFGS.train(sc.parallelize(data), iterations=10)
        >>> lrm.predict([1.0, 0.0])
        1
        >>> lrm.predict([0.0, 1.0])
        0
        """
        def train(rdd, i):
            return callMLlibFunc("trainLogisticRegressionModelWithLBFGS", rdd, int(iterations), i,
                                 float(regParam), regType, bool(intercept), int(corrections),
                                 float(tolerance), bool(validateData), int(numClasses))

        if initialWeights is None:
            # Default to all-zero weights sized for the requested model.
            numFeatures = len(data.first().features)
            if numClasses == 2:
                initialWeights = [0.0] * numFeatures
            else:
                # Multinomial: one weight row per non-reference class,
                # with an extra bias slot per row when intercept is on.
                rowLength = numFeatures + 1 if intercept else numFeatures
                initialWeights = [0.0] * (rowLength * (numClasses - 1))
        return _regression_train_wrapper(train, LogisticRegressionModel, data, initialWeights)
class SVMModel(LinearClassificationModel):

    """
    Model for Support Vector Machines (SVMs).

    :param weights:
      Weights computed for every feature.
    :param intercept:
      Intercept computed for this model.

    >>> data = [
    ...     LabeledPoint(0.0, [0.0]),
    ...     LabeledPoint(1.0, [1.0]),
    ...     LabeledPoint(1.0, [2.0]),
    ...     LabeledPoint(1.0, [3.0])
    ... ]
    >>> svm = SVMWithSGD.train(sc.parallelize(data), iterations=10)
    >>> svm.predict([1.0])
    1
    >>> svm.predict(sc.parallelize([[1.0]])).collect()
    [1]
    >>> svm.clearThreshold()
    >>> svm.predict(array([1.0]))
    1.44...

    >>> sparse_data = [
    ...     LabeledPoint(0.0, SparseVector(2, {0: -1.0})),
    ...     LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
    ...     LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
    ...     LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
    ... ]
    >>> svm = SVMWithSGD.train(sc.parallelize(sparse_data), iterations=10)
    >>> svm.predict(SparseVector(2, {1: 1.0}))
    1
    >>> svm.predict(SparseVector(2, {0: -1.0}))
    0
    >>> import os, tempfile
    >>> path = tempfile.mkdtemp()
    >>> svm.save(sc, path)
    >>> sameModel = SVMModel.load(sc, path)
    >>> sameModel.predict(SparseVector(2, {1: 1.0}))
    1
    >>> sameModel.predict(SparseVector(2, {0: -1.0}))
    0
    >>> from shutil import rmtree
    >>> try:
    ...     rmtree(path)
    ... except:
    ...     pass

    .. versionadded:: 0.9.0
    """
    def __init__(self, weights, intercept):
        super(SVMModel, self).__init__(weights, intercept)
        # Default decision boundary: positive margin -> class 1.
        self._threshold = 0.0

    @since('0.9.0')
    def predict(self, x):
        """
        Predict values for a single data point or an RDD of points
        using the model trained.
        """
        if isinstance(x, RDD):
            return x.map(lambda v: self.predict(v))

        margin = self.weights.dot(_convert_to_vector(x)) + self.intercept
        if self._threshold is None:
            return margin
        return 1 if margin > self._threshold else 0

    @since('1.4.0')
    def save(self, sc, path):
        """
        Save this model to the given path.
        """
        java_model = sc._jvm.org.apache.spark.mllib.classification.SVMModel(
            _py2java(sc, self._coeff), self.intercept)
        java_model.save(sc._jsc.sc(), path)

    @classmethod
    @since('1.4.0')
    def load(cls, sc, path):
        """
        Load a model from the given path.
        """
        java_model = sc._jvm.org.apache.spark.mllib.classification.SVMModel.load(
            sc._jsc.sc(), path)
        model = SVMModel(_java2py(sc, java_model.weights()), java_model.intercept())
        model.setThreshold(java_model.getThreshold().get())
        return model
class SVMWithSGD(object):
    """
    .. versionadded:: 0.9.0
    """
    @classmethod
    @since('0.9.0')
    def train(cls, data, iterations=100, step=1.0, regParam=0.01,
              miniBatchFraction=1.0, initialWeights=None, regType="l2",
              intercept=False, validateData=True, convergenceTol=0.001):
        """
        Train a support vector machine on the given data.

        :param data:
          The training data, an RDD of LabeledPoint.
        :param iterations:
          The number of iterations.
          (default: 100)
        :param step:
          The step parameter used in SGD.
          (default: 1.0)
        :param regParam:
          The regularizer parameter.
          (default: 0.01)
        :param miniBatchFraction:
          Fraction of data to be used for each SGD iteration.
          (default: 1.0)
        :param initialWeights:
          The initial weights.
          (default: None)
        :param regType:
          The type of regularizer used for training our model.
          Allowed values:

            - "l1" for using L1 regularization
            - "l2" for using L2 regularization (default)
            - None for no regularization
        :param intercept:
          Boolean parameter which indicates the use or not of the
          augmented representation for training data (i.e. whether bias
          features are activated or not).
          (default: False)
        :param validateData:
          Boolean parameter which indicates if the algorithm should
          validate data before training.
          (default: True)
        :param convergenceTol:
          A condition which decides iteration termination.
          (default: 0.001)
        """
        # Closure handed to the generic regression wrapper; `initial` is the
        # (possibly wrapper-supplied) initial weight vector.
        def _train_func(rdd, initial):
            return callMLlibFunc(
                "trainSVMModelWithSGD", rdd, int(iterations), float(step),
                float(regParam), float(miniBatchFraction), initial, regType,
                bool(intercept), bool(validateData), float(convergenceTol))

        return _regression_train_wrapper(_train_func, SVMModel, data, initialWeights)
@inherit_doc
class NaiveBayesModel(Saveable, Loader):

    """
    Model for Naive Bayes classifiers.

    :param labels:
      List of labels.
    :param pi:
      Log of class priors, whose dimension is C, number of labels.
    :param theta:
      Log of class conditional probabilities, whose dimension is C-by-D,
      where D is number of features.

    >>> data = [
    ...     LabeledPoint(0.0, [0.0, 0.0]),
    ...     LabeledPoint(0.0, [0.0, 1.0]),
    ...     LabeledPoint(1.0, [1.0, 0.0]),
    ... ]
    >>> model = NaiveBayes.train(sc.parallelize(data))
    >>> model.predict(array([0.0, 1.0]))
    0.0
    >>> model.predict(array([1.0, 0.0]))
    1.0
    >>> model.predict(sc.parallelize([[1.0, 0.0]])).collect()
    [1.0]
    >>> sparse_data = [
    ...     LabeledPoint(0.0, SparseVector(2, {1: 0.0})),
    ...     LabeledPoint(0.0, SparseVector(2, {1: 1.0})),
    ...     LabeledPoint(1.0, SparseVector(2, {0: 1.0}))
    ... ]
    >>> model = NaiveBayes.train(sc.parallelize(sparse_data))
    >>> model.predict(SparseVector(2, {1: 1.0}))
    0.0
    >>> model.predict(SparseVector(2, {0: 1.0}))
    1.0
    >>> import os, tempfile
    >>> path = tempfile.mkdtemp()
    >>> model.save(sc, path)
    >>> sameModel = NaiveBayesModel.load(sc, path)
    >>> sameModel.predict(SparseVector(2, {0: 1.0})) == model.predict(SparseVector(2, {0: 1.0}))
    True
    >>> from shutil import rmtree
    >>> try:
    ...     rmtree(path)
    ... except OSError:
    ...     pass

    .. versionadded:: 0.9.0
    """
    def __init__(self, labels, pi, theta):
        self.labels = labels
        self.pi = pi
        self.theta = theta

    @since('0.9.0')
    def predict(self, x):
        """
        Return the most likely class for a data vector
        or an RDD of vectors
        """
        if isinstance(x, RDD):
            return x.map(lambda v: self.predict(v))

        # Log-posterior per class = log prior + log likelihood; pick argmax.
        posterior = self.pi + _convert_to_vector(x).dot(self.theta.transpose())
        return self.labels[numpy.argmax(posterior)]

    def save(self, sc, path):
        """
        Save this model to the given path.
        """
        java_model = sc._jvm.org.apache.spark.mllib.classification.NaiveBayesModel(
            _py2java(sc, self.labels.tolist()),
            _py2java(sc, self.pi.tolist()),
            _py2java(sc, self.theta.tolist()))
        java_model.save(sc._jsc.sc(), path)

    @classmethod
    @since('1.4.0')
    def load(cls, sc, path):
        """
        Load a model from the given path.
        """
        java_model = sc._jvm.org.apache.spark.mllib.classification.NaiveBayesModel.load(
            sc._jsc.sc(), path)
        # Can not unpickle array.array from Pyrolite in Python3 with "bytes"
        py_labels = _java2py(sc, java_model.labels(), "latin1")
        py_pi = _java2py(sc, java_model.pi(), "latin1")
        py_theta = _java2py(sc, java_model.theta(), "latin1")
        return NaiveBayesModel(py_labels, py_pi, numpy.array(py_theta))
class NaiveBayes(object):
    """
    .. versionadded:: 0.9.0
    """
    @classmethod
    @since('0.9.0')
    def train(cls, data, lambda_=1.0):
        """
        Train a Naive Bayes model given an RDD of (label, features)
        vectors.

        This is the Multinomial NB (U{http://tinyurl.com/lsdw6p}) which
        can handle all kinds of discrete data. For example, by
        converting documents into TF-IDF vectors, it can be used for
        document classification. By making every vector a 0-1 vector,
        it can also be used as Bernoulli NB (U{http://tinyurl.com/p7c96j6}).
        The input feature values must be nonnegative.

        :param data:
          RDD of LabeledPoint.
        :param lambda_:
          The smoothing parameter.
          (default: 1.0)
        """
        # Cheap sanity check on the first record before shipping the whole
        # RDD to the JVM trainer.
        if not isinstance(data.first(), LabeledPoint):
            raise ValueError("`data` should be an RDD of LabeledPoint")
        labels, pi, theta = callMLlibFunc("trainNaiveBayesModel", data, lambda_)
        return NaiveBayesModel(labels.toArray(), pi.toArray(), numpy.array(theta))
@inherit_doc
class StreamingLogisticRegressionWithSGD(StreamingLinearAlgorithm):
    """
    Train or predict a logistic regression model on streaming data.
    Training uses Stochastic Gradient Descent to update the model based on
    each new batch of incoming data from a DStream.

    Each batch of data is assumed to be an RDD of LabeledPoints.
    The number of data points per batch can vary, but the number
    of features must be constant. An initial weight
    vector must be provided.

    :param stepSize:
      Step size for each iteration of gradient descent.
      (default: 0.1)
    :param numIterations:
      Number of iterations run for each batch of data.
      (default: 50)
    :param miniBatchFraction:
      Fraction of each batch of data to use for updates.
      (default: 1.0)
    :param regParam:
      L2 Regularization parameter.
      (default: 0.0)
    :param convergenceTol:
      Value used to determine when to terminate iterations.
      (default: 0.001)

    .. versionadded:: 1.5.0
    """
    def __init__(self, stepSize=0.1, numIterations=50, miniBatchFraction=1.0, regParam=0.0,
                 convergenceTol=0.001):
        self.stepSize = stepSize
        self.numIterations = numIterations
        self.regParam = regParam
        self.miniBatchFraction = miniBatchFraction
        self.convergenceTol = convergenceTol
        # No model until setInitialWeights() is called.
        self._model = None
        super(StreamingLogisticRegressionWithSGD, self).__init__(model=self._model)

    @since('1.5.0')
    def setInitialWeights(self, initialWeights):
        """
        Set the initial value of weights.

        This must be set before running trainOn and predictOn.
        """
        initialWeights = _convert_to_vector(initialWeights)

        # LogisticRegressionWithSGD does only binary classification.
        self._model = LogisticRegressionModel(
            initialWeights, 0, initialWeights.size, 2)
        return self

    @since('1.5.0')
    def trainOn(self, dstream):
        """Train the model on the incoming dstream."""
        self._validate(dstream)

        def update(rdd):
            # LogisticRegressionWithSGD.train raises an error for an empty RDD.
            if rdd.isEmpty():
                return
            self._model = LogisticRegressionWithSGD.train(
                rdd, self.numIterations, self.stepSize,
                self.miniBatchFraction, self._model.weights,
                regParam=self.regParam, convergenceTol=self.convergenceTol)

        dstream.foreachRDD(update)
def _test():
    """Run this module's doctests against a local SparkSession."""
    import doctest
    import sys
    from pyspark.sql import SparkSession
    import pyspark.mllib.classification
    globs = pyspark.mllib.classification.__dict__.copy()
    spark = SparkSession.builder\
        .master("local[4]")\
        .appName("mllib.classification tests")\
        .getOrCreate()
    globs['sc'] = spark.sparkContext
    (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    spark.stop()
    if failure_count:
        # Use sys.exit rather than the `exit` builtin: the latter is only
        # injected by the `site` module and is absent under `python -S`.
        sys.exit(-1)
# Script entry point: run the module doctests.
if __name__ == "__main__":
    _test()
|
|
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
# from nose import with_setup
# -*- coding: utf-8 -*-
from ..data_structures.sarray import SArray
import pandas as pd
import numpy as np
import unittest
import random
import copy
import os
import math
import array
import time
import itertools
#######################################################
# Metrics tracking tests are in test_usage_metrics.py #
#######################################################
class SArraySketchTest(unittest.TestCase):
    """Unit tests for SArray.sketch_summary() over every element type."""

    def __validate_sketch_result(self, sketch, sa, delta=1E-7):
        """Cross-check a sketch against pandas-computed statistics on `sa`."""
        df = pd.DataFrame(list(sa.dropna()))
        pds = pd.Series(list(sa.dropna()))
        if (sa.dtype() == int or sa.dtype() == float):
            if (len(sa) == 0):
                self.assertTrue(math.isnan(sketch.min()))
                # Fixed: this previously re-checked min(); an empty sketch
                # must report NaN for max() as well.
                self.assertTrue(math.isnan(sketch.max()))
                self.assertEqual(sketch.sum(), 0.0)
                self.assertEqual(sketch.mean(), 0.0)
                self.assertEqual(sketch.var(), 0.0)
                self.assertEqual(sketch.std(), 0.0)
            else:
                self.assertEqual(sketch.min(), sa.min())
                self.assertEqual(sketch.max(), sa.max())
                self.assertEqual(sketch.sum(), sa.sum())
                self.assertAlmostEqual(sketch.mean(), sa.dropna().mean(), delta=delta)
                self.assertAlmostEqual(sketch.var(), sa.dropna().var(), delta=delta)
                self.assertAlmostEqual(sketch.std(), sa.dropna().std(), delta=delta)
                # Quantiles are approximate; allow a tolerance of 1 at the median.
                self.assertAlmostEqual(sketch.quantile(0.5), df.quantile(0.5)[0], delta=1)
                self.assertEqual(sketch.quantile(0), df.quantile(0)[0])
                self.assertEqual(sketch.quantile(1), df.quantile(1)[0])
                self.assertEqual(sketch.frequent_items(), SArray(pds).sketch_summary().frequent_items())
                for item in pds.value_counts().index:
                    self.assertEqual(sketch.frequency_count(item), pds.value_counts()[item])
                # num_unique is approximate as well.
                self.assertAlmostEqual(sketch.num_unique(), len(sa.unique()), delta=3)
        else:
            # Non-numeric sketches do not support quantiles.
            with self.assertRaises(RuntimeError):
                sketch.quantile(0.5)
        self.assertEqual(sketch.num_undefined(), sa.num_missing())
        self.assertEqual(sketch.size(), len(sa))
        self.assertEqual(sketch.sketch_ready(), True)
        self.assertEqual(sketch.num_elements_processed(), sketch.size())

    def __validate_nested_sketch_result(self, sa):
        """Validate a sketch of a nested (list/dict) SArray and its lengths."""
        sketch = sa.sketch_summary()
        self.__validate_sketch_result(sketch, sa)

        # element length summary
        t = sketch.element_length_summary()
        len_sa = sa.dropna().item_length()
        self.__validate_sketch_result(t, len_sa)

    def test_sketch_int(self):
        int_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, None]
        sa = SArray(data=int_data)
        self.__validate_sketch_result(sa.sketch_summary(), sa)

    def test_sketch_float(self):
        int_data = [1.2, 3, .4, 6.789, None]
        sa = SArray(data=int_data)
        self.__validate_sketch_result(sa.sketch_summary(), sa)

    def test_vector_sketch(self):
        vector_data = [[], [1, 2], [3], [4, 5, 6, 7], [8, 9, 10], None]
        sa = SArray(data=vector_data)

        sketch = sa.sketch_summary()
        self.__validate_sketch_result(sketch, sa)
        self.__validate_sketch_result(sketch.element_length_summary(), sa.dropna().item_length())

        flattened = list(itertools.chain.from_iterable(list(sa.dropna())))
        self.__validate_sketch_result(sketch.element_summary(), SArray(flattened))

        fi = sketch.frequent_items()
        self.assertEqual(len(fi), 5)
        self.assertEqual((fi['[1 2]']), 1)
        self.assertEqual((fi['[4 5 6 7]']), 1)

        # sub sketch with one key
        s = sa.sketch_summary(sub_sketch_keys=1).element_sub_sketch(1)
        expected = sa.vector_slice(1)
        self.__validate_sketch_result(s, expected)

        # sub sketch with multiple keys
        keys = [1, 3]
        s = sa.sketch_summary(sub_sketch_keys=keys).element_sub_sketch(keys)
        self.assertEqual(len(s), len(keys))
        for key in keys:
            self.assertTrue(key in s)
            expected = sa.vector_slice(key)
            self.__validate_sketch_result(s[key], expected)

        indexes = range(0, 10)
        s = sa.sketch_summary(sub_sketch_keys=indexes).element_sub_sketch()
        self.assertEqual(len(s), len(indexes))

    def test_list_sketch(self):
        list_data = [[], [1, 2], [1, 2], ['a', 'a', 'a', 'b'], [1, 1, 2], None]
        sa = SArray(list_data)
        self.__validate_nested_sketch_result(sa)

        sketch = sa.sketch_summary()
        self.assertEqual(sketch.num_unique(), 4)

        element_summary = sketch.element_summary()
        another_rep = list(itertools.chain.from_iterable(list(sa.dropna())))
        self.__validate_sketch_result(element_summary, SArray(another_rep, str))

        fi = sketch.frequent_items()
        self.assertEqual(len(fi), 4)
        self.assertEqual((fi['[1,2]']), 2)
        self.assertEqual((fi['["a","a","a","b"]']), 1)

    def test_dict_sketch_int_value(self):
        dict_data = [{}, {'a': 1, 'b': 2}, {'a': 1, 'b': 2}, {'a': 3, 'c': 1},
                     {'a': 1, 'b': 2, 'c': 3}, None]
        sa = SArray(data=dict_data)
        self.__validate_nested_sketch_result(sa)

        sketch = sa.sketch_summary()
        self.assertEqual(sketch.num_unique(), 4)

        fi = sketch.frequent_items()
        self.assertEqual(len(fi), 4)
        # The order in which keys are reported is different in python2 vs python3.
        # So when the dictionary is converted to a string, it results in different
        # strings. Try both possible combinations for dictionary.
        v = fi['{"a":1, "b":2}'] if '{"a":1, "b":2}' in fi else fi['{"b":2, "a":1}']
        self.assertEqual(v, 2)

        v = fi['{"a":3, "c":1}'] if '{"a":3, "c":1}' in fi else fi['{"c":1, "a":3}']
        self.assertEqual(v, 1)

        # Get dict key sketch
        key_summary = sketch.dict_key_summary()
        another_rep = list(itertools.chain.from_iterable(list(sa.dict_keys().dropna())))
        self.__validate_sketch_result(key_summary, SArray(another_rep))

        # Get dict value sketch
        value_summary = sketch.dict_value_summary()
        another_rep = list(itertools.chain.from_iterable(list(sa.dict_values().dropna())))
        self.__validate_sketch_result(value_summary, SArray(another_rep))

        # sub sketch with one key
        s = sa.sketch_summary(sub_sketch_keys='a').element_sub_sketch('a')
        expected = sa.unpack(column_name_prefix="")['a']
        self.__validate_sketch_result(s, expected)

        s = sa.sketch_summary(sub_sketch_keys='Nonexist').element_sub_sketch('Nonexist')
        self.assertEqual(s.num_undefined(), len(sa))

        # sub sketch with multiple keys
        keys = ['a', 'b']
        s = sa.sketch_summary(sub_sketch_keys=keys).element_sub_sketch(keys)
        self.assertEqual(len(s), len(keys))
        for key in keys:
            self.assertTrue(key in s)
            expected = sa.unpack(column_name_prefix="")[key]
            self.__validate_sketch_result(s[key], expected)

    def test_dict_sketch_str_value(self):
        # Dict value sketch type should be auto inferred
        dict_data = [{'a': 'b', 'b': 'c'}, {'a': 'b', 'b': 'c'}, {'a': 'd', 'b': '4'}, None]
        sa = SArray(data=dict_data)
        self.__validate_nested_sketch_result(sa)

        sketch = sa.sketch_summary()
        fi = sketch.frequent_items()
        self.assertEqual(len(fi), 2)
        # The order in which keys are reported is different in python2 vs python3.
        # So when the dictionary is converted to a string, it results in different
        # strings. Try both possible combinations for dictionary.
        v = fi['{"b":"c", "a":"b"}'] if '{"b":"c", "a":"b"}' in fi else fi['{"a":"b", "b":"c"}']
        self.assertEqual(v, 2)

        v = fi['{"a":"d", "b":"4"}'] if '{"a":"d", "b":"4"}' in fi else fi['{"b":"4", "a":"d"}']
        self.assertEqual(v, 1)

        # Get dict key sketch
        key_summary = sketch.dict_key_summary()
        another_rep = list(itertools.chain.from_iterable(list(sa.dict_keys().dropna())))
        self.__validate_sketch_result(key_summary, SArray(another_rep))

        # Get dict value sketch
        value_summary = sketch.dict_value_summary()
        another_rep = list(itertools.chain.from_iterable(list(sa.dict_values().dropna())))
        self.__validate_sketch_result(value_summary, SArray(another_rep))

        # sub sketch with one key
        s = sa.sketch_summary(sub_sketch_keys='a').element_sub_sketch('a')
        expected = sa.unpack(column_name_prefix="")['a']
        self.__validate_sketch_result(s, expected)

        s = sa.sketch_summary(sub_sketch_keys='Nonexist').element_sub_sketch('Nonexist')
        self.assertEqual(s.num_undefined(), len(sa))

        # sub sketch with multiple keys
        keys = ['a', 'b']
        s = sa.sketch_summary(sub_sketch_keys=keys).element_sub_sketch(keys)
        self.assertEqual(len(s), len(keys))
        for key in keys:
            self.assertTrue(key in s)
            expected = sa.unpack(column_name_prefix="")[key]
            self.__validate_sketch_result(s[key], expected)

        # allow pass in empty keys, which will retrieve all keys
        s = sa.sketch_summary(sub_sketch_keys=keys).element_sub_sketch()
        self.assertEqual(len(s), len(keys))
        for key in keys:
            self.assertTrue(key in s)
            expected = sa.unpack(column_name_prefix="")[key]
            self.__validate_sketch_result(s[key], expected)

    def test_dict_many_nones(self):
        sa = SArray([None] * 200 + [{'a': 'b'}])
        self.assertEqual(sa.sketch_summary().num_elements_processed(), 201)

    def test_str_sketch(self):
        str_data = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", None]
        sa = SArray(data=str_data)
        sketch = sa.sketch_summary()
        # Numeric statistics are undefined for string sketches.
        with self.assertRaises(RuntimeError):
            sketch.min()
        with self.assertRaises(RuntimeError):
            sketch.max()
        with self.assertRaises(RuntimeError):
            sketch.sum()
        with self.assertRaises(RuntimeError):
            sketch.mean()
        with self.assertRaises(RuntimeError):
            sketch.var()
        with self.assertRaises(RuntimeError):
            sketch.std()
        self.assertAlmostEqual(sketch.num_unique(), 10, delta=3)
        self.assertEqual(sketch.num_undefined(), 1)
        self.assertEqual(sketch.size(), len(str_data))
        with self.assertRaises(RuntimeError):
            sketch.quantile(0.5)
        self.assertEqual(sketch.frequency_count("1"), 1)
        self.assertEqual(sketch.frequency_count("2"), 1)
        t = sketch.frequent_items()
        self.assertEqual(len(t), 10)

    def test_empty_sketch(self):
        int_data = []
        sa = SArray(data=int_data)
        sketch = sa.sketch_summary()
        self.assertTrue(math.isnan(sketch.min()))
        self.assertTrue(math.isnan(sketch.max()))
        self.assertEqual(sketch.sum(), 0)
        self.assertEqual(sketch.mean(), 0)
        self.assertEqual(sketch.var(), 0)
        self.assertEqual(sketch.std(), 0)
        self.assertEqual(sketch.num_unique(), 0)
        self.assertEqual(sketch.num_undefined(), 0)
        self.assertEqual(sketch.size(), 0)
        with self.assertRaises(RuntimeError):
            sketch.quantile(0.5)
        t = sketch.frequent_items()
        self.assertEqual(len(t), 0)

    def test_large_value_sketch(self):
        sa = SArray([1234567890 for i in range(100)])
        # Use the sketch we built (was previously computed and discarded).
        sk = sa.sketch_summary()
        self.__validate_sketch_result(sk, sa, 1E-5)

    def test_cancelation(self):
        sa = SArray(range(1, 10000))
        s = sa.sketch_summary(background=True)
        s.cancel()
        # this can be rather non-deterministic, so there is very little
        # real output validation that can be done...
|
|
import os
import sys
import hashlib
import shutil
if sys.version_info[0] == 2: # pragma: no cover
from urllib import urlretrieve
from ConfigParser import RawConfigParser
else:
from urllib.request import urlretrieve
from configparser import RawConfigParser
# Location of the user's batchup configuration file.
_CONFIG_PATH = os.path.expanduser(os.path.join('~', '.batchup.cfg'))
# Default batchup home directory, used when neither the config file nor
# the environment variable provides one.
_DEFAULT_BATCHUP_PATH = os.path.expanduser(os.path.join('~', '.batchup'))
# Environment variable that can override the batchup home directory.
_BATCHUP_ENV_NAME = 'BATCHUP_HOME'
# Name of the data subdirectory inside the batchup home directory.
_DATA_DIR_NAME = 'data'
# Maximum number of attempts when downloading a file.
_MAX_DOWNLOAD_TRIES = 3
# Module-level caches, populated lazily by get_config() / get_batchup_path().
_config__ = None
_data_dir_path__ = None
def get_config():  # pragma: no cover
    """Return the batchup configuration, loading it once and caching it."""
    global _config__
    if _config__ is not None:
        return _config__
    _config__ = RawConfigParser()
    if os.path.exists(_CONFIG_PATH):
        try:
            _config__.read(_CONFIG_PATH)
        except Exception as e:
            print('batchup: WARNING: error {} trying to open config '
                  'file from {}'.format(e, _CONFIG_PATH))
            # Fall back to an empty configuration on a corrupt file.
            _config__ = RawConfigParser()
    return _config__
def get_batchup_path():  # pragma: no cover
    """
    Get (creating it if necessary) the batchup home directory path.

    The path comes from the ``[paths] data_dir`` entry of the config file
    if present, otherwise from the ``BATCHUP_HOME`` environment variable,
    falling back to ``~/.batchup``. The result is cached at module level.

    Returns
    -------
    str
        The batchup home directory path.

    Raises
    ------
    RuntimeError
        If the resolved path exists but is not a directory.
    """
    global _data_dir_path__
    if _data_dir_path__ is None:
        try:
            _data_dir_path__ = get_config().get('paths', 'data_dir')
        except Exception:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit. A missing section/option (NoSectionError /
            # NoOptionError) is the expected failure here.
            _data_dir_path__ = os.environ.get(_BATCHUP_ENV_NAME,
                                              _DEFAULT_BATCHUP_PATH)
        if os.path.exists(_data_dir_path__):
            if not os.path.isdir(_data_dir_path__):
                raise RuntimeError(
                    'batchup: the DATA directory path ({}) is not a '
                    'directory'.format(_data_dir_path__))
        else:
            os.makedirs(_data_dir_path__)
    return _data_dir_path__
def get_data_dir():
    """
    Return the path of the batchup data directory.

    Returns
    -------
    str
        The ``data`` subdirectory inside the batchup home directory.
    """
    base = get_batchup_path()
    return os.path.join(base, _DATA_DIR_NAME)
def get_data_path(filename):
    """
    Get the path of the given file within the batchup data directory

    Parameters
    ----------
    filename: str
        The filename to locate within the batchup data directory

    Returns
    -------
    str
        The full path of the file
    """
    # Absolute paths are returned untouched; relative names are resolved
    # against the data directory.
    if not os.path.isabs(filename):
        return os.path.join(get_data_dir(), filename)
    return filename
def download(path, source_url):
    """
    Download a file to a given path from a given URL, if it does not exist.

    Parameters
    ----------
    path: str
        The (destination) path of the file on the local filesystem
    source_url: str
        The URL from which to download the file

    Returns
    -------
    str
        The path of the file
    """
    dir_path = os.path.dirname(path)
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    if not os.path.exists(path):
        print('Downloading {} to {}'.format(source_url, path))
        filename = source_url.split('/')[-1]

        def _progress(count, block_size, total_size):
            # Some servers do not report a Content-Length; urlretrieve then
            # passes total_size <= 0, which previously caused a
            # ZeroDivisionError inside this report hook.
            if total_size > 0:
                sys.stdout.write('\rDownloading {} {:.2%}'.format(
                    filename, float(count * block_size) / float(total_size)))
            else:
                sys.stdout.write('\rDownloading {} ({} bytes)'.format(
                    filename, count * block_size))
            sys.stdout.flush()
        try:
            urlretrieve(source_url, path, reporthook=_progress)
        except:
            sys.stdout.write('\r')
            # Exception (including KeyboardInterrupt); remove any partially
            # downloaded file and re-raise
            if os.path.exists(path):
                os.remove(path)
            raise
        sys.stdout.write('\r')
    return path
def compute_sha256(path):
    """
    Compute the SHA-256 hash of the file at the given path

    Parameters
    ----------
    path: str
        The path of the file

    Returns
    -------
    str
        The SHA-256 HEX digest
    """
    digest = hashlib.sha256()
    with open(path, 'rb') as stream:
        while True:
            # Read in 10MB chunks to bound memory usage on large files
            block = stream.read(10 * 1024 * 1024)
            if not block:
                break
            digest.update(block)
    return digest.hexdigest()
def verify_file(path, sha256):
    """
    Verify the integrity of a file by checking its SHA-256 hash.
    If no digest is supplied, the digest is printed to the console.
    Closely follows the code in `torchvision.datasets.utils.check_integrity`

    Parameters
    ----------
    path: str
        The path of the file to check
    sha256: str
        The expected SHA-256 hex digest of the file, or `None` to print the
        digest of the file to the console

    Returns
    -------
    bool
        Indicates if the file passes the integrity check or not
    """
    if not os.path.isfile(path):
        return False
    actual = compute_sha256(path)
    if sha256 is None:
        # No digest supplied; report it to the console so a developer can
        # fill it in
        print('SHA-256 of {}:'.format(path))
        print(' "{}"'.format(actual))
        return True
    return actual == sha256
def download_and_verify(path, source_url, sha256):
    """
    Download a file to a given path from a given URL, if it does not exist.
    After downloading it, verify its integrity by checking the SHA-256 hash.

    Parameters
    ----------
    path: str
        The (destination) path of the file on the local filesystem
    source_url: str
        The URL from which to download the file
    sha256: str
        The expected SHA-256 hex digest of the file, or `None` to print the
        digest of the file to the console

    Returns
    -------
    str or None
        The path of the file if successfully downloaded otherwise `None`
    """
    if os.path.exists(path):
        # Already exists?
        # Nothing to do, except print the SHA-256 if necessary
        if sha256 is None:
            print('The SHA-256 of {} is "{}"'.format(
                path, compute_sha256(path)))
        return path
    # Download to a temporary '.unverified' name, rename only on success
    unverified_path = path + '.unverified'
    for attempt in range(_MAX_DOWNLOAD_TRIES):
        try:
            unverified_path = download(unverified_path, source_url)
        except Exception as e:
            # Report failure
            print(
                'Download of {} unsuccessful; error {}; '
                'deleting and re-trying...'.format(source_url, e))
            # Delete so that we can retry
            if os.path.exists(unverified_path):
                os.remove(unverified_path)
            continue
        if not os.path.exists(unverified_path):
            # Nothing arrived; retry
            continue
        if verify_file(unverified_path, sha256):
            # Success: rename the unverified file to the destination
            # filename
            os.rename(unverified_path, path)
            return path
        # Report failure
        print(
            'Download of {} unsuccessful; verification failed; '
            'deleting and re-trying...'.format(source_url))
        # Delete so that we can retry
        os.remove(unverified_path)
    print('Did not succeed in downloading {} (tried {} times)'.format(
        source_url, _MAX_DOWNLOAD_TRIES
    ))
    return None
def copy_and_verify(path, source_path, sha256):
    """
    Copy a file to a given path from a given path, if it does not exist.
    After copying it, verify its integrity by checking the SHA-256 hash.

    Parameters
    ----------
    path: str
        The (destination) path of the file on the local filesystem
    source_path: str
        The path from which to copy the file
    sha256: str
        The expected SHA-256 hex digest of the file, or `None` to print the
        digest of the file to the console

    Returns
    -------
    str or None
        The path of the file if successfully downloaded otherwise `None`
    """
    if os.path.exists(path):
        # Destination already present; just report the digest if requested
        if sha256 is None:
            print('The SHA-256 of {} is "{}"'.format(
                path, compute_sha256(path)))
        return path
    if not os.path.exists(source_path):
        return None
    # Copy to a temporary '.unverified' name, rename only on success
    unverified_path = path + '.unverified'
    dir_path = os.path.dirname(path)
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    shutil.copy(source_path, unverified_path)
    if not os.path.exists(unverified_path):
        return None
    if verify_file(unverified_path, sha256):
        # Success: rename the unverified file to the destination filename
        os.rename(unverified_path, path)
        return path
    # Report failure and delete the bad copy
    print('SHA verification of file {} failed'.format(source_path))
    os.remove(unverified_path)
    return None
def download_data(filename, source_url, sha256):
    """
    Download a file into the BatchUp data directory from a given URL,
    if it does not exist.
    After downloading it, verify its integrity by checking the SHA-256 hash.

    Parameters
    ----------
    filename: str
        The name (or relative path) of the destination file within the
        BatchUp data directory
    source_url: str
        The URL from which to download the file
    sha256: str
        The expected SHA-256 hex digest of the file, or `None` to print the
        digest of the file to the console

    Returns
    -------
    str or None
        The path of the file if successfully downloaded otherwise `None`
    """
    return download_and_verify(get_data_path(filename), source_url, sha256)
def copy_data(filename, source_path, sha256):
    """
    Copy a file into the BatchUp data directory from a given path, if it
    does not exist. After copying it, verify its integrity by checking the
    SHA-256 hash.

    Parameters
    ----------
    filename: str
        The name (or relative path) of the destination file within the
        BatchUp data directory
    source_path: str
        The path from which to copy the file
    sha256: str
        The expected SHA-256 hex digest of the file, or `None` to print the
        digest of the file to the console

    Returns
    -------
    str or None
        The path of the file if successfully copied otherwise `None`
    """
    return copy_and_verify(get_data_path(filename), source_path, sha256)
|
|
"""
Progress bar
------------
"""
from __future__ import print_function, division
import warnings
from abc import abstractmethod
import datetime
from plumbum.lib import six
from plumbum.cli.termsize import get_terminal_size
import sys
class ProgressBase(six.ABC):
    """Base class for progress bars. Customize for types of progress bars.

    :param iterator: The iterator to wrap with a progress bar
    :param length: The length of the iterator (will use ``__len__`` if None)
    :param timer: Try to time the completion status of the iterator
    :param body: True if the slow portion occurs outside the iterator (in a loop, for example)
    """

    def __init__(self, iterator=None, length=None, timer=True, body=False, has_output=False):
        # Reject the "neither supplied" case up front.  The original code
        # called len(None) first, so its explicit TypeError branch was
        # unreachable and callers got a cryptic "object of type 'NoneType'
        # has no len()" instead of the intended message.
        if iterator is None and length is None:
            raise TypeError("Expected either an iterator or a length")
        if length is None:
            length = len(iterator)
        elif iterator is None:
            iterator = range(length)
        self.length = length
        self.iterator = iterator
        self.timer = timer
        self.body = body
        self.has_output = has_output

    def __len__(self):
        return self.length

    def __iter__(self):
        self.start()
        return self

    @abstractmethod
    def start(self):
        """This should initialize the progress bar and the iterator"""
        self.iter = iter(self.iterator)
        # Start at -1 when the work happens in the loop body so that the
        # first increment() displays 0.
        self.value = -1 if self.body else 0
        self._start_time = datetime.datetime.now()

    def __next__(self):
        try:
            rval = next(self.iter)
            self.increment()
        except StopIteration:
            self.done()
            raise
        return rval

    def next(self):
        # Python 2 iterator protocol delegates to __next__.
        return self.__next__()

    @property
    def value(self):
        """This is the current value, as a property so setting it can be customized"""
        return self._value

    @value.setter
    def value(self, val):
        self._value = val

    @abstractmethod
    def display(self):
        """Called to update the progress bar"""
        pass

    def increment(self):
        """Sets next value and displays the bar"""
        self.value += 1
        self.display()

    def time_remaining(self):
        """Get the time remaining for the progress bar, guesses"""
        if self.value < 1:
            return None, None
        elapsed_time = datetime.datetime.now() - self._start_time
        # Average seconds per completed item, extrapolated over the rest.
        time_each = (elapsed_time.days*24*60*60
                     + elapsed_time.seconds
                     + elapsed_time.microseconds/1000000.0) / self.value
        time_remaining = time_each * (self.length - self.value)
        return elapsed_time, datetime.timedelta(0, time_remaining, 0)

    def str_time_remaining(self):
        """Returns a string version of time remaining"""
        if self.value < 1:
            return "Starting... "
        else:
            elapsed_time, time_remaining = list(map(str, self.time_remaining()))
            return "{0} completed, {1} remaining".format(elapsed_time.split('.')[0],
                                                         time_remaining.split('.')[0])

    @abstractmethod
    def done(self):
        """Is called when the iterator is done."""
        pass

    @classmethod
    def range(cls, *value, **kargs):
        """Fast shortcut to create a range based progress bar, assumes work done in body"""
        return cls(range(*value), body=True, **kargs)

    @classmethod
    def wrap(cls, iterator, length=None, **kargs):
        """Shortcut to wrap an iterator that does not do all the work internally"""
        return cls(iterator, length, body=True, **kargs)
class Progress(ProgressBase):
    """Text-mode progress bar rendered to stdout."""

    def start(self):
        super(Progress, self).start()
        self.display()

    def done(self):
        # Force the bar to 100% and emit a final newline if the loop body
        # printed its own output.
        self.value = self.length
        self.display()
        if self.has_output:
            print()

    def __str__(self):
        fraction = max(self.value, 0)/self.length
        term_width = get_terminal_size(default=(0, 0))[0]
        if self.timer:
            suffix = ' ' + self.str_time_remaining()
        else:
            suffix = ' ' + '{0} of {1} complete'.format(self.value, self.length)
        if term_width - len(suffix) < 10 or self.has_output:
            # Terminal too narrow (or interleaved output): plain text only.
            self.width = 0
            if self.timer:
                return "{0:g}% complete: {1}".format(100*fraction, self.str_time_remaining())
            return "{0:g}% complete".format(100*fraction)
        # Reserve room for the brackets and trailing space.
        self.width = term_width - len(suffix) - 2 - 1
        filled = int(fraction*self.width)
        bar = '[' + '*'*filled + ' '*(self.width-filled) + ']' + suffix
        label = ' {0:.0f}% '.format(100*fraction)
        # Splice the percentage label into the middle of the bar.
        mid = self.width//2
        return bar[:mid - 2] + label + bar[mid+len(label) - 2:]

    def display(self):
        text = str(self)
        if self.width == 0 or self.has_output:
            print(text)
        else:
            # Redraw in place using a carriage return.
            print("\r", end='')
            print(text, end='')
            sys.stdout.flush()
class ProgressIPy(ProgressBase):  # pragma: no cover
    """Progress bar rendered as an IPython/Jupyter notebook widget."""

    # HTML wrapper used to render the time-remaining label next to the bar.
    HTMLBOX = '<div class="widget-hbox widget-progress"><div class="widget-label" style="display:block;">{}</div></div>'

    def __init__(self, *args, **kargs):
        # Ipython gives warnings when using widgets about the API potentially changing
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            try:
                from ipywidgets import IntProgress, HTML, HBox
            except ImportError:  # Support IPython < 4.0
                from IPython.html.widgets import IntProgress, HTML, HBox
        super(ProgressIPy, self).__init__(*args, **kargs)
        self.prog = IntProgress(max=self.length)
        self._label = HTML()
        self._box = HBox((self.prog, self._label))

    def start(self):
        # Render the widget box in the notebook before initialising
        # the iterator state.
        from IPython.display import display
        display(self._box)
        super(ProgressIPy, self).start()

    @property
    def value(self):
        """This is the current value, -1 allowed (automatically fixed for display)"""
        return self._value

    @value.setter
    def value(self, val):
        self._value = val
        # Clamp to 0 for display; -1 is a valid internal "not started" state.
        self.prog.value = max(val, 0)
        self.prog.description = "{0:.2f}%".format(100*self.value / self.length)
        if self.timer and val > 0:
            self._label.value = self.HTMLBOX.format(self.str_time_remaining())

    def display(self):
        # The widget updates itself whenever ``value`` is set; nothing to do.
        pass

    def done(self):
        # Remove the widget from the notebook output.
        self._box.close()
class ProgressAuto(ProgressBase):
    """Automatically selects the best progress bar (IPython HTML or text). Does not work with qtconsole
    (as that is correctly identified as identical to notebook, since the kernel is the same); it will still
    iterate, but no graphical indication will be displayed.

    :param iterator: The iterator to wrap with a progress bar
    :param length: The length of the iterator (will use ``__len__`` if None)
    :param timer: Try to time the completion status of the iterator
    :param body: True if the slow portion occurs outside the iterator (in a loop, for example)
    """

    def __new__(cls, *args, **kargs):
        """Uses the generator trick that if a cls instance is returned, the __init__ method is not called."""
        try:  # pragma: no cover
            # ``__IPYTHON__`` only exists when running under IPython; a
            # NameError here falls through to the plain-text bar.
            __IPYTHON__
            try:
                from traitlets import TraitError
            except ImportError:  # Support for IPython < 4.0
                from IPython.utils.traitlets import TraitError
            try:
                return ProgressIPy(*args, **kargs)
            except TraitError:
                # Widget construction failed (e.g. no notebook frontend);
                # signal the outer handler to use the text bar instead.
                raise NameError()
        except (NameError, ImportError):
            return Progress(*args, **kargs)
# Register the concrete implementations as virtual subclasses so that
# isinstance(x, ProgressAuto) holds for whichever bar __new__ returned.
ProgressAuto.register(ProgressIPy)
ProgressAuto.register(Progress)
def main():
    """Demonstrate the text progress bar with a 20-step, 1s-per-step loop."""
    import time
    demo = Progress.range(20)
    for _step in demo:
        time.sleep(1)

if __name__ == '__main__':
    main()
|
|
"""Tests for the Profile model and related views."""
from django.test import TestCase, Client, RequestFactory
from django.contrib.auth import get_user
from django.contrib.auth.models import User, AnonymousUser
from my_profile.models import NMHWProfile
import factory
import json
from faker import Faker
from django.forms import ModelForm
from django.urls import reverse_lazy
from bs4 import BeautifulSoup
from django.contrib.sessions.middleware import SessionMiddleware
from django.core.files.uploadedfile import SimpleUploadedFile
import os
# Single module-level Faker instance shared by the factories below.
fake = Faker()
# Create your tests here.
class UserFactory(factory.Factory):
    """Generate new User objects.

    Field values are wrapped in ``factory.LazyFunction`` so that a fresh
    fake value is produced for every user built.  The original code called
    the faker at class-definition time, so every user created by this
    factory shared the exact same username/name/email — colliding with
    unique constraints as soon as two users were saved.
    """

    class Meta:
        """Define the model to base the factory on."""

        model = User

    username = factory.LazyFunction(fake.color_name)
    first_name = factory.LazyFunction(fake.first_name)
    last_name = factory.LazyFunction(fake.last_name)
    email = factory.LazyFunction(fake.email)
class ProfileModelTests(TestCase):
    """Tests of the profile model object."""

    def setUp(self):
        """Initial Setup."""
        pass

    def test_profile_is_made_when_user_is_saved(self):
        """When a user object is saved, a new profile is made."""
        new_user = UserFactory.create()
        new_user.save()
        # assertEqual gives a useful diff on failure, unlike
        # assertTrue(a == b)
        self.assertEqual(NMHWProfile.objects.count(), 1)

    def test_only_one_profile_created_per_user(self):
        """The receiver dictates that on any save a profile is created.

        Make sure that only one profile gets created.
        """
        new_user = UserFactory.create()
        new_user.save()
        profile = NMHWProfile.objects.first()
        new_user.set_password("flibbertygibbet")
        new_user.save()
        self.assertEqual(new_user.profile, profile)

    def test_profile_has_proper_attributes(self):
        """The profile exposes the expected social-media and info fields."""
        new_user = UserFactory.create()
        new_user.save()
        attrs = [
            'user', 'photo', 'linkedin', 'github', 'twitter',
            'facebook', 'instagram', 'email', 'resume', 'description'
        ]
        for attr in attrs:
            self.assertTrue(hasattr(new_user.profile, attr))

    def test_profile_string_repr_is_username(self):
        """str() of a profile is the owning user's username."""
        new_user = UserFactory.create()
        new_user.save()
        self.assertEqual(str(new_user.profile), new_user.username)
class ProfileViewTests(TestCase):
    """Tests for views associated with the profile model."""

    def setUp(self):
        """Set up a test user, profile, client, and requests."""
        self.user = UserFactory.create()
        self.user.username = "nhuntwalker"
        self.user.set_password("potatoes")
        self.user.save()
        self.profile = self.user.profile
        # Read the fixture image through a context manager so the file
        # handle is closed promptly (the original left it open).
        with open(
            'rational_whimsy/static/base/imgs/computer-1.jpg', 'rb'
        ) as img:
            self.profile.photo = SimpleUploadedFile(
                name='image_1.jpg',
                content=img.read(),
                content_type='image/jpeg'
            )
        self.profile.save()
        self.client = Client()
        self.request = RequestFactory()
        self.get_req = RequestFactory().get("/foo_path")

    def add_session_middleware(self, request):
        """Need to add session middleware for authentication."""
        mdl = SessionMiddleware()
        mdl.process_request(request)
        request.session.save()
        request.user = self.user

    def test_profile_detail_view_has_details(self):
        """Information from the profile should be in the response."""
        from my_profile.views import profile_detail
        response = profile_detail(self.get_req)
        self.assertIn(self.profile.linkedin, str(response.content))
        self.assertIn(self.profile.twitter, str(response.content))
        self.assertIn(self.profile.github, str(response.content))
        self.assertIn(self.profile.instagram, str(response.content))

    def test_profile_detail_view_accesses_profile(self):
        """All of my profile's details should be in the view's context."""
        response = self.client.get(reverse_lazy("profile"))
        self.assertEqual(response.context["profile"], self.profile)

    def test_profile_detail_view_accesses_correct_template(self):
        """The detail view should use the my_profile/about.html template."""
        response = self.client.get(reverse_lazy("profile"))
        self.assertTemplateUsed(response, "my_profile/about.html")

    def test_profile_login_route_get_shows_form(self):
        """A get request to the login route shows a login form."""
        response = self.client.get(reverse_lazy("login"))
        html = BeautifulSoup(response.content, "html5lib")
        self.assertIsNotNone(html.find("form"))

    def test_profile_login_route_has_proper_input_fields(self):
        """A get request shows all the proper fields, with required parts."""
        response = self.client.get(reverse_lazy("login"))
        html = BeautifulSoup(response.content, "html5lib")
        fields = ["username", "password"]
        for field in fields:
            self.assertIsNotNone(html.find("input", {"name": field}))
        btn = html.find("input", {"name": "submit"})
        self.assertEqual(btn.attrs["value"], "Log In")

    def test_profile_login_route_redirects(self):
        """When logging in with good credentials we redirect."""
        response = self.client.post(reverse_lazy("login"), {
            "username": self.user.username,
            "password": "potatoes"
        })
        self.assertEqual(response.status_code, 302)

    def test_profile_login_route_redirects_to_home(self):
        """When logging in with good credentials we reach home page."""
        response = self.client.post(reverse_lazy("login"), {
            "username": self.user.username,
            "password": "potatoes"
        })
        self.assertEqual(response.url, reverse_lazy("home_page"))

    def test_profile_login_route_logs_in_users(self):
        """A logged in user is authenticated."""
        self.client.post(reverse_lazy("login"), {
            "username": self.user.username,
            "password": "potatoes"
        })
        self.assertTrue(self.user.is_authenticated)

    def test_profile_edit_get_is_form(self):
        """A simple get request returns a form in HTML."""
        from my_profile.views import profile_edit
        self.get_req.user = self.user
        response = profile_edit(self.get_req)
        html = BeautifulSoup(response.content, "html5lib")
        self.assertEqual(len(html.find_all("form")), 1)

    def test_profile_edit_form_has_fields(self):
        """A GET request returns all the form fields."""
        from my_profile.views import profile_edit
        self.get_req.user = self.user
        response = profile_edit(self.get_req)
        html = BeautifulSoup(response.content, "html5lib")
        desired_fields = ["linkedin", "github", "twitter",
                          "facebook", "instagram"]
        for field in desired_fields:
            self.assertIsNotNone(html.find("input", {"name": field}))
        self.assertIsNotNone(html.find("textarea", {"name": "description"}))

    def test_profile_edit_response_has_form_in_context(self):
        """A get request returns a form object in the context."""
        self.client.force_login(self.user)
        response = self.client.get(reverse_lazy("profile_edit"))
        self.assertIsInstance(response.context["form"], ModelForm)

    def test_profile_edit_form_with_post_redirects_on_success(self):
        """A post request redirects on success."""
        from my_profile.views import profile_edit
        post_req = self.request.post("/foo_path", {
            "photo": "",
            "linkedin": "",
            "github": "",
            "twitter": "",
            "facebook": "",
            "instagram": "",
            "description": "pancakes"
        })
        post_req.user = self.user
        response = profile_edit(post_req)
        self.assertEqual(response.status_code, 302)

    def test_profile_edit_route_with_post_redirects_to_profile(self):
        """A post request redirects to the profile page."""
        self.client.force_login(self.user)
        response = self.client.post(reverse_lazy("profile_edit"), {
            "photo": "",
            "linkedin": "",
            "github": "",
            "twitter": "",
            "facebook": "",
            "instagram": "",
            "description": "pancakes"
        }, follow=True)
        chain = response.redirect_chain
        self.assertEqual(chain[0][0], reverse_lazy("profile"))

    def test_profile_edit_view_with_post_changes_model_attrs(self):
        """A post request to edit view changes the model attributes."""
        from my_profile.views import profile_edit
        post_req = self.request.post("/foo_path", {
            "photo": "",
            "linkedin": "",
            "github": "",
            "twitter": "",
            "facebook": "",
            "instagram": "",
            "description": "pancakes"
        })
        post_req.user = self.user
        profile_edit(post_req)
        profile = NMHWProfile.objects.get(user__username="nhuntwalker")
        self.assertEqual(profile.description, "pancakes")

    def test_profile_edit_route_with_post_changes_model_attrs(self):
        """A post request to the edit route changes the model attributes."""
        self.client.force_login(self.user)
        self.client.post(reverse_lazy("profile_edit"), {
            "photo": "",
            "linkedin": "",
            "github": "",
            "twitter": "",
            "facebook": "",
            "instagram": "",
            "description": "pancakes"
        })
        profile = NMHWProfile.objects.get(user__username="nhuntwalker")
        self.assertEqual(profile.description, "pancakes")

    def test_unauthenticated_user_profile_edit_route_redirects_login(self):
        """An unauthenticated user must be diverted from the edit route."""
        response = self.client.get(reverse_lazy("profile_edit"), follow=True)
        self.assertEqual(response.request["PATH_INFO"], reverse_lazy("login"))

    def test_authorized_user_is_logged_out(self):
        """When a user is authenticated, logging them out logs them out."""
        self.client.force_login(self.user)
        response = self.client.get(reverse_lazy("logout"), follow=True)
        self.assertIsInstance(get_user(response.wsgi_request), AnonymousUser)

    def test_call_to_github_returns_parsed_json_as_dict(self):
        """get_github_info parses the API response into a dict."""
        from my_profile.views import get_github_info
        url = 'https://api.github.com/users/nhuntwalker'
        response = get_github_info(url)
        self.assertIsInstance(response, dict)

    def test_call_to_github_returns_user_data(self):
        """get_github_info returns the expected user payload."""
        from my_profile.views import get_github_info
        url = 'https://api.github.com/users/nhuntwalker'
        response = get_github_info(url)
        self.assertEqual(
            response["repos_url"],
            "https://api.github.com/users/nhuntwalker/repos"
        )

    def test_process_github_events_returns_good_repo_count(self):
        """process_github_events collapses events to 5 distinct repos."""
        from my_profile.views import process_github_events
        path = os.path.dirname(__file__)
        filepath = os.path.join(path, 'sample_github_event_json.json')
        # Close the fixture file deterministically (the original leaked it).
        with open(filepath) as fixture:
            data = json.load(fixture)
        repositories = process_github_events(data)
        self.assertEqual(len(repositories), 5)

    def test_process_github_events_returns_repo_list(self):
        """Each processed repository entry carries a repo_url key."""
        from my_profile.views import process_github_events
        path = os.path.dirname(__file__)
        filepath = os.path.join(path, 'sample_github_event_json.json')
        with open(filepath) as fixture:
            data = json.load(fixture)
        repositories = process_github_events(data)
        for repo in repositories:
            self.assertIn("repo_url", repo)
|
|
# -*- coding: utf-8 -*-
# Tests ported from tests/test_models.py and tests/test_user.py
import os
import json
import datetime as dt
import urlparse
from django.utils import timezone
import mock
import itsdangerous
import pytest
import pytz
from framework.auth.exceptions import ExpiredTokenError, InvalidTokenError, ChangePasswordError
from framework.auth.signals import user_merged
from framework.analytics import get_total_activity_count
from framework.exceptions import PermissionsError
from framework.celery_tasks import handlers
from website import settings
from website import filters
from website import mailchimp_utils
from website.project.signals import contributor_added
from website.project.views.contributor import notify_added_contributor
from website.views import find_bookmark_collection
from osf.models import AbstractNode, OSFUser as User, Tag, Contributor, Session
from osf.utils.auth import Auth
from osf.utils.names import impute_names_model
from osf.exceptions import ValidationError
from osf.modm_compat import Q
from .utils import capture_signals
from .factories import (
fake,
AuthUserFactory,
CollectionFactory,
ExternalAccountFactory,
ProjectFactory,
NodeFactory,
InstitutionFactory,
SessionFactory,
UserFactory,
UnregUserFactory,
UnconfirmedUserFactory
)
from tests.base import OsfTestCase
# Every test in this module touches the database.
pytestmark = pytest.mark.django_db
def test_factory():
    """UserFactory builds a user that can be saved without error."""
    user = UserFactory.build()
    user.save()
@pytest.fixture()
def user():
    """Return a freshly created user."""
    return UserFactory()
@pytest.fixture()
def auth(user):
    """Return an Auth object wrapping the ``user`` fixture."""
    return Auth(user)
# Tests copied from tests/test_models.py
class TestOSFUser:
    def test_create(self):
        """User.create stores a usable password and imputes name parts."""
        name, email = fake.name(), fake.email()
        user = User.create(
            username=email, password='foobar', fullname=name
        )
        # TODO: Remove me when auto_now_add is enabled (post-migration)
        user.date_registered = timezone.now()
        user.save()
        assert user.check_password('foobar') is True
        assert user._id
        assert user.given_name == impute_names_model(name)['given_name']
    def test_create_unconfirmed(self):
        """create_unconfirmed yields an unregistered user with one pending verification."""
        name, email = fake.name(), fake.email()
        user = User.create_unconfirmed(
            username=email, password='foobar', fullname=name
        )
        # TODO: Remove me when auto_now_add is enabled (post-migration)
        user.date_registered = timezone.now()
        user.save()
        assert user.is_registered is False
        assert len(user.email_verifications.keys()) == 1
        assert len(user.emails) == 0, 'primary email has not been added to emails list'
    def test_create_unconfirmed_with_campaign(self):
        """A campaign passed to create_unconfirmed is recorded as a system tag."""
        name, email = fake.name(), fake.email()
        user = User.create_unconfirmed(
            username=email, password='foobar', fullname=name,
            campaign='institution'
        )
        assert 'institution_campaign' in user.system_tags
    def test_create_unconfirmed_from_external_service(self):
        """External-identity signups store the identity on the pending verification."""
        name, email = fake.name(), fake.email()
        external_identity = {
            'ORCID': {
                fake.ean(): 'CREATE'
            }
        }
        user = User.create_unconfirmed(
            username=email,
            password=str(fake.password()),
            fullname=name,
            external_identity=external_identity,
        )
        user.save()
        assert user.is_registered is False
        assert len(user.email_verifications.keys()) == 1
        assert user.email_verifications.popitem()[1]['external_identity'] == external_identity
        assert len(user.emails) == 0, 'primary email has not been added to emails list'
    def test_create_confirmed(self):
        """create_confirmed registers and claims the user immediately."""
        name, email = fake.name(), fake.email()
        user = User.create_confirmed(
            username=email, password='foobar', fullname=name
        )
        user.save()
        assert user.is_registered is True
        assert user.is_claimed is True
        assert user.date_registered == user.date_confirmed
    def test_update_guessed_names(self):
        """update_guessed_names splits fullname into its component parts."""
        name = fake.name()
        u = User(fullname=name)
        u.update_guessed_names()
        parsed = impute_names_model(name)
        assert u.fullname == name
        assert u.given_name == parsed['given_name']
        assert u.middle_names == parsed['middle_names']
        assert u.family_name == parsed['family_name']
        assert u.suffix == parsed['suffix']
    def test_create_unregistered(self):
        """create_unregistered makes an invited, unclaimed user with no confirmed emails."""
        name, email = fake.name(), fake.email()
        u = User.create_unregistered(email=email,
                                     fullname=name)
        # TODO: Remove post-migration
        u.date_registered = timezone.now()
        u.save()
        assert u.username == email
        assert u.is_registered is False
        assert u.is_claimed is False
        assert u.is_invited is True
        assert email not in u.emails
        parsed = impute_names_model(name)
        assert u.given_name == parsed['given_name']
    @mock.patch('osf.models.user.OSFUser.update_search')
    def test_search_not_updated_for_unreg_users(self, update_search):
        """Saving an unregistered user must not trigger a search-index update."""
        u = User.create_unregistered(fullname=fake.name(), email=fake.email())
        # TODO: Remove post-migration
        u.date_registered = timezone.now()
        u.save()
        assert not update_search.called
    @mock.patch('osf.models.OSFUser.update_search')
    def test_search_updated_for_registered_users(self, update_search):
        """Saving a registered user triggers a search-index update."""
        UserFactory(is_registered=True)
        assert update_search.called
    def test_create_unregistered_raises_error_if_already_in_db(self):
        """Creating an unregistered duplicate of an existing username fails validation."""
        u = UnregUserFactory()
        dupe = User.create_unregistered(fullname=fake.name(), email=u.username)
        with pytest.raises(ValidationError):
            dupe.save()
    def test_merged_user_is_not_active(self):
        """A user merged into another account is inactive."""
        master = UserFactory()
        dupe = UserFactory(merged_by=master)
        assert dupe.is_active is False
    def test_non_registered_user_is_not_active(self):
        """An unregistered user is inactive even with a usable password."""
        u = User(username=fake.email(),
                 fullname='Freddie Mercury',
                 is_registered=False)
        u.set_password('killerqueen')
        u.save()
        assert u.is_active is False
    def test_user_with_no_password_is_invalid(self):
        """A registered user cannot be saved without a password."""
        u = User(
            username=fake.email(),
            fullname='Freddie Mercury',
            is_registered=True,
        )
        with pytest.raises(ValidationError):
            u.save()
    def test_merged_user_with_two_account_on_same_project_with_different_visibility_and_permissions(self, user):
        """Merging keeps the higher permissions/visibility of the two accounts."""
        user2 = UserFactory.build()
        user2.save()
        project = ProjectFactory(is_public=True)
        # Both the master and dupe are contributors
        project.add_contributor(user2, log=False)
        project.add_contributor(user, log=False)
        project.set_permissions(user=user, permissions=['read'])
        project.set_permissions(user=user2, permissions=['read', 'write', 'admin'])
        project.set_visible(user=user, visible=False)
        project.set_visible(user=user2, visible=True)
        project.save()
        user.merge_user(user2)
        user.save()
        project.reload()
        assert project.has_permission(user, 'admin') is True
        assert project.get_visible(user) is True
        assert project.is_contributor(user2) is False
    def test_cant_create_user_without_username(self):
        """A user without a username fails validation on save."""
        u = User()  # No username given
        with pytest.raises(ValidationError):
            u.save()
    def test_date_registered_upon_saving(self):
        """date_registered is auto-populated (UTC-aware) on first save."""
        u = User(username=fake.email(), fullname='Foo bar')
        u.set_unusable_password()
        u.save()
        assert bool(u.date_registered) is True
        assert u.date_registered.tzinfo == pytz.utc
    def test_cant_create_user_without_full_name(self):
        """A user without a fullname fails validation on save."""
        u = User(username=fake.email())
        with pytest.raises(ValidationError):
            u.save()
    def test_add_blacklisted_domain_unconfirmed_email(self, user):
        """Emails on blacklisted domains (e.g. mailinator) are rejected."""
        with pytest.raises(ValidationError) as e:
            user.add_unconfirmed_email('kanye@mailinator.com')
        assert e.value.message == 'Invalid Email'
    @mock.patch('website.security.random_string')
    def test_get_confirmation_url_for_external_service(self, random_string):
        """External-service confirmation URLs embed the token and destination."""
        random_string.return_value = 'abcde'
        u = UnconfirmedUserFactory()
        assert (u.get_confirmation_url(u.username, external_id_provider='service', destination='dashboard') ==
                '{0}confirm/external/{1}/{2}/?destination={3}'.format(settings.DOMAIN, u._id, 'abcde', 'dashboard'))
    @mock.patch('website.security.random_string')
    def test_get_confirmation_token(self, random_string):
        """Token lookup is case-insensitive on the email address."""
        random_string.return_value = '12345'
        u = UserFactory.build()
        u.add_unconfirmed_email('foo@bar.com')
        u.save()
        assert u.get_confirmation_token('foo@bar.com') == '12345'
        assert u.get_confirmation_token('fOo@bar.com') == '12345'
    def test_get_confirmation_token_when_token_is_expired_raises_error(self):
        """Requesting a token for an expired verification raises ExpiredTokenError."""
        u = UserFactory()
        # Make sure token is already expired
        expiration = timezone.now() - dt.timedelta(seconds=1)
        u.add_unconfirmed_email('foo@bar.com', expiration=expiration)
        with pytest.raises(ExpiredTokenError):
            u.get_confirmation_token('foo@bar.com')
    @mock.patch('website.security.random_string')
    def test_get_confirmation_token_when_token_is_expired_force(self, random_string):
        """force=True regenerates a fresh token for an expired verification."""
        random_string.return_value = '12345'
        u = UserFactory()
        # Make sure token is already expired
        expiration = timezone.now() - dt.timedelta(seconds=1)
        u.add_unconfirmed_email('foo@bar.com', expiration=expiration)
        # sanity check
        with pytest.raises(ExpiredTokenError):
            u.get_confirmation_token('foo@bar.com')
        random_string.return_value = '54321'
        token = u.get_confirmation_token('foo@bar.com', force=True)
        assert token == '54321'
    # Some old users will not have an 'expired' key in their email_verifications.
    # Assume the token is expired
    def test_get_confirmation_token_if_email_verification_doesnt_have_expiration(self):
        """Verifications missing an 'expiration' key are treated as expired."""
        u = UserFactory()
        email = fake.email()
        u.add_unconfirmed_email(email)
        # manually remove 'expiration' key
        token = u.get_confirmation_token(email)
        del u.email_verifications[token]['expiration']
        u.save()
        with pytest.raises(ExpiredTokenError):
            u.get_confirmation_token(email)
    @mock.patch('website.security.random_string')
    def test_get_confirmation_url(self, random_string):
        """Confirmation URLs embed the user id and the generated token."""
        random_string.return_value = 'abcde'
        u = UserFactory()
        u.add_unconfirmed_email('foo@bar.com')
        assert(
            u.get_confirmation_url('foo@bar.com') ==
            '{0}confirm/{1}/{2}/'.format(settings.DOMAIN, u._id, 'abcde')
        )
def test_get_confirmation_url_when_token_is_expired_raises_error(self):
    """Building a confirmation URL from an expired token raises ExpiredTokenError."""
    u = UserFactory()
    # Make sure token is already expired
    expiration = timezone.now() - dt.timedelta(seconds=1)
    u.add_unconfirmed_email('foo@bar.com', expiration=expiration)
    with pytest.raises(ExpiredTokenError):
        u.get_confirmation_url('foo@bar.com')
@mock.patch('website.security.random_string')
def test_get_confirmation_url_when_token_is_expired_force(self, random_string):
    """force=True mints a new token and returns a URL built from it."""
    random_string.return_value = '12345'
    u = UserFactory()
    # Make sure token is already expired
    expiration = timezone.now() - dt.timedelta(seconds=1)
    u.add_unconfirmed_email('foo@bar.com', expiration=expiration)
    # sanity check
    with pytest.raises(ExpiredTokenError):
        u.get_confirmation_token('foo@bar.com')
    random_string.return_value = '54321'
    url = u.get_confirmation_url('foo@bar.com', force=True)
    expected = '{0}confirm/{1}/{2}/'.format(settings.DOMAIN, u._id, '54321')
    assert url == expected
def test_confirm_primary_email(self):
    """Confirming the primary email registers and claims the user."""
    u = UnconfirmedUserFactory()
    token = u.get_confirmation_token(u.username)
    confirmed = u.confirm_email(token)
    u.save()
    assert bool(confirmed) is True
    # the pending verification is consumed
    assert len(u.email_verifications.keys()) == 0
    assert u.username in u.emails
    assert bool(u.is_registered) is True
    assert bool(u.is_claimed) is True
def test_confirm_email(self, user):
    """Confirming moves the address from unconfirmed_emails to emails."""
    token = user.add_unconfirmed_email('foo@bar.com')
    user.confirm_email(token)
    assert 'foo@bar.com' not in user.unconfirmed_emails
    assert 'foo@bar.com' in user.emails
def test_confirm_email_comparison_is_case_insensitive(self):
    """Email comparison during confirmation ignores case differences."""
    u = UnconfirmedUserFactory.build(
        username='letsgettacos@lgt.com'
    )
    u.add_unconfirmed_email('LetsGetTacos@LGT.com')
    u.save()
    assert bool(u.is_confirmed) is False  # sanity check
    token = u.get_confirmation_token('LetsGetTacos@LGT.com')
    confirmed = u.confirm_email(token)
    assert confirmed is True
    assert u.is_confirmed is True
def test_verify_confirmation_token(self):
    """Token verification rejects bad tokens and expired ones."""
    u = UserFactory.build()
    u.add_unconfirmed_email('foo@bar.com')
    u.save()
    with pytest.raises(InvalidTokenError):
        u.get_unconfirmed_email_for_token('badtoken')
    valid_token = u.get_confirmation_token('foo@bar.com')
    assert bool(u.get_unconfirmed_email_for_token(valid_token)) is True
    # force expiry in the past, then the same token must be rejected
    manual_expiration = timezone.now() - dt.timedelta(0, 10)
    u.email_verifications[valid_token]['expiration'] = manual_expiration
    with pytest.raises(ExpiredTokenError):
        u.get_unconfirmed_email_for_token(valid_token)
def test_verify_confirmation_token_when_token_has_no_expiration(self):
    """A legacy token lacking an expiration still resolves to its email."""
    # A user verification token may not have an expiration
    email = fake.email()
    u = UserFactory.build()
    u.add_unconfirmed_email(email)
    token = u.get_confirmation_token(email)
    # manually remove expiration to simulate legacy user
    del u.email_verifications[token]['expiration']
    u.save()
    assert bool(u.get_unconfirmed_email_for_token(token)) is True
def test_format_surname(self):
    """The 'surname' summary formatter displays only the family name."""
    person = UserFactory(fullname='Duane Johnson')
    summary = person.get_summary(formatter='surname')
    assert summary['user_display_name'] == 'Johnson'
def test_format_surname_one_name(self):
    """A single-word full name is used as-is by the 'surname' formatter."""
    person = UserFactory(fullname='Rock')
    summary = person.get_summary(formatter='surname')
    assert summary['user_display_name'] == 'Rock'
def test_url(self, user):
    """A user's relative URL is built from the primary id."""
    expected = '/{0}/'.format(user._id)
    assert user.url == expected
def test_absolute_url(self, user):
    """The absolute URL joins the site domain with the relative user path."""
    expected = urlparse.urljoin(settings.DOMAIN, '/{0}/'.format(user._id))
    assert user.absolute_url == expected
def test_profile_image_url(self, user):
    """profile_image_url with an explicit size matches the gravatar filter output."""
    expected = filters.gravatar(
        user,
        use_ssl=True,
        size=settings.PROFILE_IMAGE_MEDIUM
    )
    assert user.profile_image_url(settings.PROFILE_IMAGE_MEDIUM) == expected
def test_set_unusable_username_for_unsaved_user(self):
    """An unsaved user still gets a (placeholder) username that is unusable after save."""
    user = UserFactory.build()
    user.set_unusable_username()
    assert user.username is not None
    user.save()
    assert user.has_usable_username() is False
def test_set_unusable_username_for_saved_user(self):
    """For a saved user, the unusable username is the user's _id."""
    user = UserFactory()
    user.set_unusable_username()
    assert user.username == user._id
def test_has_usable_username(self):
    """A username equal to the user's _id counts as unusable."""
    user = UserFactory()
    assert user.has_usable_username() is True
    user.username = user._id
    assert user.has_usable_username() is False
def test_profile_image_url_has_no_default_size(self, user):
    """Without an explicit size, the profile image URL carries no size query param."""
    expected = filters.gravatar(
        user,
        use_ssl=True,
    )
    assert user.profile_image_url() == expected
    size = urlparse.parse_qs(urlparse.urlparse(user.profile_image_url()).query).get('size')
    assert size is None
def test_activity_points(self, user):
    """Activity points delegate to the total activity count for the user."""
    expected = get_total_activity_count(user._primary_key)
    assert user.get_activity_points() == expected
def test_contributed_property(self):
    """user.contributed lists exactly the nodes the user contributes to."""
    user = UserFactory()
    node = NodeFactory()
    node2 = NodeFactory()
    # TODO: Use Node.add_contributor when it's implemented
    Contributor.objects.create(user=user, node=node)
    projects_contributed_to = AbstractNode.objects.filter(_contributors=user)
    assert list(user.contributed) == list(projects_contributed_to)
    assert node2 not in user.contributed
# copied from tests/test_views.py
def test_clean_email_verifications(self, user):
    """clean_email_verifications drops a bad token from email_verifications."""
    # Do not return bad token and removes it from user.email_verifications
    email = 'test@example.com'
    token = 'blahblahblah'
    user.email_verifications[token] = {'expiration': (timezone.now() + dt.timedelta(days=1)),
                                       'email': email,
                                       'confirmed': False}
    user.save()
    assert user.email_verifications[token]['email'] == email
    user.clean_email_verifications(given_token=token)
    unconfirmed_emails = user.unconfirmed_email_info
    assert unconfirmed_emails == []
    assert user.email_verifications == {}
def test_display_full_name_registered(self):
    """A registered user displays their stored full name."""
    user = UserFactory()
    assert user.display_full_name() == user.fullname
def test_display_full_name_unregistered(self):
    """An unregistered contributor displays the per-node unclaimed name."""
    name = fake.name()
    u = UnregUserFactory()
    project = NodeFactory()
    project.add_unregistered_contributor(
        fullname=name, email=u.username,
        auth=Auth(project.creator)
    )
    project.save()
    u.reload()
    assert u.display_full_name(node=project) == name
def test_username_is_automatically_lowercased(self):
    """Usernames are normalized to lowercase on creation."""
    user = UserFactory(username='nEoNiCon@bet.com')
    assert user.username == 'neonicon@bet.com'
def test_update_affiliated_institutions_by_email_domains(self):
    """Matching email domain affiliates the user; repeating is idempotent."""
    institution = InstitutionFactory()
    email_domain = institution.email_domains[0]
    user_email = '{}@{}'.format(fake.domain_word(), email_domain)
    user = UserFactory(username=user_email)
    user.update_affiliated_institutions_by_email_domain()
    assert user.affiliated_institutions.count() == 1
    assert user.is_affiliated_with_institution(institution) is True
    # second call must not create a duplicate affiliation
    user.update_affiliated_institutions_by_email_domain()
    assert user.affiliated_institutions.count() == 1
def test_is_affiliated_with_institution(self, user):
    """is_affiliated_with_institution reflects the m2m affiliation relation."""
    institution1, institution2 = InstitutionFactory(), InstitutionFactory()
    user.affiliated_institutions.add(institution1)
    user.save()
    assert user.is_affiliated_with_institution(institution1) is True
    assert user.is_affiliated_with_institution(institution2) is False
class TestProjectsInCommon:
    """Tests for computing the projects two users both contribute to."""

    def test_get_projects_in_common(self, user, auth):
        """get_projects_in_common returns the intersection of the users' projects."""
        user2 = UserFactory()
        project = NodeFactory(creator=user)
        project.add_contributor(contributor=user2, auth=auth)
        project.save()
        project_keys = set([node._id for node in user.contributed])
        projects = set(user.contributed)
        user2_project_keys = set([node._id for node in user2.contributed])
        assert set(n._id for n in user.get_projects_in_common(user2)) == project_keys.intersection(user2_project_keys)
        assert user.get_projects_in_common(user2) == projects.intersection(user2.contributed)

    def test_n_projects_in_common(self, user, auth):
        """n_projects_in_common counts shared projects, zero for strangers."""
        user2 = UserFactory()
        user3 = UserFactory()
        project = NodeFactory(creator=user)
        project.add_contributor(contributor=user2, auth=auth)
        project.save()
        assert user.n_projects_in_common(user2) == 1
        assert user.n_projects_in_common(user3) == 0
class TestCookieMethods:
    """Tests for cookie creation and user lookup via signed session cookies."""

    def test_user_get_cookie(self):
        """get_or_create_cookie signs the id of an existing session."""
        user = UserFactory()
        super_secret_key = 'children need maps'
        signer = itsdangerous.Signer(super_secret_key)
        session = Session(data={
            'auth_user_id': user._id,
            'auth_user_username': user.username,
            'auth_user_fullname': user.fullname,
        })
        session.save()
        assert signer.unsign(user.get_or_create_cookie(super_secret_key)) == session._id

    def test_user_get_cookie_no_session(self):
        """When no session exists, get_or_create_cookie creates one with auth data."""
        user = UserFactory()
        super_secret_key = 'children need maps'
        signer = itsdangerous.Signer(super_secret_key)
        assert(
            Session.find(Q('data.auth_user_id', 'eq', user._id)).count() == 0
        )
        cookie = user.get_or_create_cookie(super_secret_key)
        session = Session.find(Q('data.auth_user_id', 'eq', user._id))[0]
        assert session._id == signer.unsign(cookie)
        assert session.data['auth_user_id'] == user._id
        assert session.data['auth_user_username'] == user.username
        assert session.data['auth_user_fullname'] == user.fullname

    def test_get_user_by_cookie(self):
        """from_cookie round-trips back to the originating user."""
        user = UserFactory()
        cookie = user.get_or_create_cookie()
        assert user == User.from_cookie(cookie)

    def test_get_user_by_cookie_returns_none(self):
        """An empty cookie resolves to no user."""
        assert User.from_cookie('') is None

    def test_get_user_by_cookie_bad_cookie(self):
        """A malformed cookie resolves to no user."""
        assert User.from_cookie('foobar') is None

    def test_get_user_by_cookie_no_user_id(self):
        """A session missing auth_user_id resolves to no user."""
        user = UserFactory()
        cookie = user.get_or_create_cookie()
        session = Session.find_one(Q('data.auth_user_id', 'eq', user._id))
        del session.data['auth_user_id']
        session.save()
        assert User.from_cookie(cookie) is None

    def test_get_user_by_cookie_no_session(self):
        """A cookie whose session was deleted resolves to no user."""
        user = UserFactory()
        cookie = user.get_or_create_cookie()
        Session.objects.all().delete()
        assert User.from_cookie(cookie) is None
class TestChangePassword:
    """Tests for password change validation and notification behavior."""

    def test_change_password(self, user):
        """A valid old/new/confirm triple changes the password."""
        old_password = 'password'
        new_password = 'new password'
        confirm_password = new_password
        user.set_password(old_password)
        user.save()
        user.change_password(old_password, new_password, confirm_password)
        assert bool(user.check_password(new_password)) is True

    @mock.patch('website.mails.send_mail')
    def test_set_password_notify_default(self, mock_send_mail, user):
        """By default, setting a password sends a notification email."""
        old_password = 'password'
        user.set_password(old_password)
        user.save()
        assert mock_send_mail.called is True

    @mock.patch('website.mails.send_mail')
    def test_set_password_no_notify(self, mock_send_mail, user):
        """notify=False suppresses the password-change email."""
        old_password = 'password'
        user.set_password(old_password, notify=False)
        user.save()
        assert mock_send_mail.called is False

    @mock.patch('website.mails.send_mail')
    def test_check_password_upgrade_hasher_no_notify(self, mock_send_mail, user):
        """check_password silently re-hashes legacy password hashes without emailing."""
        raw_password = 'password'
        user.password = 'sha1$lNb72DKWDv6P$e6ae16dada9303ae0084e14fc96659da4332bb05'
        user.check_password(raw_password)
        assert user.password.startswith('md5$')
        assert mock_send_mail.called is False

    def test_change_password_invalid(self, old_password=None, new_password=None, confirm_password=None,
                                     error_message='Old password is invalid'):
        """Shared helper/test: an invalid triple raises ChangePasswordError with error_message."""
        user = UserFactory()
        user.set_password('password')
        user.save()
        with pytest.raises(ChangePasswordError) as excinfo:
            user.change_password(old_password, new_password, confirm_password)
            user.save()
        assert error_message in excinfo.value.message
        assert bool(user.check_password(new_password)) is False

    def test_change_password_invalid_old_password(self):
        """Wrong old password is rejected."""
        self.test_change_password_invalid(
            'invalid old password',
            'new password',
            'new password',
            'Old password is invalid',
        )

    def test_change_password_invalid_new_password_length(self):
        """Too-short new password is rejected."""
        self.test_change_password_invalid(
            'password',
            '12345',
            '12345',
            'Password should be at least eight characters',
        )

    def test_change_password_invalid_confirm_password(self):
        """Mismatched confirmation is rejected."""
        self.test_change_password_invalid(
            'password',
            'new password',
            'invalid confirm password',
            'Password does not match the confirmation',
        )

    def test_change_password_invalid_blank_password(self, old_password='', new_password='', confirm_password=''):
        """Blank values in any field are rejected."""
        self.test_change_password_invalid(
            old_password,
            new_password,
            confirm_password,
            'Passwords cannot be blank',
        )

    def test_change_password_invalid_blank_new_password(self):
        """None, empty, and whitespace-only new passwords are all rejected."""
        for password in (None, '', ' '):
            self.test_change_password_invalid_blank_password('password', password, 'new password')

    def test_change_password_invalid_blank_confirm_password(self):
        """None, empty, and whitespace-only confirmations are all rejected."""
        for password in (None, '', ' '):
            self.test_change_password_invalid_blank_password('password', 'new password', password)
class TestIsActive:
    """Tests for the conditions controlling user.is_active."""

    @pytest.fixture()
    def make_user(self):
        """Return a factory that builds an active-by-default user; attrs override defaults."""
        def func(**attrs):
            # By default, return an active user
            user = UserFactory.build(
                is_registered=True,
                merged_by=None,
                is_disabled=False,
                date_confirmed=timezone.now(),
            )
            user.set_password('secret')
            for attr, value in attrs.items():
                setattr(user, attr, value)
            return user
        return func

    def test_is_active_is_set_to_true_under_correct_conditions(self, make_user):
        """A registered, confirmed, enabled, unmerged user with a password is active."""
        user = make_user()
        user.save()
        assert user.is_active is True

    def test_is_active_is_false_if_not_registered(self, make_user):
        """Unregistered users are inactive."""
        user = make_user(is_registered=False)
        user.save()
        assert user.is_active is False

    def test_is_active_is_false_if_not_confirmed(self, make_user):
        """Users without a confirmation date are inactive."""
        user = make_user(date_confirmed=None)
        user.save()
        assert user.is_active is False

    def test_is_active_is_false_if_password_unset(self, make_user):
        """Users with an unusable password are inactive."""
        user = make_user()
        user.set_unusable_password()
        user.save()
        assert user.is_active is False

    def test_is_active_is_false_if_merged(self, make_user):
        """Merged users are inactive."""
        merger = UserFactory()
        user = make_user(merged_by=merger)
        user.save()
        assert user.is_active is False

    def test_is_active_is_false_if_disabled(self, make_user):
        """Disabled users are inactive."""
        user = make_user(date_disabled=timezone.now())
        # Persist before asserting, consistent with every sibling test above
        # (the original omitted this save()).
        user.save()
        assert user.is_active is False
class TestAddUnconfirmedEmail:
    """Tests for adding unconfirmed email addresses to a user."""

    @mock.patch('website.security.random_string')
    def test_add_unconfirmed_email(self, random_string):
        """Adding an unconfirmed email records it under the generated token."""
        token = fake.lexify('???????')
        random_string.return_value = token
        u = UserFactory()
        assert len(u.email_verifications.keys()) == 0
        u.add_unconfirmed_email('foo@bar.com')
        assert len(u.email_verifications.keys()) == 1
        assert u.email_verifications[token]['email'] == 'foo@bar.com'

    @mock.patch('website.security.random_string')
    def test_add_unconfirmed_email_adds_expiration_date(self, random_string):
        """Each new verification record carries a datetime expiration."""
        token = fake.lexify('???????')
        random_string.return_value = token
        u = UserFactory()
        u.add_unconfirmed_email('test@osf.io')
        assert isinstance(u.email_verifications[token]['expiration'], dt.datetime)

    def test_add_blank_unconfirmed_email(self):
        """A blank address is rejected with a validation error."""
        user = UserFactory()
        with pytest.raises(ValidationError) as exc_info:
            user.add_unconfirmed_email('')
        assert exc_info.value.message == 'Enter a valid email address.'
# Copied from tests/test_models.TestUnregisteredUser
class TestUnregisteredUser:
    """Tests for unregistered users and the claiming workflow."""

    @pytest.fixture()
    def referrer(self):
        """A registered user who refers the unregistered contributor."""
        return UserFactory()

    @pytest.fixture()
    def email(self):
        """A fresh fake email address for the unclaimed record."""
        return fake.email()

    @pytest.fixture()
    def unreg_user(self, referrer, project, email):
        """An unregistered user with one unclaimed record on `project`."""
        user = UnregUserFactory()
        given_name = 'Fredd Merkury'
        user.add_unclaimed_record(node=project,
                                  given_name=given_name, referrer=referrer,
                                  email=email)
        user.save()
        return user

    @pytest.fixture()
    def project(self, referrer):
        """A node created by the referrer."""
        return NodeFactory(creator=referrer)

    def test_unregistered_factory(self):
        """UnregUserFactory produces a nameless-password, named, unregistered user."""
        u1 = UnregUserFactory()
        assert bool(u1.is_registered) is False
        assert u1.has_usable_password() is False
        assert bool(u1.fullname) is True

    def test_unconfirmed_factory(self):
        """UnconfirmedUserFactory produces a user with one pending verification."""
        u = UnconfirmedUserFactory()
        assert bool(u.is_registered) is False
        assert bool(u.username) is True
        assert bool(u.fullname) is True
        assert bool(u.password) is True
        assert len(u.email_verifications.keys()) == 1

    def test_add_unclaimed_record(self, unreg_user, email, referrer, project):
        """Unclaimed records store name, referrer, token, and email per node."""
        data = unreg_user.unclaimed_records[project._primary_key]
        assert data['name'] == 'Fredd Merkury'
        assert data['referrer_id'] == referrer._id
        assert 'token' in data
        assert data['email'] == email
        assert data == unreg_user.get_unclaimed_record(project._primary_key)

    def test_get_claim_url(self, unreg_user, project):
        """The external claim URL embeds domain, user id, node id, and token."""
        uid = unreg_user._primary_key
        pid = project._primary_key
        token = unreg_user.get_unclaimed_record(pid)['token']
        domain = settings.DOMAIN
        assert (
            unreg_user.get_claim_url(pid, external=True) ==
            '{domain}user/{uid}/{pid}/claim/?token={token}'.format(**locals())
        )

    def test_get_claim_url_raises_value_error_if_not_valid_pid(self, unreg_user):
        """Requesting a claim URL for an unknown node id raises ValueError."""
        with pytest.raises(ValueError):
            unreg_user.get_claim_url('invalidinput')

    def test_cant_add_unclaimed_record_if_referrer_isnt_contributor(self, referrer, unreg_user):
        """Only contributors of the node may refer an unclaimed user."""
        project = NodeFactory()  # referrer isn't a contributor to this project
        with pytest.raises(PermissionsError):
            unreg_user.add_unclaimed_record(node=project,
                                            given_name='fred m', referrer=referrer)
            unreg_user.save()

    @mock.patch('osf.models.OSFUser.update_search_nodes')
    @mock.patch('osf.models.OSFUser.update_search')
    def test_register(self, mock_search, mock_search_nodes):
        """register() claims the user and sets username and password."""
        user = UnregUserFactory()
        assert user.is_registered is False  # sanity check
        assert user.is_claimed is False
        email = fake.email()
        user.register(username=email, password='killerqueen')
        user.save()
        assert user.is_claimed is True
        assert user.is_registered is True
        assert user.check_password('killerqueen') is True
        assert user.username == email

    @mock.patch('osf.models.OSFUser.update_search_nodes')
    @mock.patch('osf.models.OSFUser.update_search')
    def test_registering_with_a_different_email_adds_to_emails_list(self, mock_search, mock_search_nodes):
        """Registering with a new email appends it to the user's emails."""
        user = UnregUserFactory()
        assert user.has_usable_password() is False  # sanity check
        email = fake.email()
        user.register(username=email, password='killerqueen')
        assert email in user.emails

    def test_verify_claim_token(self, unreg_user, project):
        """verify_claim_token accepts the stored token and rejects others."""
        valid = unreg_user.get_unclaimed_record(project._primary_key)['token']
        assert bool(unreg_user.verify_claim_token(valid, project_id=project._primary_key)) is True
        assert bool(unreg_user.verify_claim_token('invalidtoken', project_id=project._primary_key)) is False

    def test_verify_claim_token_with_no_expiration_date(self, unreg_user, project):
        """Legacy records lacking an 'expires' key still verify successfully."""
        # Legacy records may not have an 'expires' key
        record = unreg_user.get_unclaimed_record(project._primary_key)
        del record['expires']
        unreg_user.save()
        token = record['token']
        assert unreg_user.verify_claim_token(token, project_id=project._primary_key) is True
# Copied from tests/test_models.py
class TestRecentlyAdded:
    """Tests for the recently-added-contributors list."""

    def test_recently_added(self, user, auth):
        """Most recently added contributors come first."""
        # Project created
        project = NodeFactory()
        assert hasattr(user, 'recently_added') is True
        # Two users added as contributors
        user2 = UserFactory()
        user3 = UserFactory()
        project.add_contributor(contributor=user2, auth=auth)
        project.add_contributor(contributor=user3, auth=auth)
        recently_added = list(user.get_recently_added())
        assert user3 == recently_added[0]
        assert user2 == recently_added[1]
        assert len(list(recently_added)) == 2

    def test_recently_added_multi_project(self, user, auth):
        """Adds across projects are merged, most recent first and de-duplicated."""
        # Three users are created
        user2 = UserFactory()
        user3 = UserFactory()
        user4 = UserFactory()
        # 2 projects created
        project = NodeFactory()
        project2 = NodeFactory()
        # Users 2 and 3 are added to original project
        project.add_contributor(contributor=user2, auth=auth)
        project.add_contributor(contributor=user3, auth=auth)
        # Users 2 and 3 are added to another project
        project2.add_contributor(contributor=user2, auth=auth)
        project2.add_contributor(contributor=user4, auth=auth)
        recently_added = list(user.get_recently_added())
        assert user4 == recently_added[0]
        assert user2 == recently_added[1]
        assert user3 == recently_added[2]
        assert len(recently_added) == 3

    def test_recently_added_length(self, user, auth):
        """The recently-added list is capped at 15 entries."""
        # Project created
        project = NodeFactory()
        assert len(list(user.get_recently_added())) == 0
        # Add 17 users
        for _ in range(17):
            project.add_contributor(
                contributor=UserFactory(),
                auth=auth
            )
        assert len(list(user.get_recently_added())) == 15
# New tests
class TestTagging:
    """Tests for system tags on users."""

    def test_add_system_tag(self, user):
        """add_system_tag creates a system Tag and attaches it to the user."""
        tag_name = fake.word()
        user.add_system_tag(tag_name)
        user.save()
        assert len(user.system_tags) == 1
        tag = Tag.objects.get(name=tag_name, system=True)
        assert tag in user.tags.all()

    def test_tags_get_lowercased(self, user):
        """Tag names are normalized to lowercase on creation."""
        tag_name = 'NeOn'
        user.add_system_tag(tag_name)
        user.save()
        tag = Tag.objects.get(name=tag_name.lower(), system=True)
        assert tag in user.tags.all()

    def test_system_tags_property(self, user):
        """system_tags exposes the names of the user's system tags."""
        tag_name = fake.word()
        user.add_system_tag(tag_name)
        assert tag_name in user.system_tags
class TestCitationProperties:
    """Tests for citation-related user properties."""

    def test_user_csl(self, user):
        """csl_name exposes the user's name in CSL's name-variable schema."""
        # Convert a User instance to csl's name-variable schema
        expected = {
            'given': user.given_name,
            'family': user.family_name,
        }
        assert user.csl_name == expected
# copied from tests/test_models.py
class TestMergingUsers:
    """Tests for merging a duplicate user account into a master account."""

    @pytest.fixture()
    def master(self):
        """The account that survives the merge."""
        return UserFactory(
            fullname='Joe Shmo',
            is_registered=True,
            emails=['joe@mail.com'],
        )

    @pytest.fixture()
    def dupe(self):
        """The duplicate account that gets merged away."""
        return UserFactory(
            fullname='Joseph Shmo',
            emails=['joseph123@hotmail.com']
        )

    @pytest.fixture()
    def merge_dupe(self, master, dupe):
        """Callable that performs the merge and saves the master."""
        def f():
            '''Do the actual merge.'''
            master.merge_user(dupe)
            master.save()
        return f

    def test_bookmark_collection_nodes_arent_merged(self, dupe, master, merge_dupe):
        """The dupe's bookmark collection is not transferred to the master."""
        dashnode = find_bookmark_collection(dupe)
        assert dashnode in dupe.contributed
        merge_dupe()
        assert dashnode not in master.contributed

    def test_dupe_is_merged(self, dupe, master, merge_dupe):
        """After merging, the dupe is flagged as merged into the master."""
        merge_dupe()
        assert dupe.is_merged
        assert dupe.merged_by == master

    def test_dupe_email_is_appended(self, master, merge_dupe):
        """The dupe's email is appended to the master's email list."""
        merge_dupe()
        assert 'joseph123@hotmail.com' in master.emails

    @mock.patch('website.mailchimp_utils.get_mailchimp_api')
    def test_send_user_merged_signal(self, mock_get_mailchimp_api, dupe, merge_dupe):
        """Merging fires the user_merged signal."""
        dupe.mailchimp_mailing_lists['foo'] = True
        dupe.save()
        with capture_signals() as mock_signals:
            merge_dupe()
            assert mock_signals.signals_sent() == set([user_merged])

    @mock.patch('website.mailchimp_utils.get_mailchimp_api')
    def test_merged_user_unsubscribed_from_mailing_lists(self, mock_get_mailchimp_api, dupe, merge_dupe, request_context):
        """Merging unsubscribes the dupe from its mailchimp lists."""
        list_name = 'foo'
        username = dupe.username
        dupe.mailchimp_mailing_lists[list_name] = True
        dupe.save()
        mock_client = mock.MagicMock()
        mock_get_mailchimp_api.return_value = mock_client
        mock_client.lists.list.return_value = {'data': [{'id': 2, 'list_name': list_name}]}
        list_id = mailchimp_utils.get_list_id_from_name(list_name)
        merge_dupe()
        handlers.celery_teardown_request()
        dupe.reload()
        mock_client.lists.unsubscribe.assert_called_with(id=list_id, email={'email': username}, send_goodbye=False)
        assert dupe.mailchimp_mailing_lists[list_name] is False

    def test_inherits_projects_contributed_by_dupe(self, dupe, master, merge_dupe):
        """Contributorships of the dupe transfer to the master."""
        project = ProjectFactory()
        project.add_contributor(dupe)
        project.save()
        merge_dupe()
        project.reload()
        assert project.is_contributor(master) is True
        assert project.is_contributor(dupe) is False

    def test_inherits_projects_created_by_dupe(self, dupe, master, merge_dupe):
        """Projects created by the dupe become the master's."""
        project = ProjectFactory(creator=dupe)
        merge_dupe()
        project.reload()
        assert project.creator == master

    def test_adding_merged_user_as_contributor_adds_master(self, dupe, master, merge_dupe):
        """Adding a merged user as contributor adds the master instead."""
        project = ProjectFactory(creator=UserFactory())
        merge_dupe()
        project.add_contributor(contributor=dupe)
        assert project.is_contributor(master) is True
        assert project.is_contributor(dupe) is False

    def test_merging_dupe_who_is_contributor_on_same_projects(self, master, dupe, merge_dupe):
        """When both are contributors, the dupe's contributorship collapses into the master's."""
        # Both master and dupe are contributors on the same project
        project = ProjectFactory()
        project.add_contributor(contributor=master, visible=True)
        project.add_contributor(contributor=dupe, visible=True)
        project.save()
        merge_dupe()  # perform the merge
        project.reload()
        assert project.is_contributor(master)
        assert project.is_contributor(dupe) is False
        assert len(project.contributors) == 2  # creator and master are the only contribs
        assert project.contributor_set.get(user=master).visible is True

    def test_merging_dupe_who_has_different_visibility_from_master(self, master, dupe, merge_dupe):
        """A visible dupe makes an invisible master visible after the merge."""
        # Both master and dupe are contributors on the same project
        project = ProjectFactory()
        project.add_contributor(contributor=master, visible=False)
        project.add_contributor(contributor=dupe, visible=True)
        project.save()
        merge_dupe()  # perform the merge
        project.reload()
        assert project.contributor_set.get(user=master).visible is True

    def test_merging_dupe_who_is_a_non_bib_contrib_and_so_is_the_master(self, master, dupe, merge_dupe):
        """Two invisible contributors stay invisible after the merge."""
        # Both master and dupe are contributors on the same project
        project = ProjectFactory()
        project.add_contributor(contributor=master, visible=False)
        project.add_contributor(contributor=dupe, visible=False)
        project.save()
        merge_dupe()  # perform the merge
        project.reload()
        assert project.contributor_set.get(user=master).visible is False

    def test_merge_user_with_higher_permissions_on_project(self, master, dupe, merge_dupe):
        """The master keeps the higher of the two permission sets (dupe's admin)."""
        # Both master and dupe are contributors on the same project
        project = ProjectFactory()
        project.add_contributor(contributor=master, permissions=('read', 'write'))
        project.add_contributor(contributor=dupe, permissions=('read', 'write', 'admin'))
        project.save()
        merge_dupe()  # perform the merge
        assert project.get_permissions(master) == ['read', 'write', 'admin']

    def test_merge_user_with_lower_permissions_on_project(self, master, dupe, merge_dupe):
        """The master keeps the higher of the two permission sets (master's admin)."""
        # Both master and dupe are contributors on the same project
        project = ProjectFactory()
        project.add_contributor(contributor=master, permissions=('read', 'write', 'admin'))
        project.add_contributor(contributor=dupe, permissions=('read', 'write'))
        project.save()
        merge_dupe()  # perform the merge
        assert project.get_permissions(master) == ['read', 'write', 'admin']
class TestDisablingUsers(OsfTestCase):
    """Tests for disabling and re-enabling user accounts."""

    def setUp(self):
        super(TestDisablingUsers, self).setUp()
        self.user = UserFactory()

    def test_user_enabled_by_default(self):
        """New users start enabled."""
        assert self.user.is_disabled is False

    def test_disabled_user(self):
        """Ensure disabling a user sets date_disabled"""
        self.user.is_disabled = True
        self.user.save()
        assert isinstance(self.user.date_disabled, dt.datetime)
        assert self.user.is_disabled is True
        assert self.user.is_active is False

    def test_reenabled_user(self):
        """Ensure restoring a disabled user unsets date_disabled"""
        self.user.is_disabled = True
        self.user.save()
        self.user.is_disabled = False
        self.user.save()
        assert self.user.date_disabled is None
        assert self.user.is_disabled is False
        assert self.user.is_active is True

    def test_is_disabled_idempotency(self):
        """Re-disabling an already-disabled user keeps the original date_disabled."""
        self.user.is_disabled = True
        self.user.save()
        old_date_disabled = self.user.date_disabled
        self.user.is_disabled = True
        self.user.save()
        new_date_disabled = self.user.date_disabled
        assert new_date_disabled == old_date_disabled

    @mock.patch('website.mailchimp_utils.get_mailchimp_api')
    def test_disable_account_and_remove_sessions(self, mock_mail):
        """disable_account disables, unsubscribes, and invalidates sessions."""
        session1 = SessionFactory(user=self.user, date_created=(timezone.now() - dt.timedelta(seconds=settings.OSF_SESSION_TIMEOUT)))
        session2 = SessionFactory(user=self.user, date_created=(timezone.now() - dt.timedelta(seconds=settings.OSF_SESSION_TIMEOUT)))
        self.user.mailchimp_mailing_lists[settings.MAILCHIMP_GENERAL_LIST] = True
        self.user.save()
        self.user.disable_account()
        assert self.user.is_disabled is True
        assert isinstance(self.user.date_disabled, dt.datetime)
        assert self.user.mailchimp_mailing_lists[settings.MAILCHIMP_GENERAL_LIST] is False
        assert not Session.load(session1._id)
        assert not Session.load(session2._id)

    def test_disable_account_api(self):
        """With subscriptions enabled and no valid API key, disabling raises."""
        settings.ENABLE_EMAIL_SUBSCRIPTIONS = True
        with pytest.raises(mailchimp_utils.mailchimp.InvalidApiKeyError):
            self.user.disable_account()
# Copied from tests/modes/test_user.py
class TestUser(OsfTestCase):
def setUp(self):
super(TestUser, self).setUp()
self.user = AuthUserFactory()
def tearDown(self):
AbstractNode.remove()
User.remove()
Session.remove()
super(TestUser, self).tearDown()
# Regression test for https://github.com/CenterForOpenScience/osf.io/issues/2454
def test_add_unconfirmed_email_when_email_verifications_is_empty(self):
self.user.email_verifications = []
self.user.save()
email = fake.email()
self.user.add_unconfirmed_email(email)
self.user.save()
assert email in self.user.unconfirmed_emails
def test_unconfirmed_emails(self):
assert self.user.unconfirmed_emails == []
self.user.add_unconfirmed_email('foo@bar.com')
assert self.user.unconfirmed_emails == ['foo@bar.com']
# email_verifications field may NOT be None
self.user.email_verifications = []
self.user.save()
assert self.user.unconfirmed_emails == []
def test_unconfirmed_emails_unregistered_user(self):
assert UnregUserFactory().unconfirmed_emails == []
def test_unconfirmed_emails_unconfirmed_user(self):
user = UnconfirmedUserFactory()
assert user.unconfirmed_emails == [user.username]
# regression test for https://sentry.cos.io/sentry/osf/issues/6510/
def test_unconfirmed_email_info_when_email_verifications_is_empty(self):
user = UserFactory()
user.email_verifications = []
assert user.unconfirmed_email_info == []
def test_remove_unconfirmed_email(self):
self.user.add_unconfirmed_email('foo@bar.com')
self.user.save()
assert 'foo@bar.com' in self.user.unconfirmed_emails # sanity check
self.user.remove_unconfirmed_email('foo@bar.com')
self.user.save()
assert 'foo@bar.com' not in self.user.unconfirmed_emails
def test_confirm_email(self):
token = self.user.add_unconfirmed_email('foo@bar.com')
self.user.confirm_email(token)
assert 'foo@bar.com' not in self.user.unconfirmed_emails
assert 'foo@bar.com' in self.user.emails
def test_confirm_email_comparison_is_case_insensitive(self):
u = UnconfirmedUserFactory.build(
username='letsgettacos@lgt.com'
)
u.add_unconfirmed_email('LetsGetTacos@LGT.com')
u.save()
assert u.is_confirmed is False # sanity check
token = u.get_confirmation_token('LetsGetTacos@LGT.com')
confirmed = u.confirm_email(token)
assert confirmed is True
assert u.is_confirmed is True
def test_cannot_remove_primary_email_from_email_list(self):
with pytest.raises(PermissionsError) as e:
self.user.remove_email(self.user.username)
assert e.value.message == "Can't remove primary email"
def test_add_same_unconfirmed_email_twice(self):
email = "test@mail.com"
token1 = self.user.add_unconfirmed_email(email)
self.user.save()
self.user.reload()
assert token1 == self.user.get_confirmation_token(email)
assert email == self.user.get_unconfirmed_email_for_token(token1)
token2 = self.user.add_unconfirmed_email(email)
self.user.save()
self.user.reload()
assert token1 != self.user.get_confirmation_token(email)
assert token2 == self.user.get_confirmation_token(email)
assert email == self.user.get_unconfirmed_email_for_token(token2)
with pytest.raises(InvalidTokenError):
self.user.get_unconfirmed_email_for_token(token1)
def test_contributed_property(self):
projects_contributed_to = AbstractNode.find(Q('contributors', 'eq', self.user))
assert list(self.user.contributed.all()) == list(projects_contributed_to)
def test_contributor_to_property(self):
normal_node = ProjectFactory(creator=self.user)
normal_contributed_node = ProjectFactory()
normal_contributed_node.add_contributor(self.user)
normal_contributed_node.save()
deleted_node = ProjectFactory(creator=self.user, is_deleted=True)
bookmark_collection_node = find_bookmark_collection(self.user)
collection_node = CollectionFactory(creator=self.user)
project_to_be_invisible_on = ProjectFactory()
project_to_be_invisible_on.add_contributor(self.user, visible=False)
project_to_be_invisible_on.save()
contributor_to_nodes = [node._id for node in self.user.contributor_to]
assert normal_node._id in contributor_to_nodes
assert normal_contributed_node._id in contributor_to_nodes
assert project_to_be_invisible_on._id in contributor_to_nodes
assert deleted_node._id not in contributor_to_nodes
assert bookmark_collection_node._id not in contributor_to_nodes
assert collection_node._id not in contributor_to_nodes
def test_visible_contributor_to_property(self):
invisible_contributor = UserFactory()
normal_node = ProjectFactory(creator=invisible_contributor)
deleted_node = ProjectFactory(creator=invisible_contributor, is_deleted=True)
bookmark_collection_node = find_bookmark_collection(invisible_contributor)
collection_node = CollectionFactory(creator=invisible_contributor)
project_to_be_invisible_on = ProjectFactory()
project_to_be_invisible_on.add_contributor(invisible_contributor, visible=False)
project_to_be_invisible_on.save()
visible_contributor_to_nodes = [node._id for node in invisible_contributor.visible_contributor_to]
assert normal_node._id in visible_contributor_to_nodes
assert deleted_node._id not in visible_contributor_to_nodes
assert bookmark_collection_node._id not in visible_contributor_to_nodes
assert collection_node._id not in visible_contributor_to_nodes
assert project_to_be_invisible_on._id not in visible_contributor_to_nodes
def test_created_property(self):
    """user.created matches the queryset of nodes created by the user."""
    # make sure there's at least one project
    ProjectFactory(creator=self.user)
    projects_created_by_user = AbstractNode.find(Q('creator', 'eq', self.user))
    assert list(self.user.created.all()) == list(projects_created_by_user)
# Copied from tests/models/test_user.py
class TestUserMerging(OsfTestCase):
    """Tests for OSFUser.merge_user: how each field is combined, how
    system tags are unioned, and how unconfirmed/unregistered mergees
    are absorbed.  Copied from tests/models/test_user.py."""

    def setUp(self):
        super(TestUserMerging, self).setUp()
        self.user = UserFactory()
        with self.context:
            handlers.celery_before_request()

    def _add_unconfirmed_user(self):
        # Build an unconfirmed mergee whose system tags partially overlap
        # self.user's, so tag union behavior can be verified.
        self.unconfirmed = UnconfirmedUserFactory()
        self.user.add_system_tag('user')
        self.user.add_system_tag('shared')
        self.unconfirmed.add_system_tag('unconfirmed')
        self.unconfirmed.add_system_tag('shared')

    def _add_unregistered_user(self):
        # Build an unregistered mergee who is an unregistered contributor
        # on a project, so contributor transfer can be verified.
        self.unregistered = UnregUserFactory()
        self.project_with_unreg_contrib = ProjectFactory()
        self.project_with_unreg_contrib.add_unregistered_contributor(
            fullname='Unreg',
            email=self.unregistered.username,
            auth=Auth(self.project_with_unreg_contrib.creator)
        )
        self.project_with_unreg_contrib.save()

    @mock.patch('website.mailchimp_utils.get_mailchimp_api')
    def test_merge(self, mock_get_mailchimp_api):
        """Exhaustively check the value of every user field after a merge."""
        def is_mrm_field(value):
            # Django many-related managers must be compared via .all().
            return 'ManyRelatedManager' in str(value.__class__)
        other_user = UserFactory()
        other_user.save()
        # define values for users' fields
        today = timezone.now()
        yesterday = today - dt.timedelta(days=1)
        # Timestamps: the merge is expected to keep the later value.
        self.user.comments_viewed_timestamp['shared_gt'] = today
        other_user.comments_viewed_timestamp['shared_gt'] = yesterday
        self.user.comments_viewed_timestamp['shared_lt'] = yesterday
        other_user.comments_viewed_timestamp['shared_lt'] = today
        self.user.comments_viewed_timestamp['user'] = yesterday
        other_user.comments_viewed_timestamp['other'] = yesterday
        self.user.email_verifications = {'user': {'email': 'a'}}
        other_user.email_verifications = {'other': {'email': 'b'}}
        self.user.notifications_configured = {'abc12': True}
        other_user.notifications_configured = {'123ab': True}
        self.user.external_accounts = [ExternalAccountFactory()]
        other_user.external_accounts = [ExternalAccountFactory()]
        # Mailing lists: a True on either side should win.
        self.user.mailchimp_mailing_lists = {
            'user': True,
            'shared_gt': True,
            'shared_lt': False,
        }
        other_user.mailchimp_mailing_lists = {
            'other': True,
            'shared_gt': False,
            'shared_lt': True,
        }
        self.user.security_messages = {
            'user': today,
            'shared': today,
        }
        other_user.security_messages = {
            'other': today,
            'shared': today,
        }
        self.user.add_system_tag('user')
        self.user.add_system_tag('shared')
        other_user.add_system_tag('other')
        other_user.add_system_tag('shared')
        self.user.save()
        other_user.save()
        # define expected behavior for ALL FIELDS of the User object
        # Fields listed here are expected to keep the master user's value.
        default_to_master_user_fields = [
            'id',
            'date_confirmed',
            'date_disabled',
            'date_last_login',
            'date_registered',
            'email_last_sent',
            'external_identity',
            'family_name',
            'fullname',
            'given_name',
            'is_claimed',
            'is_invited',
            'is_registered',
            'jobs',
            'locale',
            'merged_by',
            'middle_names',
            'password',
            'recently_added',
            'schools',
            'social',
            'suffix',
            'timezone',
            'username',
            'mailing_lists',
            'verification_key',
            'verification_key_v2',
            'affiliated_institutions',
            'contributor_added_email_records',
            'requested_deactivation',
        ]
        # Fields whose merged value is a combination of both users.
        calculated_fields = {
            'comments_viewed_timestamp': {
                'user': yesterday,
                'other': yesterday,
                'shared_gt': today,
                'shared_lt': today,
            },
            'email_verifications': {
                'user': {'email': 'a'},
                'other': {'email': 'b'},
            },
            'notifications_configured': {
                '123ab': True, 'abc12': True,
            },
            'emails': [
                self.user.username,
                other_user.username,
            ],
            'external_accounts': [
                self.user.external_accounts.first().id,
                other_user.external_accounts.first().id,
            ],
            'mailchimp_mailing_lists': {
                'user': True,
                'other': True,
                'shared_gt': True,
                'shared_lt': True,
            },
            'osf_mailing_lists': {
                'Open Science Framework Help': True
            },
            'security_messages': {
                'user': today,
                'other': today,
                'shared': today,
            },
            'tags': [Tag.load('user', system=True).id, Tag.load('shared', system=True).id, Tag.load('other', system=True).id],
            'unclaimed_records': {},
        }
        # from the explicit rules above, compile expected field/value pairs
        expected = {}
        expected.update(calculated_fields)
        for key in default_to_master_user_fields:
            if is_mrm_field(getattr(self.user, key)):
                expected[key] = list(getattr(self.user, key).all().values_list('id', flat=True))
            else:
                expected[key] = getattr(self.user, key)
        # ensure all fields of the user object have an explicit expectation
        # NOTE(review): Meta.get_all_field_names() was removed in Django 1.10;
        # confirm the Django version in use still provides it.
        assert set(expected.keys()).issubset(set(self.user._meta.get_all_field_names()))
        # mock mailchimp
        mock_client = mock.MagicMock()
        mock_get_mailchimp_api.return_value = mock_client
        mock_client.lists.list.return_value = {'data': [{'id': x, 'list_name': list_name} for x, list_name in enumerate(self.user.mailchimp_mailing_lists)]}
        # perform the merge
        self.user.merge_user(other_user)
        self.user.save()
        handlers.celery_teardown_request()
        self.user.reload()
        # check each field/value pair
        # NOTE(review): iteritems() is Python 2 only — use items() if this
        # suite is run under Python 3.
        for k, v in expected.iteritems():
            if is_mrm_field(getattr(self.user, k)):
                assert list(getattr(self.user, k).all().values_list('id', flat=True)) == v, '{} doesn\'t match expectations'.format(k)
            else:
                assert getattr(self.user, k) == v, '{} doesn\'t match expectation'.format(k)
        # check fields set on merged user
        assert other_user.merged_by == self.user
        # The mergee's sessions should have been invalidated.
        assert Session.find(Q('data.auth_user_id', 'eq', other_user._id)).count() == 0

    def test_merge_unconfirmed(self):
        """Merging an unconfirmed user marks it merged and disables it."""
        self._add_unconfirmed_user()
        unconfirmed_username = self.unconfirmed.username
        self.user.merge_user(self.unconfirmed)
        assert self.unconfirmed.is_merged is True
        assert self.unconfirmed.merged_by == self.user
        assert self.user.is_claimed is True
        assert self.user.is_invited is False
        # TODO: test profile fields - jobs, schools, social
        # TODO: test security_messages
        # TODO: test mailing_lists
        assert sorted(self.user.system_tags) == sorted(['shared', 'user', 'unconfirmed'])
        # TODO: test emails
        # TODO: test external_accounts
        assert self.unconfirmed.email_verifications == {}
        # A leading '!' marks an unusable Django password.
        assert self.unconfirmed.password[0] == '!'
        assert self.unconfirmed.verification_key is None
        # The mergee's email no longer needs to be confirmed by merger
        unconfirmed_emails = [record['email'] for record in self.user.email_verifications.values()]
        assert unconfirmed_username not in unconfirmed_emails

    def test_merge_preserves_external_identity(self):
        """Merging combines external identities; VERIFIED wins over
        LINK/CREATE for the same id, and the mergee's identity is cleared."""
        verified_user = UserFactory(external_identity={'ORCID': {'1234-1234-1234-1234': 'VERIFIED'}})
        linking_user = UserFactory(external_identity={'ORCID': {'1234-1234-1234-1234': 'LINK'}})
        creating_user = UserFactory(external_identity={'ORCID': {'1234-1234-1234-1234': 'CREATE'}})
        different_id_user = UserFactory(external_identity={'ORCID': {'4321-4321-4321-4321': 'VERIFIED'}})
        no_id_user = UserFactory(external_identity={'ORCID': {}})
        no_provider_user = UserFactory(external_identity={})
        linking_user.merge_user(creating_user)
        assert linking_user.external_identity == {'ORCID': {'1234-1234-1234-1234': 'LINK'}}
        linking_user.merge_user(verified_user)
        assert linking_user.external_identity == {'ORCID': {'1234-1234-1234-1234': 'VERIFIED'}}
        linking_user.merge_user(no_id_user)
        assert linking_user.external_identity == {'ORCID': {'1234-1234-1234-1234': 'VERIFIED'}}
        linking_user.merge_user(no_provider_user)
        assert linking_user.external_identity == {'ORCID': {'1234-1234-1234-1234': 'VERIFIED'}}
        linking_user.merge_user(different_id_user)
        assert linking_user.external_identity == {'ORCID': {'1234-1234-1234-1234': 'VERIFIED', '4321-4321-4321-4321': 'VERIFIED'}}
        # Every absorbed user's identity is emptied.
        assert creating_user.external_identity == {}
        assert verified_user.external_identity == {}
        assert no_id_user.external_identity == {}
        assert no_provider_user.external_identity == {}
        # Merging in the other direction moves the identity wholesale.
        no_provider_user.merge_user(linking_user)
        assert linking_user.external_identity == {}
        assert no_provider_user.external_identity == {'ORCID': {'1234-1234-1234-1234': 'VERIFIED', '4321-4321-4321-4321': 'VERIFIED'}}

    def test_merge_unregistered(self):
        # test only those behaviors that are not tested with unconfirmed users
        self._add_unregistered_user()
        self.user.merge_user(self.unregistered)
        self.project_with_unreg_contrib.reload()
        assert self.user.is_invited is True
        assert self.user in self.project_with_unreg_contrib.contributors

    @mock.patch('website.project.views.contributor.mails.send_mail')
    def test_merge_doesnt_send_signal(self, mock_notify):
        """Merging must not fire contributor-added notification emails."""
        # Explicitly reconnect signal as it is disconnected by default for test
        contributor_added.connect(notify_added_contributor)
        other_user = UserFactory()
        self.user.merge_user(other_user)
        assert other_user.merged_by._id == self.user._id
        assert mock_notify.called is False
class TestUserValidation(OsfTestCase):
    """Validation tests for OSFUser fields: fullname, social/profile
    websites, jobs, and schools."""

    def setUp(self):
        super(TestUserValidation, self).setUp()
        self.user = AuthUserFactory()

    def test_validate_fullname_none(self):
        self.user.fullname = None
        with pytest.raises(ValidationError):
            self.user.save()

    def test_validate_fullname_empty(self):
        self.user.fullname = ''
        with pytest.raises(ValidationError):
            self.user.save()

    def test_validate_social_profile_websites_empty(self):
        # An empty website list is valid.
        self.user.social = {'profileWebsites': []}
        self.user.save()
        assert self.user.social['profileWebsites'] == []

    def test_validate_social_profile_website_many_different(self):
        """Run the shared URL-validator fixture: every positive case must
        save, every negative case must raise ValidationError.  All cases
        are tried before failing so every mismatch is printed."""
        basepath = os.path.dirname(__file__)
        url_data_path = os.path.join(basepath, '../website/static/urlValidatorTest.json')
        with open(url_data_path) as url_test_data:
            data = json.load(url_test_data)
        fails_at_end = False
        for should_pass in data["testsPositive"]:
            try:
                self.user.social = {'profileWebsites': [should_pass]}
                self.user.save()
                assert self.user.social['profileWebsites'] == [should_pass]
            except ValidationError:
                fails_at_end = True
                print('\"' + should_pass + '\" failed but should have passed while testing that the validator ' + data['testsPositive'][should_pass])
        for should_fail in data["testsNegative"]:
            self.user.social = {'profileWebsites': [should_fail]}
            try:
                with pytest.raises(ValidationError):
                    self.user.save()
            except AssertionError:
                fails_at_end = True
                print('\"' + should_fail + '\" passed but should have failed while testing that the validator ' + data['testsNegative'][should_fail])
        if fails_at_end:
            # NOTE(review): a bare `raise` with no active exception re-raises
            # the frame's last exception only on Python 2; on Python 3 it
            # raises RuntimeError — confirm the interpreter version.
            raise

    def test_validate_multiple_profile_websites_valid(self):
        self.user.social = {'profileWebsites': ['http://cos.io/', 'http://thebuckstopshere.com', 'http://dinosaurs.com']}
        self.user.save()
        assert self.user.social['profileWebsites'] == ['http://cos.io/', 'http://thebuckstopshere.com', 'http://dinosaurs.com']

    def test_validate_social_profile_websites_invalid(self):
        self.user.social = {'profileWebsites': ['help computer']}
        with pytest.raises(ValidationError):
            self.user.save()

    def test_validate_multiple_profile_social_profile_websites_invalid(self):
        # One bad URL invalidates the whole list.
        self.user.social = {'profileWebsites': ['http://cos.io/', 'help computer', 'http://dinosaurs.com']}
        with pytest.raises(ValidationError):
            self.user.save()

    def test_empty_social_links(self):
        assert self.user.social_links == {}
        assert len(self.user.social_links) == 0

    def test_profile_website_unchanged(self):
        # Full URLs are passed through social_links untouched.
        self.user.social = {'profileWebsites': ['http://cos.io/']}
        self.user.save()
        assert self.user.social_links['profileWebsites'] == ['http://cos.io/']
        assert len(self.user.social_links) == 1

    def test_various_social_handles(self):
        # Bare handles are expanded into full provider URLs.
        self.user.social = {
            'profileWebsites': ['http://cos.io/'],
            'twitter': 'OSFramework',
            'github': 'CenterForOpenScience'
        }
        self.user.save()
        assert self.user.social_links == {
            'profileWebsites': ['http://cos.io/'],
            'twitter': 'http://twitter.com/OSFramework',
            'github': 'http://github.com/CenterForOpenScience'
        }

    def test_multiple_profile_websites(self):
        self.user.social = {
            'profileWebsites': ['http://cos.io/', 'http://thebuckstopshere.com', 'http://dinosaurs.com'],
            'twitter': 'OSFramework',
            'github': 'CenterForOpenScience'
        }
        self.user.save()
        assert self.user.social_links == {
            'profileWebsites': ['http://cos.io/', 'http://thebuckstopshere.com', 'http://dinosaurs.com'],
            'twitter': 'http://twitter.com/OSFramework',
            'github': 'http://github.com/CenterForOpenScience'
        }

    def test_nonsocial_ignored(self):
        # Unknown keys are dropped from social_links.
        self.user.social = {
            'foo': 'bar',
        }
        self.user.save()
        assert self.user.social_links == {}

    def test_validate_jobs_valid(self):
        self.user.jobs = [{
            'institution': 'School of Lover Boys',
            'department': 'Fancy Patter',
            'title': 'Lover Boy',
            'startMonth': 1,
            'startYear': '1970',
            'endMonth': 1,
            'endYear': '1980',
        }]
        self.user.save()

    def test_validate_jobs_institution_empty(self):
        self.user.jobs = [{'institution': ''}]
        with pytest.raises(ValidationError):
            self.user.save()

    def test_validate_jobs_bad_end_date(self):
        # end year is < start year
        self.user.jobs = [{
            'institution': fake.company(),
            'department': fake.bs(),
            'position': fake.catch_phrase(),
            'startMonth': 1,
            'startYear': '1970',
            'endMonth': 1,
            'endYear': '1960',
        }]
        with pytest.raises(ValidationError):
            self.user.save()

    def test_validate_schools_bad_end_date(self):
        # end year is < start year
        self.user.schools = [{
            'degree': fake.catch_phrase(),
            'institution': fake.company(),
            'department': fake.bs(),
            'startMonth': 1,
            'startYear': '1970',
            'endMonth': 1,
            'endYear': '1960',
        }]
        with pytest.raises(ValidationError):
            self.user.save()

    def test_validate_jobs_bad_year(self):
        # Non-numeric, out-of-range, too-short, and float-like years all fail.
        start_year = ['hi', '20507', '99', '67.34']
        for year in start_year:
            self.user.jobs = [{
                'institution': fake.company(),
                'department': fake.bs(),
                'position': fake.catch_phrase(),
                'startMonth': 1,
                'startYear': year,
                'endMonth': 1,
                'endYear': '1960',
            }]
            with pytest.raises(ValidationError):
                self.user.save()

    def test_validate_schools_bad_year(self):
        # Same invalid-year cases as jobs, applied to schools.
        start_year = ['hi', '20507', '99', '67.34']
        for year in start_year:
            self.user.schools = [{
                'degree': fake.catch_phrase(),
                'institution': fake.company(),
                'department': fake.bs(),
                'startMonth': 1,
                'startYear': year,
                'endMonth': 1,
                'endYear': '1960',
            }]
            with pytest.raises(ValidationError):
                self.user.save()
|
|
#!/usr/bin/env python
import argparse
import multiprocessing
import os
import queue
import re
import pandas
import pandas.io.formats.excel
from Bio import SeqIO
# Maximum columns allowed in a LibreOffice
# spreadsheet is 1024. Excel allows for
# 16,384 columns, but we'll set the lower
# number as the maximum. Some browsers
# (e.g., Firefox on Linux) are configured
# to use LibreOffice for Excel spreadsheets.
MAXCOLS = 1024
# Fixed directory names used when inputs/outputs arrive as collections
# of files rather than as single --input_* arguments (see __main__).
OUTPUT_EXCEL_DIR = 'output_excel_dir'
INPUT_JSON_AVG_MQ_DIR = 'input_json_avg_mq_dir'
INPUT_JSON_DIR = 'input_json_dir'
INPUT_NEWICK_DIR = 'input_newick_dir'
def annotate_table(table_df, group, annotation_dict):
    """Append an 'annotations' row to a SNP table.

    table_df columns are "chrom:position" strings; annotation_dict maps a
    chromosome id to a DataFrame whose IntervalIndex covers feature
    [start, stop] ranges (built by get_annotation_dict).  Returns the
    table with one extra annotation row merged in.
    """
    for gbk_chrome, pro in list(annotation_dict.items()):
        # Split "chrom:position" column labels and keep only the
        # positions belonging to this chromosome.
        ref_pos = list(table_df)
        ref_series = pandas.Series(ref_pos)
        ref_df = pandas.DataFrame(ref_series.str.split(':', expand=True).values, columns=['reference', 'position'])
        all_ref = ref_df[ref_df['reference'] == gbk_chrome]
        positions = all_ref.position.to_frame()
        # Create an annotation file.
        annotation_file = "%s_annotations.csv" % group
        with open(annotation_file, "a") as fh:
            for _, row in positions.iterrows():
                pos = row.position
                try:
                    # IntervalIndex lookup: find the feature(s) whose
                    # [start, stop] range contains this position.
                    aaa = pro.iloc[pro.index.get_loc(int(pos))][['chrom', 'locus', 'product', 'gene']]
                    try:
                        chrom, name, locus, tag = aaa.values[0]
                        print("{}:{}\t{}, {}, {}".format(chrom, pos, locus, tag, name), file=fh)
                    except ValueError:
                        # If only one annotation for the entire
                        # chromosome (e.g., flu) then having [0] fails
                        chrom, name, locus, tag = aaa.values
                        print("{}:{}\t{}, {}, {}".format(chrom, pos, locus, tag, name), file=fh)
                except KeyError:
                    # Position falls outside every annotated feature.
                    print("{}:{}\tNo annotated product".format(gbk_chrome, pos), file=fh)
    # Read the annotation file into a data frame.
    annotations_df = pandas.read_csv(annotation_file, sep='\t', header=None, names=['index', 'annotations'], index_col='index')
    # Remove the annotation_file from disk since both
    # cascade and sort tables are built using the file,
    # and it is opened for writing in append mode.
    os.remove(annotation_file)
    # Process the data.
    # Merge the annotations onto the transposed table so each
    # "chrom:position" row picks up its annotation, then transpose back.
    table_df_transposed = table_df.T
    table_df_transposed.index = table_df_transposed.index.rename('index')
    table_df_transposed = table_df_transposed.merge(annotations_df, left_index=True, right_index=True)
    table_df = table_df_transposed.T
    return table_df
def excel_formatter(json_file_name, excel_file_name, group, annotation_dict):
    """Render a json-serialized SNP table as a color-coded Excel file.

    Nucleotide cells are highlighted by base (A/G/C/T), IUPAC ambiguity
    codes and low map-quality values get warning colors, and the final
    annotation row is written rotated 90 degrees.
    """
    # Disable pandas' default bold header so xlsxwriter formats win.
    pandas.io.formats.excel.header_style = None
    table_df = pandas.read_json(json_file_name, orient='split')
    if annotation_dict is not None:
        table_df = annotate_table(table_df, group, annotation_dict)
    else:
        # Keep the sheet layout consistent by appending a placeholder row.
        # NOTE(review): DataFrame.append was removed in pandas 2.x; use
        # pandas.concat if pandas is upgraded.
        table_df = table_df.append(pandas.Series(name='no annotations'))
    writer = pandas.ExcelWriter(excel_file_name, engine='xlsxwriter')
    table_df.to_excel(writer, sheet_name='Sheet1')
    writer_book = writer.book
    ws = writer.sheets['Sheet1']
    # Cell formats: one background color per base plus special cases.
    format_a = writer_book.add_format({'bg_color': '#58FA82'})
    format_g = writer_book.add_format({'bg_color': '#F7FE2E'})
    format_c = writer_book.add_format({'bg_color': '#0000FF'})
    format_t = writer_book.add_format({'bg_color': '#FF0000'})
    format_normal = writer_book.add_format({'bg_color': '#FDFEFE'})
    formatlowqual = writer_book.add_format({'font_color': '#C70039', 'bg_color': '#E2CFDD'})
    format_ambigous = writer_book.add_format({'font_color': '#C70039', 'bg_color': '#E2CFDD'})
    format_n = writer_book.add_format({'bg_color': '#E2CFDD'})
    rows, cols = table_df.shape
    # Wide first column for sample names, narrow columns for bases.
    ws.set_column(0, 0, 30)
    ws.set_column(1, cols, 2.1)
    ws.freeze_panes(2, 1)
    format_annotation = writer_book.add_format({'font_color': '#0A028C', 'rotation': '-90', 'align': 'top'})
    # Set last row.
    # NOTE(review): set_row's second argument is the row height in points;
    # passing cols + 1 here looks unintentional — confirm against the
    # xlsxwriter worksheet API.
    ws.set_row(rows + 1, cols + 1, format_annotation)
    # Make sure that row/column locations don't overlap.
    # The MQ row (second to last) is flagged when quality is below 55.
    ws.conditional_format(rows - 2, 1, rows - 1, cols, {'type': 'cell', 'criteria': '<', 'value': 55, 'format': formatlowqual})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'cell', 'criteria': '==', 'value': 'B$2', 'format': format_normal})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'A', 'format': format_a})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'G', 'format': format_g})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'C', 'format': format_c})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'T', 'format': format_t})
    # IUPAC ambiguity codes share one warning format.
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'S', 'format': format_ambigous})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'Y', 'format': format_ambigous})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'R', 'format': format_ambigous})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'W', 'format': format_ambigous})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'K', 'format': format_ambigous})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'M', 'format': format_ambigous})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'N', 'format': format_n})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': '-', 'format': format_n})
    # Rotate the column headers ("chrom:position" labels) vertically.
    format_rotation = writer_book.add_format({})
    format_rotation.set_rotation(90)
    for column_num, column_name in enumerate(list(table_df.columns)):
        ws.write(0, column_num + 1, column_name, format_rotation)
    # NOTE(review): format_annotation is created a second time here with
    # identical properties — the earlier instance could be reused.
    format_annotation = writer_book.add_format({'font_color': '#0A028C', 'rotation': '-90', 'align': 'top'})
    # Set last row.
    ws.set_row(rows, 400, format_annotation)
    writer.save()
def get_annotation_dict(gbk_file):
    """Build a per-chromosome annotation lookup from a GenBank file.

    Returns a dict mapping chromosome id -> DataFrame indexed by a pandas
    IntervalIndex over each feature's [start, stop] range, so a SNP
    position can be located with ``pro.index.get_loc(pos)`` (see
    annotate_table).
    """
    gbk_dict = SeqIO.to_dict(SeqIO.parse(gbk_file, "genbank"))
    annotation_dict = {}
    tmp_file = "features.csv"
    # Create a file of chromosomes and features.
    for chromosome in list(gbk_dict.keys()):
        # 'w+' truncates, so each iteration sees only its own features.
        with open(tmp_file, 'w+') as fh:
            for feature in gbk_dict[chromosome].features:
                if "CDS" in feature.type or "rRNA" in feature.type:
                    # Qualifiers are optional; fall back to None.
                    try:
                        product = feature.qualifiers['product'][0]
                    except KeyError:
                        product = None
                    try:
                        locus = feature.qualifiers['locus_tag'][0]
                    except KeyError:
                        locus = None
                    try:
                        gene = feature.qualifiers['gene'][0]
                    except KeyError:
                        gene = None
                    fh.write("%s\t%d\t%d\t%s\t%s\t%s\n" % (chromosome, int(feature.location.start), int(feature.location.end), locus, product, gene))
        # Read the chromosomes and features file into a data frame.
        df = pandas.read_csv(tmp_file, sep='\t', names=["chrom", "start", "stop", "locus", "product", "gene"])
        # Process the data: keep one row per start position, preferring
        # rows that carry a gene name.
        df = df.sort_values(['start', 'gene'], ascending=[True, False])
        df = df.drop_duplicates('start')
        pro = df.reset_index(drop=True)
        pro.index = pandas.IntervalIndex.from_arrays(pro['start'], pro['stop'], closed='both')
        annotation_dict[chromosome] = pro
    # BUG FIX: remove the scratch file when done (annotate_table cleans up
    # its temporary file the same way; this one was previously left behind).
    if os.path.exists(tmp_file):
        os.remove(tmp_file)
    return annotation_dict
def get_sample_name(file_path):
    """Return the base name of *file_path* with its extension stripped.

    A dot at position 0 (e.g., ".hidden") is not treated as an extension,
    so such names are returned unchanged.
    """
    base_file_name = os.path.basename(file_path)
    has_extension = base_file_name.find(".") > 0
    return os.path.splitext(base_file_name)[0] if has_extension else base_file_name
def output_cascade_table(cascade_order, mqdf, group, annotation_dict):
    # Append the average-MQ row to the cascade-ordered SNP table and
    # write it out as the "cascade" Excel table.
    cascade_order_mq = pandas.concat([cascade_order, mqdf], join='inner')
    output_table(cascade_order_mq, "cascade", group, annotation_dict)
def output_excel(df, type_str, group, annotation_dict, count=None):
    """Serialize *df* to a temporary json file and render it to Excel.

    *count* is set when a wide table has been split into MAXCOLS-sized
    chunks (producing an output collection); *group* prefixes the file
    names when a sample group is known.
    """
    # Assemble the file names from the optional group prefix and the
    # optional chunk-number suffix.
    prefix = "" if group is None else "%s_" % group
    suffix = "" if count is None else "_%d" % count
    json_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s%s_order_mq%s.json" % (prefix, type_str, suffix))
    excel_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s%s_table%s.xlsx" % (prefix, type_str, suffix))
    # Write the intermediate json used by excel_formatter, then the
    # formatted Excel file itself.
    df.to_json(json_file_name, orient='split')
    excel_formatter(json_file_name, excel_file_name, group, annotation_dict)
def output_sort_table(cascade_order, mqdf, group, annotation_dict):
    # Re-order the table's columns by genomic position and write it out
    # as the "sort" Excel table.
    sort_df = cascade_order.T
    # Column labels are "chrom:pos" strings; split them so rows can be
    # sorted numerically by position, then drop the helper columns.
    sort_df['abs_value'] = sort_df.index
    sort_df[['chrom', 'pos']] = sort_df['abs_value'].str.split(':', expand=True)
    sort_df = sort_df.drop(['abs_value', 'chrom'], axis=1)
    sort_df.pos = sort_df.pos.astype(int)
    sort_df = sort_df.sort_values(by=['pos'])
    sort_df = sort_df.drop(['pos'], axis=1)
    sort_df = sort_df.T
    # Append the average-MQ row before writing.
    sort_order_mq = pandas.concat([sort_df, mqdf], join='inner')
    output_table(sort_order_mq, "sort", group, annotation_dict)
def output_table(df, type_str, group, annotation_dict):
    """Write *df* as one or more Excel files, chunking by MAXCOLS columns
    when the table is too wide for a spreadsheet."""
    if isinstance(group, str) and group.startswith("dataset"):
        # Inputs are single files, not collections,
        # so input file names are not useful for naming
        # output files.
        group_str = None
    else:
        group_str = group
    count = 0
    chunk_start = 0
    chunk_end = 0
    column_count = df.shape[1]
    if column_count >= MAXCOLS:
        # Here the number of columns is greater than
        # the maximum allowed by Excel, so multiple
        # outputs will be produced.
        while column_count >= MAXCOLS:
            count += 1
            chunk_end += MAXCOLS
            df_of_type = df.iloc[:, chunk_start:chunk_end]
            output_excel(df_of_type, type_str, group_str, annotation_dict, count=count)
            chunk_start += MAXCOLS
            column_count -= MAXCOLS
        # BUG FIX: only emit the remainder chunk when columns remain.
        # Previously, when the column count was an exact multiple of
        # MAXCOLS, an empty trailing spreadsheet was produced.
        if column_count > 0:
            count += 1
            df_of_type = df.iloc[:, chunk_start:]
            output_excel(df_of_type, type_str, group_str, annotation_dict, count=count)
    else:
        output_excel(df, type_str, group_str, annotation_dict)
def preprocess_tables(task_queue, annotation_dict, timeout):
    """Worker process: consume (newick, snps-json, avg-mq-json) triplets
    from *task_queue* and write cascade and sort Excel tables for each.

    Exits when the queue stays empty for *timeout* seconds.
    """
    while True:
        try:
            tup = task_queue.get(block=True, timeout=timeout)
        except queue.Empty:
            break
        newick_file, json_file, json_avg_mq_file = tup
        avg_mq_series = pandas.read_json(json_avg_mq_file, typ='series', orient='split')
        # Map quality to dataframe.
        mqdf = avg_mq_series.to_frame(name='MQ')
        mqdf = mqdf.T
        # Get the group.
        group = get_sample_name(newick_file)
        snps_df = pandas.read_json(json_file, orient='split')
        # Strip newick syntax (branch lengths, parens, separators) so only
        # the sample names remain, in tree order.
        with open(newick_file, 'r') as fh:
            for line in fh:
                line = re.sub('[:,]', '\n', line)
                line = re.sub('[)(]', '', line)
                line = re.sub(r'[0-9].*\.[0-9].*\n', '', line)
                line = re.sub('root\n', '', line)
                sample_order = line.split('\n')
        sample_order = list([_f for _f in sample_order if _f])
        sample_order.insert(0, 'root')
        # Re-order the SNP table rows to match the tree order.
        tree_order = snps_df.loc[sample_order]
        # Count number of SNPs in each column.
        snp_per_column = []
        for column_header in tree_order:
            count = 0
            column = tree_order[column_header]
            for element in column:
                if element != column[0]:
                    count = count + 1
            snp_per_column.append(count)
        row1 = pandas.Series(snp_per_column, tree_order.columns, name="snp_per_column")
        # Count number of SNPS from the
        # top of each column in the table.
        snp_from_top = []
        for column_header in tree_order:
            count = 0
            column = tree_order[column_header]
            # for each element in the column
            # skip the first element
            for element in column[1:]:
                if element == column[0]:
                    count = count + 1
                else:
                    break
            snp_from_top.append(count)
        row2 = pandas.Series(snp_from_top, tree_order.columns, name="snp_from_top")
        # NOTE(review): DataFrame.append was removed in pandas 2.x; use
        # pandas.concat if pandas is upgraded.
        tree_order = tree_order.append([row1])
        tree_order = tree_order.append([row2])
        # In pandas=0.18.1 even this does not work:
        # abc = row1.to_frame()
        # abc = abc.T --> tree_order.shape (5, 18), abc.shape (1, 18)
        # tree_order.append(abc)
        # Continue to get error: "*** ValueError: all the input arrays must have same number of dimensions"
        # Sort columns so SNPs shared near the top of the tree come first
        # (the "cascade" ordering).
        tree_order = tree_order.T
        tree_order = tree_order.sort_values(['snp_from_top', 'snp_per_column'], ascending=[True, False])
        tree_order = tree_order.T
        # Remove snp_per_column and snp_from_top rows.
        cascade_order = tree_order[:-2]
        # Output the cascade table.
        output_cascade_table(cascade_order, mqdf, group, annotation_dict)
        # Output the sorted table.
        output_sort_table(cascade_order, mqdf, group, annotation_dict)
        task_queue.task_done()
def set_num_cpus(num_files, processes):
    """Decide how many worker processes to launch.

    Never more than the number of input files; when the user requests
    more processes than CPUs exist, cap the answer at half the CPUs.
    """
    num_cpus = int(multiprocessing.cpu_count())
    # Fewer files than both limits: one worker per file is enough.
    if num_files < num_cpus and num_files < processes:
        return num_files
    if num_cpus < processes:
        # The request exceeds the machine's CPUs; leave headroom by
        # using at most half of them.
        half_cpus = int(num_cpus / 2)
        return num_files if num_files < half_cpus else half_cpus
    return processes
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_avg_mq_json', action='store', dest='input_avg_mq_json', required=False, default=None, help='Average MQ json file')
    parser.add_argument('--input_newick', action='store', dest='input_newick', required=False, default=None, help='Newick file')
    parser.add_argument('--input_snps_json', action='store', dest='input_snps_json', required=False, default=None, help='SNPs json file')
    parser.add_argument('--gbk_file', action='store', dest='gbk_file', required=False, default=None, help='Optional gbk file')
    parser.add_argument('--processes', action='store', dest='processes', type=int, default=None, help='User-selected number of processes to use for job splitting')
    args = parser.parse_args()
    # BUG FIX: --processes previously had no usable default, so omitting it
    # left args.processes as None and set_num_cpus() crashed comparing
    # int < None.  Fall back to the machine's CPU count.
    if args.processes is None:
        args.processes = multiprocessing.cpu_count()
    if args.gbk_file is not None:
        # Create the annotation_dict for annotating
        # the Excel tables.
        annotation_dict = get_annotation_dict(args.gbk_file)
    else:
        annotation_dict = None
    # The assumption here is that the list of files
    # in both INPUT_NEWICK_DIR and INPUT_JSON_DIR are
    # named such that they are properly matched if
    # the directories contain more than 1 file (i.e.,
    # hopefully the newick file names and json file names
    # will be something like Mbovis-01D6_* so they can be
    # sorted and properly associated with each other).
    if args.input_newick is not None:
        newick_files = [args.input_newick]
    else:
        newick_files = []
        for file_name in sorted(os.listdir(INPUT_NEWICK_DIR)):
            file_path = os.path.abspath(os.path.join(INPUT_NEWICK_DIR, file_name))
            newick_files.append(file_path)
    if args.input_snps_json is not None:
        json_files = [args.input_snps_json]
    else:
        json_files = []
        for file_name in sorted(os.listdir(INPUT_JSON_DIR)):
            file_path = os.path.abspath(os.path.join(INPUT_JSON_DIR, file_name))
            json_files.append(file_path)
    if args.input_avg_mq_json is not None:
        json_avg_mq_files = [args.input_avg_mq_json]
    else:
        json_avg_mq_files = []
        for file_name in sorted(os.listdir(INPUT_JSON_AVG_MQ_DIR)):
            file_path = os.path.abspath(os.path.join(INPUT_JSON_AVG_MQ_DIR, file_name))
            json_avg_mq_files.append(file_path)
    multiprocessing.set_start_method('spawn')
    queue1 = multiprocessing.JoinableQueue()
    queue2 = multiprocessing.JoinableQueue()
    num_files = len(newick_files)
    cpus = set_num_cpus(num_files, args.processes)
    # Set a timeout for get()s in the queue.
    timeout = 0.05
    # Enqueue one (newick, snps, avg-mq) triplet per sample; the lists are
    # assumed to be index-aligned (see the sorting note above).
    for i, newick_file in enumerate(newick_files):
        json_file = json_files[i]
        json_avg_mq_file = json_avg_mq_files[i]
        queue1.put((newick_file, json_file, json_avg_mq_file))
    # Complete the preprocess_tables task.
    processes = [multiprocessing.Process(target=preprocess_tables, args=(queue1, annotation_dict, timeout, )) for _ in range(cpus)]
    for p in processes:
        p.start()
    for p in processes:
        p.join()
    queue1.join()
    if queue1.empty():
        queue1.close()
        queue1.join_thread()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.