#!/usr/bin/env python3
"""
Template by pypi-mobans
"""
import os
import sys
import codecs
import locale
import platform
from shutil import rmtree
from setuptools import Command, setup, find_packages
PY2 = sys.version_info[0] == 2
PY26 = PY2 and sys.version_info[1] < 7
PY33 = sys.version_info < (3, 4)
# Work around mbcs bug in distutils.
# http://bugs.python.org/issue10945
# This workaround is only needed if a project supports Python < 3.4
# Workaround for locale not being set
try:
lc = locale.getlocale()
pf = platform.system()
if pf != "Windows" and lc == (None, None):
locale.setlocale(locale.LC_ALL, "C.UTF-8")
except (ValueError, UnicodeError, locale.Error):
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
NAME = "pyexcel-io"
AUTHOR = "C.W."
VERSION = "0.6.6"
EMAIL = "info@pyexcel.org"
LICENSE = "New BSD"
DESCRIPTION = (
    "A python library to read and write structured data in csv, zipped csv "
    "format and to/from databases"
)
URL = "https://github.com/pyexcel/pyexcel-io"
DOWNLOAD_URL = "%s/archive/0.6.6.tar.gz" % URL
FILES = ["README.rst", "CHANGELOG.rst"]
KEYWORDS = [
"python",
"API",
"tsv",
"tsvz",
"csv",
"csvz",
"django",
"sqlalchemy",
]
CLASSIFIERS = [
"Topic :: Software Development :: Libraries",
"Programming Language :: Python",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
    "License :: OSI Approved :: BSD License",
    "Programming Language :: Python :: Implementation :: PyPy",
]
PYTHON_REQUIRES = ">=3.6"
INSTALL_REQUIRES = [
"lml>=0.0.4",
]
SETUP_COMMANDS = {}
PACKAGES = find_packages(exclude=["ez_setup", "examples", "tests", "tests.*"])
EXTRAS_REQUIRE = {
"xls": ['pyexcel-xls>=0.6.0'],
"xlsx": ['pyexcel-xlsx>=0.6.0'],
"ods": ['pyexcel-ods3>=0.6.0'],
}
# You do not need to read beyond this line
PUBLISH_COMMAND = "{0} setup.py sdist bdist_wheel upload -r pypi".format(sys.executable)
HERE = os.path.abspath(os.path.dirname(__file__))
GS_COMMAND = ("gease pyexcel-io v0.6.6 " +
"Find 0.6.6 in changelog for more details")
NO_GS_MESSAGE = ("Automatic github release is disabled. " +
"Please install gease to enable it.")
UPLOAD_FAILED_MSG = (
    'Upload failed. Please run "%s" yourself.' % PUBLISH_COMMAND)
class PublishCommand(Command):
"""Support setup.py upload."""
description = "Build and publish the package on github and pypi"
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print("\033[1m{0}\033[0m".format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status("Removing previous builds...")
rmtree(os.path.join(HERE, "dist"))
rmtree(os.path.join(HERE, "build"))
rmtree(os.path.join(HERE, "pyexcel_io.egg-info"))
except OSError:
pass
self.status("Building Source and Wheel (universal) distribution...")
run_status = True
if has_gease():
run_status = os.system(GS_COMMAND) == 0
else:
self.status(NO_GS_MESSAGE)
if run_status:
if os.system(PUBLISH_COMMAND) != 0:
self.status(UPLOAD_FAILED_MSG)
sys.exit()
SETUP_COMMANDS.update({
"publish": PublishCommand
})
def has_gease():
"""
test if github release command is installed
visit http://github.com/moremoban/gease for more info
"""
try:
import gease # noqa
return True
except ImportError:
return False
def read_files(*files):
"""Read files into setup"""
text = ""
for single_file in files:
content = read(single_file)
text = text + content + "\n"
return text
def read(afile):
"""Read a file into setup"""
the_relative_file = os.path.join(HERE, afile)
with codecs.open(the_relative_file, "r", "utf-8") as opened_file:
content = filter_out_test_code(opened_file)
content = "".join(list(content))
return content
def filter_out_test_code(file_handle):
    """Drop Sphinx testcode blocks and substitution lines from a file."""
    found_test_code = False
for line in file_handle.readlines():
if line.startswith(".. testcode:"):
found_test_code = True
continue
if found_test_code is True:
if line.startswith(" "):
continue
else:
empty_line = line.strip()
if len(empty_line) == 0:
continue
else:
found_test_code = False
yield line
else:
for keyword in ["|version|", "|today|"]:
if keyword in line:
break
else:
yield line
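# Illustrative check of filter_out_test_code (not part of the build): it
# drops Sphinx ".. testcode:" blocks and any line containing |version| or
# |today| so that the long_description uploaded to PyPI stays clean.
#
#     import io
#     sample = "Intro\n.. testcode:\n    print('hi')\nOutro\nTail |version|\n"
#     out = "".join(filter_out_test_code(io.StringIO(sample)))
#     assert out == "Intro\nOutro\n"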
if __name__ == "__main__":
setup(
test_suite="tests",
name=NAME,
author=AUTHOR,
version=VERSION,
author_email=EMAIL,
description=DESCRIPTION,
url=URL,
download_url=DOWNLOAD_URL,
long_description=read_files(*FILES),
license=LICENSE,
keywords=KEYWORDS,
python_requires=PYTHON_REQUIRES,
extras_require=EXTRAS_REQUIRE,
tests_require=["nose"],
install_requires=INSTALL_REQUIRES,
packages=PACKAGES,
include_package_data=True,
zip_safe=False,
classifiers=CLASSIFIERS,
cmdclass=SETUP_COMMANDS
)
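# Usage note: "publish" is the custom command registered above via
# SETUP_COMMANDS. Assuming a source checkout:
#
#     python setup.py publish   # remove old builds, tag a GitHub release
#                               # via gease (if installed), then upload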
# Copyright 2015 Rackspace All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import docker
from docker import errors
import mock
from oslo_config import cfg
import six
from magnum.common import exception
from magnum.conductor.handlers import docker_conductor
from magnum import objects
from magnum.objects import container as obj_container
from magnum.tests import base
from mock import patch
CONF = cfg.CONF
class TestDockerConductor(base.BaseTestCase):
def setUp(self):
super(TestDockerConductor, self).setUp()
self.conductor = docker_conductor.Handler()
@mock.patch.object(docker_conductor, 'docker_client')
def test_docker_for_bay(self, mock_docker_client):
mock_docker = mock.MagicMock()
mock_docker_client.DockerHTTPClient.return_value = mock_docker
mock_bay = mock.MagicMock()
mock_bay.api_address = '1.1.1.1'
actual_docker = self.conductor._docker_for_bay(mock_bay)
self.assertEqual(mock_docker, actual_docker)
args = ('tcp://1.1.1.1:2376', CONF.docker.docker_remote_api_version,
CONF.docker.default_timeout)
mock_docker_client.DockerHTTPClient.assert_called_once_with(*args)
@mock.patch.object(docker_conductor, 'docker_client')
@mock.patch.object(docker_conductor.objects.Bay, 'get_by_uuid')
def test_get_docker_client(self, mock_bay_get_by_uuid,
mock_docker_client):
mock_docker = mock.MagicMock()
mock_docker_client.DockerHTTPClient.return_value = mock_docker
mock_bay = mock.MagicMock()
mock_bay.api_address = '1.1.1.1'
mock_bay_get_by_uuid.return_value = mock_bay
mock_container = mock.MagicMock()
mock_container.bay_uuid = '9fb6c41e-a7e4-48b8-97c4-702b26034b8e'
actual_docker = self.conductor.get_docker_client(
mock.sentinel.context,
mock_container)
self.assertEqual(mock_docker, actual_docker)
args = ('tcp://1.1.1.1:2376', CONF.docker.docker_remote_api_version,
CONF.docker.default_timeout)
mock_bay_get_by_uuid.assert_called_once_with(mock.sentinel.context,
mock_container.bay_uuid)
mock_docker_client.DockerHTTPClient.assert_called_once_with(*args)
@mock.patch.object(docker_conductor, 'docker_client')
@mock.patch.object(docker_conductor.objects.Bay, 'get_by_uuid')
@mock.patch.object(docker_conductor.objects.Container, 'get_by_uuid')
def test_get_docker_client_container_uuid(self,
mock_container_get_by_uuid,
mock_bay_get_by_uuid,
mock_docker_client):
mock_docker = mock.MagicMock()
mock_docker_client.DockerHTTPClient.return_value = mock_docker
mock_bay = mock.MagicMock()
mock_bay.api_address = '1.1.1.1'
mock_bay_get_by_uuid.return_value = mock_bay
mock_container = mock.MagicMock()
mock_container.uuid = '8e48ffb1-754d-4f21-bdd0-1a39bf796389'
mock_container.bay_uuid = '9fb6c41e-a7e4-48b8-97c4-702b26034b8e'
mock_container_get_by_uuid.return_value = mock_container
actual_docker = self.conductor.get_docker_client(mock.sentinel.context,
mock_container.uuid)
self.assertEqual(mock_docker, actual_docker)
args = ('tcp://1.1.1.1:2376', CONF.docker.docker_remote_api_version,
CONF.docker.default_timeout)
mock_container_get_by_uuid.assert_called_once_with(
mock.sentinel.context,
mock_container.uuid)
mock_bay_get_by_uuid.assert_called_once_with(mock.sentinel.context,
mock_container.bay_uuid)
mock_docker_client.DockerHTTPClient.assert_called_once_with(*args)
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_create(self, mock_get_docker_client):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container = mock.MagicMock()
mock_container.name = 'some-name'
mock_container.uuid = 'some-uuid'
mock_container.image = 'test_image:some_tag'
mock_container.command = None
container = self.conductor.container_create(
None, mock_container)
utf8_image = self.conductor._encode_utf8(mock_container.image)
mock_docker.pull.assert_called_once_with('test_image',
tag='some_tag')
mock_docker.inspect_image.assert_called_once_with(utf8_image)
mock_docker.create_container.assert_called_once_with(
mock_container.image,
name='some-name',
hostname='some-uuid',
command=None)
self.assertEqual(obj_container.STOPPED, container.status)
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_create_with_command(self, mock_get_docker_client):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container = mock.MagicMock()
mock_container.name = 'some-name'
mock_container.uuid = 'some-uuid'
mock_container.image = 'test_image:some_tag'
mock_container.command = 'env'
container = self.conductor.container_create(
None, mock_container)
utf8_image = self.conductor._encode_utf8(mock_container.image)
mock_docker.pull.assert_called_once_with('test_image',
tag='some_tag')
mock_docker.inspect_image.assert_called_once_with(utf8_image)
mock_docker.create_container.assert_called_once_with(
mock_container.image,
name='some-name',
hostname='some-uuid',
command='env')
self.assertEqual(obj_container.STOPPED, container.status)
def test_encode_utf8_unicode(self):
image = 'some_image:some_tag'
unicode_image = six.u(image)
utf8_image = self.conductor._encode_utf8(unicode_image)
self.assertEqual(image, utf8_image)
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_create_with_failure(self, mock_get_docker_client):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container = mock.MagicMock()
mock_container.image = 'test_image:some_tag'
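        # Patching APIError.__str__ lets the test raise a bare APIError
        # without building a real HTTP response; the handler stringifies
        # the error when wrapping it, which mock_init verifies below.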
with patch.object(errors.APIError, '__str__',
return_value='hit error') as mock_init:
mock_docker.pull = mock.Mock(
side_effect=errors.APIError('Error', '', ''))
self.assertRaises(exception.ContainerException,
self.conductor.container_create,
None, mock_container)
mock_docker.pull.assert_called_once_with(
'test_image',
tag='some_tag')
self.assertFalse(mock_docker.create_container.called)
mock_init.assert_called_once_with()
self.assertEqual(obj_container.ERROR, mock_container.status)
def test_find_container_by_name_not_found(self):
mock_docker = mock.MagicMock()
fake_response = mock.MagicMock()
fake_response.content = 'not_found'
fake_response.status_code = 404
mock_docker.list_instances.side_effect = errors.APIError(
'not_found', fake_response)
ret = self.conductor._find_container_by_name(mock_docker, '1')
self.assertEqual({}, ret)
@mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_delete(self, mock_get_docker_client,
mock_find_container):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
mock_docker_id = '2703ef2b705d'
mock_find_container.return_value = mock_docker_id
self.conductor.container_delete(None, mock_container_uuid)
mock_docker.remove_container.assert_called_once_with(
mock_docker_id)
mock_find_container.assert_called_once_with(mock_docker,
mock_container_uuid)
@patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_delete_with_container_not_exist(
self,
mock_get_docker_client,
mock_find_container):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
mock_docker_id = {}
mock_find_container.return_value = mock_docker_id
res = self.conductor.container_delete(None, mock_container_uuid)
self.assertIsNone(res)
self.assertFalse(mock_docker.remove_container.called)
mock_find_container.assert_called_once_with(mock_docker,
mock_container_uuid)
@patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_delete_with_failure(
self,
mock_get_docker_client,
mock_find_container):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
mock_docker_id = '2703ef2b705d'
mock_find_container.return_value = mock_docker_id
with patch.object(errors.APIError, '__str__',
return_value='hit error') as mock_init:
mock_docker.remove_container = mock.Mock(
side_effect=errors.APIError('Error', '', ''))
self.assertRaises(exception.ContainerException,
self.conductor.container_delete,
None, mock_container_uuid)
mock_docker.remove_container.assert_called_once_with(
mock_docker_id)
mock_find_container.assert_called_once_with(mock_docker,
mock_container_uuid)
mock_init.assert_called_once_with()
@mock.patch.object(objects.Container, 'get_by_uuid')
@patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_action(self, mock_get_docker_client,
mock_find_container, mock_get_by_uuid):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container = mock.MagicMock()
mock_get_by_uuid.return_value = mock_container
mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
mock_docker_id = '2703ef2b705d'
mock_find_container.return_value = mock_docker_id
self.conductor._container_action(None, mock_container_uuid,
'fake-status', 'fake-func')
self.assertEqual('fake-status', mock_container.status)
@mock.patch.object(objects.Container, 'get_by_uuid')
@patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_reboot(self, mock_get_docker_client,
mock_find_container, mock_get_by_uuid):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container = mock.MagicMock()
mock_get_by_uuid.return_value = mock_container
mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
mock_docker_id = '2703ef2b705d'
mock_find_container.return_value = mock_docker_id
self.conductor.container_reboot(None, mock_container_uuid)
mock_docker.restart.assert_called_once_with(mock_docker_id)
mock_find_container.assert_called_once_with(mock_docker,
mock_container_uuid)
self.assertEqual(obj_container.RUNNING, mock_container.status)
@patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_reboot_with_failure(self,
mock_get_docker_client,
mock_find_container):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
mock_docker_id = '2703ef2b705d'
mock_find_container.return_value = mock_docker_id
with patch.object(errors.APIError, '__str__',
return_value='hit error') as mock_init:
mock_docker.restart = mock.Mock(
side_effect=errors.APIError('Error', '', ''))
self.assertRaises(exception.ContainerException,
self.conductor.container_reboot,
None, mock_container_uuid)
mock_docker.restart.assert_called_once_with(mock_docker_id)
mock_find_container.assert_called_once_with(mock_docker,
mock_container_uuid)
mock_init.assert_called_once_with()
@mock.patch.object(objects.Container, 'get_by_uuid')
@patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_start(self, mock_get_docker_client,
mock_find_container, mock_get_by_uuid):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container = mock.MagicMock()
mock_get_by_uuid.return_value = mock_container
mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
mock_docker_id = '2703ef2b705d'
mock_find_container.return_value = mock_docker_id
self.conductor.container_start(None, mock_container_uuid)
mock_docker.start.assert_called_once_with(mock_docker_id)
mock_find_container.assert_called_once_with(mock_docker,
mock_container_uuid)
self.assertEqual(obj_container.RUNNING, mock_container.status)
@patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_start_with_failure(self,
mock_get_docker_client,
mock_find_container):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
mock_docker_id = '2703ef2b705d'
mock_find_container.return_value = mock_docker_id
with patch.object(errors.APIError, '__str__',
return_value='hit error') as mock_init:
mock_docker.start = mock.Mock(
side_effect=errors.APIError('Error', '', ''))
self.assertRaises(exception.ContainerException,
self.conductor.container_start,
None, mock_container_uuid)
mock_docker.start.assert_called_once_with(mock_docker_id)
mock_find_container.assert_called_once_with(mock_docker,
mock_container_uuid)
mock_init.assert_called_once_with()
@mock.patch.object(objects.Container, 'get_by_uuid')
@patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_stop(self, mock_get_docker_client,
mock_find_container, mock_get_by_uuid):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container = mock.MagicMock()
mock_get_by_uuid.return_value = mock_container
mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
mock_docker_id = '2703ef2b705d'
mock_find_container.return_value = mock_docker_id
self.conductor.container_stop(None, mock_container_uuid)
mock_docker.stop.assert_called_once_with(mock_docker_id)
mock_find_container.assert_called_once_with(mock_docker,
mock_container_uuid)
self.assertEqual(obj_container.STOPPED, mock_container.status)
@patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_stop_with_failure(self, mock_get_docker_client,
mock_find_container):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
mock_docker_id = '2703ef2b705d'
mock_find_container.return_value = mock_docker_id
with patch.object(errors.APIError, '__str__',
return_value='hit error') as mock_init:
mock_docker.stop = mock.Mock(
side_effect=errors.APIError('Error', '', ''))
self.assertRaises(exception.ContainerException,
self.conductor.container_stop,
None, mock_container_uuid)
mock_docker.stop.assert_called_once_with(mock_docker_id)
mock_find_container.assert_called_once_with(mock_docker,
mock_container_uuid)
mock_init.assert_called_once_with()
@mock.patch.object(objects.Container, 'get_by_uuid')
@patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_pause(self, mock_get_docker_client,
mock_find_container, mock_get_by_uuid):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container = mock.MagicMock()
mock_get_by_uuid.return_value = mock_container
mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
mock_docker_id = '2703ef2b705d'
mock_find_container.return_value = mock_docker_id
self.conductor.container_pause(None, mock_container_uuid)
mock_docker.pause.assert_called_once_with(mock_docker_id)
mock_find_container.assert_called_once_with(mock_docker,
mock_container_uuid)
self.assertEqual(obj_container.PAUSED, mock_container.status)
@patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_pause_with_failure(self, mock_get_docker_client,
mock_find_container):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
mock_docker_id = '2703ef2b705d'
mock_find_container.return_value = mock_docker_id
with patch.object(errors.APIError, '__str__',
return_value='hit error') as mock_init:
mock_docker.pause = mock.Mock(
side_effect=errors.APIError('Error', '', ''))
self.assertRaises(exception.ContainerException,
self.conductor.container_pause,
None, mock_container_uuid)
mock_docker.pause.assert_called_once_with(mock_docker_id)
mock_find_container.assert_called_once_with(mock_docker,
mock_container_uuid)
mock_init.assert_called_once_with()
@mock.patch.object(objects.Container, 'get_by_uuid')
@patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_unpause(self, mock_get_docker_client,
mock_find_container, mock_get_by_uuid):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container = mock.MagicMock()
mock_get_by_uuid.return_value = mock_container
mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
mock_docker_id = '2703ef2b705d'
mock_find_container.return_value = mock_docker_id
self.conductor.container_unpause(None, mock_container_uuid)
mock_docker.unpause.assert_called_once_with(mock_docker_id)
mock_find_container.assert_called_once_with(mock_docker,
mock_container_uuid)
self.assertEqual(obj_container.RUNNING, mock_container.status)
@patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_unpause_with_failure(self,
mock_get_docker_client,
mock_find_container):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
mock_docker_id = '2703ef2b705d'
mock_find_container.return_value = mock_docker_id
with patch.object(errors.APIError, '__str__',
return_value='hit error') as mock_init:
mock_docker.unpause = mock.Mock(
side_effect=errors.APIError('Error', '', ''))
self.assertRaises(exception.ContainerException,
self.conductor.container_unpause,
None, mock_container_uuid)
mock_docker.unpause.assert_called_once_with(mock_docker_id)
mock_find_container.assert_called_once_with(mock_docker,
mock_container_uuid)
mock_init.assert_called_once_with()
@mock.patch.object(objects.Container, 'get_by_uuid')
@patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_show(self, mock_get_docker_client,
mock_find_container, mock_get_by_uuid):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container = mock.MagicMock()
mock_get_by_uuid.return_value = mock_container
mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
mock_docker_id = '2703ef2b705d'
mock_find_container.return_value = mock_docker_id
self.conductor.container_show(None, mock_container_uuid)
mock_docker.inspect_container.assert_called_once_with(
mock_docker_id)
mock_find_container.assert_called_once_with(mock_docker,
mock_container_uuid)
@mock.patch.object(objects.Container, 'get_by_uuid')
@mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_show_with_running_state(self, mock_get_docker_client,
mock_find_container,
mock_get_by_uuid):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container = mock.MagicMock()
mock_get_by_uuid.return_value = mock_container
mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
mock_docker_id = '2703ef2b705d'
mock_find_container.return_value = mock_docker_id
mock_container_detail = {'State': {'Error': '',
'Running': True,
'Paused': False}}
mock_docker.inspect_container.return_value = mock_container_detail
self.conductor.container_show(None, mock_container_uuid)
self.assertEqual(obj_container.RUNNING, mock_container.status)
@mock.patch.object(objects.Container, 'get_by_uuid')
@mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_show_with_stop_state(self, mock_get_docker_client,
mock_find_container,
mock_get_by_uuid):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container = mock.MagicMock()
mock_get_by_uuid.return_value = mock_container
mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
mock_docker_id = '2703ef2b705d'
mock_find_container.return_value = mock_docker_id
mock_container_detail = {'State': {'Error': '',
'Running': False,
'Paused': False}}
mock_docker.inspect_container.return_value = mock_container_detail
self.conductor.container_show(None, mock_container_uuid)
self.assertEqual(obj_container.STOPPED, mock_container.status)
@mock.patch.object(objects.Container, 'get_by_uuid')
@mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_show_with_pause_state(self, mock_get_docker_client,
mock_find_container,
mock_get_by_uuid):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container = mock.MagicMock()
mock_get_by_uuid.return_value = mock_container
mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
mock_docker_id = '2703ef2b705d'
mock_find_container.return_value = mock_docker_id
mock_container_detail = {'State': {'Error': '',
'Running': False,
'Paused': True}}
mock_docker.inspect_container.return_value = mock_container_detail
self.conductor.container_show(None, mock_container_uuid)
self.assertEqual(obj_container.PAUSED, mock_container.status)
@mock.patch.object(objects.Container, 'get_by_uuid')
@mock.patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_show_with_error_status(self, mock_get_docker_client,
mock_find_container,
mock_get_by_uuid):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container = mock.MagicMock()
mock_get_by_uuid.return_value = mock_container
mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
mock_docker_id = '2703ef2b705d'
mock_find_container.return_value = mock_docker_id
mock_container_detail = {'State': {'Error': True,
'Running': False,
'Paused': False}}
mock_docker.inspect_container.return_value = mock_container_detail
self.conductor.container_show(None, mock_container_uuid)
self.assertEqual(obj_container.ERROR, mock_container.status)
@mock.patch.object(objects.Container, 'get_by_uuid')
@patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_show_with_failure(self, mock_get_docker_client,
mock_find_container,
mock_get_by_uuid):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_get_by_uuid.return_value = mock.MagicMock()
        mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
mock_docker_id = '2703ef2b705d'
mock_find_container.return_value = mock_docker_id
with patch.object(errors.APIError, '__str__',
return_value='hit error') as mock_init:
mock_docker.inspect_container = mock.Mock(
side_effect=errors.APIError('Error', '', ''))
self.assertRaises(exception.ContainerException,
self.conductor.container_show,
None, mock_container_uuid)
mock_docker.inspect_container.assert_called_once_with(
mock_docker_id)
mock_find_container.assert_called_once_with(mock_docker,
mock_container_uuid)
mock_init.assert_called_once_with()
@mock.patch.object(objects.Container, 'get_by_uuid')
@patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_show_with_not_found(self, mock_get_docker_client,
mock_find_container,
mock_get_by_uuid):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container = mock.MagicMock()
mock_get_by_uuid.return_value = mock_container
        mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
mock_docker_id = '2703ef2b705d'
mock_find_container.return_value = mock_docker_id
with patch.object(errors.APIError, '__str__',
return_value='404 error') as mock_init:
mock_docker.inspect_container = mock.Mock(
side_effect=errors.APIError('Error', '', ''))
self.conductor.container_show(None, mock_container_uuid)
mock_docker.inspect_container.assert_called_once_with(
mock_docker_id)
mock_find_container.assert_called_once_with(mock_docker,
mock_container_uuid)
mock_init.assert_called_once_with()
self.assertEqual(obj_container.ERROR, mock_container.status)
@mock.patch.object(objects.Container, 'get_by_uuid')
@patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_show_with_not_found_from_docker(self,
mock_get_docker_client,
mock_find_container,
mock_get_by_uuid):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container = mock.MagicMock()
mock_get_by_uuid.return_value = mock_container
        mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
mock_docker_id = {}
mock_find_container.return_value = mock_docker_id
self.conductor.container_show(None, mock_container_uuid)
mock_find_container.assert_called_once_with(mock_docker,
mock_container_uuid)
self.assertEqual(obj_container.ERROR, mock_container.status)
@patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_exec(self, mock_get_docker_client,
mock_find_container):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
mock_docker_id = '2703ef2b705d'
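        # Pin the docker-py version seen by the handler: >= 1.2 uses
        # exec_create/exec_start, while test_container_exec_deprecated
        # below pins '0.7.0' to exercise the legacy execute() path.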
docker.version = '1.2.2'
mock_find_container.return_value = mock_docker_id
mock_create_res = mock.MagicMock()
mock_docker.exec_create.return_value = mock_create_res
self.conductor.container_exec(None, mock_container_uuid, 'ls')
mock_docker.exec_create.assert_called_once_with(mock_docker_id, 'ls',
True, True, False)
mock_docker.exec_start.assert_called_once_with(mock_create_res,
False, False, False)
mock_find_container.assert_called_once_with(mock_docker,
mock_container_uuid)
@patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_exec_deprecated(self, mock_get_docker_client,
mock_find_container):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
mock_docker_id = '2703ef2b705d'
docker.version = '0.7.0'
mock_find_container.return_value = mock_docker_id
mock_create_res = mock.MagicMock()
mock_docker.exec_create.return_value = mock_create_res
self.conductor.container_exec(None, mock_container_uuid, 'ls')
mock_docker.execute.assert_called_once_with(mock_docker_id, 'ls')
mock_find_container.assert_called_once_with(mock_docker,
mock_container_uuid)
@patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_exec_with_failure(self,
mock_get_docker_client,
mock_find_container):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
mock_docker_id = '2703ef2b705d'
docker.version = '1.2.2'
mock_find_container.return_value = mock_docker_id
with patch.object(errors.APIError, '__str__',
return_value='hit error') as mock_init:
mock_docker.exec_create = mock.Mock(
side_effect=errors.APIError('Error', '', ''))
self.assertRaises(exception.ContainerException,
self.conductor.container_exec,
None, mock_container_uuid, 'ls')
mock_docker.exec_create.assert_called_once_with(mock_docker_id,
'ls', True, True,
False)
mock_find_container.assert_called_once_with(mock_docker,
mock_container_uuid)
mock_init.assert_called_once_with()
@patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_exec_deprecated_with_failure(self,
mock_get_docker_client,
mock_find_container):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
mock_docker_id = '2703ef2b705d'
docker.version = '0.7.0'
mock_find_container.return_value = mock_docker_id
with patch.object(errors.APIError, '__str__',
return_value='hit error') as mock_init:
mock_docker.execute = mock.Mock(
side_effect=errors.APIError('Error', '', ''))
self.assertRaises(exception.ContainerException,
self.conductor.container_exec,
None, mock_container_uuid, 'ls')
mock_docker.execute.assert_called_once_with(mock_docker_id, 'ls')
mock_find_container.assert_called_once_with(mock_docker,
mock_container_uuid)
mock_init.assert_called_once_with()
@patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_logs(self, mock_get_docker_client,
mock_find_container):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
mock_docker_id = '2703ef2b705d'
mock_find_container.return_value = mock_docker_id
self.conductor.container_logs(None, mock_container_uuid)
mock_docker.get_container_logs.assert_called_once_with(
mock_docker_id)
mock_find_container.assert_called_once_with(mock_docker,
mock_container_uuid)
@patch.object(docker_conductor.Handler, '_find_container_by_name')
@mock.patch.object(docker_conductor.Handler, 'get_docker_client')
def test_container_logs_with_failure(self, mock_get_docker_client,
mock_find_container):
mock_docker = mock.MagicMock()
mock_get_docker_client.return_value = mock_docker
mock_container_uuid = 'd545a92d-609a-428f-8edb-16b02ad20ca1'
mock_docker_id = '2703ef2b705d'
mock_find_container.return_value = mock_docker_id
with patch.object(errors.APIError, '__str__',
return_value='hit error') as mock_init:
mock_docker.get_container_logs = mock.Mock(
side_effect=errors.APIError('Error', '', ''))
self.assertRaises(exception.ContainerException,
self.conductor.container_logs,
None, mock_container_uuid)
mock_docker.get_container_logs.assert_called_once_with(
mock_docker_id)
mock_find_container.assert_called_once_with(mock_docker,
mock_container_uuid)
mock_init.assert_called_once_with()
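    # Each handler method is expected to convert any unexpected error from
    # the docker client into a ContainerException; the loop below verifies
    # this for every container action in one sweep.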
def test_container_common_exception(self):
for action in ('container_exec', 'container_logs', 'container_show',
'container_delete', 'container_create',
'_container_action'):
func = getattr(self.conductor, action)
with patch.object(docker_conductor,
'docker_client') as mock_docker:
mock_docker.side_effect = Exception("So bad")
self.assertRaises(exception.ContainerException,
func, None, None)
# Jacqueline Kory Westlund
# May 2016
#
# The MIT License (MIT)
#
# Copyright (c) 2016 Personal Robots Group
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging # log messages
from ss_db_manager import ss_db_manager
from SS_Errors import NoStoryFound
class ss_personalization_manager():
""" Determine personalization for a participant, given their past
performance and the current session """
def __init__(self, session, participant, database,
percent_correct_to_level):
""" Initialize stuff """
# Set up logger.
self._logger = logging.getLogger(__name__)
self._logger.info("Initializing personalization manager...")
# Save participant and session so we can use them to get their
# past performance from the database, which we will use to
# determine which stories to present and what level to use.
self._participant = participant
self._session = session
# Store the percent of questions a player has to get right in
# the previous session in order to level up in this session, so
# we can use it to determine whether a player will level up.
self._percent_correct_to_level = percent_correct_to_level
# Get database manager, but don't require the database for a
# DEMO session!
if (self._session != -1):
self._db_man = ss_db_manager(database)
# Get the level for this session.
self._level = self.get_level_for_session()
# In each session, alternate between telling new stories and
# telling previously told stories (if possible -- obviously in
# the earlier sessions, there is less review available). Start
# with a new story.
self._tell_new_story = True
# We don't have a current story yet.
self._current_story = None
# We can't get a queue of stories, because we don't know how
# many we would need to queue up. Instead, get a list of the
# emotions that the participant needs the most practice with
# that should be present in the stories this session. These
# will be the emotions gotten incorrect in the past session.
# Skip this if this is a demo session.
if (self._session != -1):
self._emotion_list = self._db_man.get_most_recent_incorrect_emotions(
self._participant, self._session)
def get_level_for_session(self):
""" Determine which level the stories told in this session
should be at.
"""
# Use level 1 if this is a demo session.
if (self._session == -1):
return 1
# Data for this participant's last session is in the database.
# Use their past performance to decide whether to level up.
# need last time's level, number of questions correct last time
level = self._db_man.get_most_recent_level(self._participant,
self._session)
# If there is no previous data, start at level 1.
if (level is None):
return 1
        # If the participant got at least the required percent of questions
        # correct last time, level up. If no responses were found or not
        # enough were answered correctly, do not level up.
#TODO total performance or just last time's performance?
past_performance = self._db_man.get_percent_correct_responses(
self._participant, (self._session - 1))
if past_performance is None:
self._logger.info("Participant did not answer any questions last "
+ "time, so we will not level up. Level will be " + str(level)
+ ".")
return level
        elif (past_performance >= self._percent_correct_to_level):
            new_level = level + 1 if level < 10 else level
            self._logger.info("Participant got at least " +
                str(self._percent_correct_to_level*100) +
                "% questions correct last time, so we can level up! Level"
                " will be " + str(new_level) + ".")
            return new_level
else:
self._logger.info("Participant got less than " +
str(self._percent_correct_to_level*100) +
"% questions correct last time, so we don't level up. Level"
" will be " + str(level) + ".")
return level
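    # Worked example: with percent_correct_to_level=0.75, a participant at
    # level 3 who answered 80% of last session's questions correctly moves
    # to level 4; at 70% they stay at level 3; a participant already at
    # level 10 stays at 10 (levels cap at 10).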
def get_performance_this_session(self):
""" Get the user's performance on all questions asked this
session, by question type.
"""
# Only get the user's performance if this isn't a DEMO session.
if (self._session != -1):
# Get the user's performance on the emotion questions, on
# the theory of mind questions, and on the order questions.
return self._db_man.get_percent_correct_responses(
self._participant, self._session, "emotion"), \
self._db_man.get_percent_correct_responses(self._participant,
self._session, "ToM"), \
self._db_man.get_percent_correct_responses( \
self._participant, self._session, "order")
else:
return None
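    # For a normal session this returns a 3-tuple of percent-correct values,
    # e.g. (0.8, 0.5, 1.0) (illustrative numbers) for the emotion, theory of
    # mind, and order questions respectively; for a demo session, None.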
def get_next_story_script(self):
""" Return the name of the next story script to load. """
# If this is a demo session, use the demo script.
if (self._session == -1):
return "demo-story-1.txt"
        # If no story has been picked yet, log an error and pick a story.
elif (self._current_story is None):
self._logger.error("We were asked for the story script, but we "
"haven't picked a story yet! Picking a story...")
self._current_story = self.pick_next_story()
# Return name of story script: story name + level + file extension.
return (self._current_story + "-" + str(self._level) + ".txt").lower()
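    # For example, a story named "CR1" at level 2 resolves to "cr1-2.txt"
    # (story name + level + file extension, lowercased).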
def pick_next_story(self):
""" Determine which story should be heard next. We have some number of
stories. Alternate telling new stories and telling review stories.
Earlier sessions will use more new stories since there isn't much to
review. Return name of the next script file, based on which emotions
should be present in the stories, which stories have already been
heard, whether we should tell a review story, and the current level.
"""
# If this is a demo session, use the demo script.
if (self._session == -1):
self._logger.debug("Using DEMO script.")
# Save that we are using the demo story.
self._current_story = "demo-story-1"
return "demo-story-1"
# We start without having picked the next story.
story = None
# If we should tell a new story, get the next new story that
# has one of the emotions to practice in it. If there aren't
# any stories with one of those emotions, just get the next new
# story.
if self._tell_new_story:
story = self._db_man.get_next_new_story(self._participant,
self._emotion_list, self._level)
# If there are no more new stories to tell, or if we need to
# tell a review story next, get a review story that has one of
# the emotions to practice in it. If there aren't any with
# those emotions, get the oldest, least played review story.
if (story is None) or not self._tell_new_story:
story = self._db_man.get_next_review_story(self._participant,
self._session, self._emotion_list, self._level)
# If there are no review stories available, get a new story
# instead (this may happen if we are supposed to tell a review
# story but haven't told very many stories yet).
if (story is None):
story = self._db_man.get_next_new_story(self._participant,
self._emotion_list, self._level)
# If we still don't have a story, then for some reason there
# are no new stories and no review stories we can tell. This is
# a problem.
        if (story is None):
            self._logger.error("We were supposed to get the next story "
                               "but could not find a new or a review "
                               "story that we can play.")
raise NoStoryFound("Could not find new or review story to play.",
self._participant, self._session)
# Toggle flag for telling new versus telling previously heard
# stories (since we alternate).
self._tell_new_story = not self._tell_new_story
# Save current story so we can provide story details later.
self._current_story = story
# Return name of the story.
return story
def get_next_story_details(self):
""" Determine the number of scenes, whether they are shown in
order, and the number of answer options for the next story.
"""
# If this is a demo session, load a demo scene.
if (self._session == -1):
# Demo set:
graphic_names = ["CR1-a-b.png", "CR1-b-b.png",
"CR1-c-b.png", "CR1-d-b.png"]
# Demo story has scenes in order.
in_order = True
            # Demo questions have 4 answer options.
            num_answers = 4
self._logger.debug("DEMO story:\nScenes: " + str(graphic_names)
+ "\nIn order: " + str(in_order)
+ "\nNum answers: " + str(num_answers))
# Otherwise, we will get the details for the current story.
else:
            # If the current story isn't set, log an error and pick a story.
            if (self._current_story is None):
                self._logger.error("We were asked for story details, but we "
                                   "haven't picked a story yet! Picking a "
                                   "story...")
self._current_story = self.pick_next_story()
# Get story information from the database: scene graphics
# names, whether the scenes are shown in order, how many
# answer options there are per question at this level.
graphic_names = self._db_man.get_graphics(self._current_story,
self._level)
num_answers, in_order = self._db_man.get_level_info(self._level)
# Return the story information.
return graphic_names, in_order, num_answers
def record_story_loaded(self):
""" Record that we loaded a story, and that this participant is
playing this story.
"""
# Skip if this is a demo session; otherwise record.
if (self._session != -1):
self._db_man.record_story_played(self._participant, self._session,
self._level, self._current_story)
def record_user_response(self, question_num, question_type, response):
""" Record that the participant responded to one of the story
questions.
"""
# Skip if this is a demo session; otherwise record.
if (self._session != -1):
self._db_man.record_response(self._participant, self._session,
self._level, self._current_story, question_num, question_type,
response)
def set_start_level(self, level):
""" When the game starts, a level to start at can be provided.
We're going to ignore this and use our internal database and
personalization algorithm to determine leveling, but we will
print out an error if the level we pick is different from the
level we are told to start at.
"""
if (level != self._level):
self._logger.warning("We were told to play at level " + str(level)
+ " but our internal personalization algorithm says we should "
+ "play at level " + str(self._level) + ". We will be playing "
+ "at level " + str(self._level))
def get_joint_attention_level(self):
""" Determine what level of joint attention scaffolding to provide
each time it is required.
"""
self._logger.debug("TODO determine joint attention level")
"""
UDisks wrapper utilities.
These act as a convenience abstraction layer on the UDisks DBus service.
Requires UDisks 1.0.5 as described here:
http://udisks.freedesktop.org/docs/1.0.5/
This wraps the DBus API of UDisks1 providing a common interface with the
udisks2 module.
Overview: This module exports the classes ``Sniffer`` and ``Daemon``.
:class:`Sniffer` can be used as an online exporter of the current device
states queried from the UDisks DBus service as requested.
:class:`Daemon` caches all device states and listens to UDisks events to
guarantee the accessibility of device properties in between operations.
"""
from copy import copy
from inspect import getmembers
import logging
import os.path
from udiskie.common import Emitter, samefile
from udiskie.compat import filter
from udiskie.dbus import DBusService, DBusException
from udiskie.locale import _
__all__ = ['Sniffer', 'Daemon']
def filter_opt(opt):
    """Return the keys of all truthy entries in a dictionary as a list."""
    return [k for k, v in opt.items() if v]
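# For example, filter_opt({'force': True, 'unmount': False}) returns
# ['force'], the option-list format the UDisks method calls below expect.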
class DeviceBase(object):
"""Helper base class for devices."""
Interface = 'org.freedesktop.UDisks.Device'
Exception = DBusException
# string representation
def __str__(self):
"""Display as object path."""
return self.object_path
def __eq__(self, other):
"""Comparison by object path."""
return self.object_path == str(other)
def __ne__(self, other):
"""Comparison by object path."""
return not (self == other)
def __nonzero__(self): # python2
"""Check device validity."""
return self.is_valid
__bool__ = __nonzero__ # python3
def is_file(self, path):
"""Comparison by mount and device file path."""
return samefile(path, self.device_file) or any(
samefile(path, mp) for mp in self.mount_paths)
class OnlineDevice(DeviceBase):
"""
Online wrapper for org.freedesktop.UDisks.Device DBus API proxy objects.
Resolves both property access and method calls dynamically to the DBus
object.
This is the main class used to retrieve (and then possibly cache) device
properties from the DBus backend.
"""
# construction
def __init__(self, udisks, object):
"""
Initialize an instance with the given DBus proxy object.
:param DBusObject object:
"""
self._proxy = object.get_interface(self.Interface)
self.object_path = object.object_path
self.udisks = udisks
# availability of interfaces
@property
def is_valid(self):
"""Check if there is a valid DBus object for this object path."""
try:
self._proxy.property.DeviceFile
return True
except self.Exception:
return False
@property
def is_drive(self):
"""Check if the device is a drive."""
return self._proxy.property.DeviceIsDrive
@property
def is_block(self):
"""Check if the device is a block device."""
return True
@property
def is_partition_table(self):
"""Check if the device is a partition table."""
return self._proxy.property.DeviceIsPartitionTable
@property
def is_partition(self):
"""Check if the device has a partition slave."""
return self._proxy.property.DeviceIsPartition
@property
def is_filesystem(self):
"""Check if the device is a filesystem."""
return self.id_usage == 'filesystem'
@property
def is_luks(self):
"""Check if the device is a LUKS container."""
return self._proxy.property.DeviceIsLuks
# ----------------------------------------
# Drive
# ----------------------------------------
# Drive properties
is_toplevel = is_drive
@property
def is_detachable(self):
"""Check if the drive that owns this device can be detached."""
if not self.is_drive:
return None
return self._proxy.property.DriveCanDetach
@property
def is_ejectable(self):
"""Check if the drive that owns this device can be ejected."""
if not self.is_drive:
return None
return self._proxy.property.DriveIsMediaEjectable
@property
def has_media(self):
"""Check if there is media available in the drive."""
return self._proxy.property.DeviceIsMediaAvailable
# Drive methods
def eject(self, unmount=False):
"""Eject media from the device."""
return self._proxy.method.DriveEject(
'(as)',
filter_opt({'unmount': unmount}))
def detach(self):
"""Detach the device by e.g. powering down the physical port."""
return self._proxy.method.DriveDetach('(as)', [])
# ----------------------------------------
# Block
# ----------------------------------------
# Block properties
@property
def device_file(self):
"""The filesystem path of the device block file."""
return os.path.normpath(self._proxy.property.DeviceFile)
@property
def device_presentation(self):
"""The device file path to present to the user."""
return self._proxy.property.DeviceFilePresentation
# TODO: device_size missing
@property
def id_usage(self):
"""Device usage class, for example 'filesystem' or 'crypto'."""
return self._proxy.property.IdUsage
@property
def is_crypto(self):
"""Check if the device is a crypto device."""
return self.id_usage == 'crypto'
@property
def is_ignored(self):
"""Check if the device should be ignored."""
return self._proxy.property.DevicePresentationHide
@property
def device_id(self):
"""
Return a unique and persistent identifier for the device.
This is the basename (last path component) of the symlink in
`/dev/disk/by-id/`.
"""
for filename in self._proxy.property.DeviceFileById:
parts = filename.split('/')
if parts[-2] == 'by-id':
return parts[-1]
return ''
@property
def id_type(self):
""""
Return IdType property.
This field provides further detail on IdUsage, for example:
IdUsage 'filesystem' 'crypto'
IdType 'ext4' 'crypto_LUKS'
"""
return self._proxy.property.IdType
@property
def id_label(self):
"""Label of the device if available."""
return self._proxy.property.IdLabel
@property
def id_uuid(self):
"""Device UUID."""
return self._proxy.property.IdUuid
@property
def luks_cleartext_slave(self):
"""Get luks crypto device."""
if not self.is_luks_cleartext:
return None
return self.udisks[self._proxy.property.LuksCleartextSlave]
@property
def is_luks_cleartext(self):
"""Check whether this is a luks cleartext device."""
return self._proxy.property.DeviceIsLuksCleartext
@property
def is_external(self):
"""Check if the device is external."""
return not self.is_systeminternal
@property
def is_systeminternal(self):
"""Check if the device is internal."""
return self._proxy.property.DeviceIsSystemInternal
@property
def drive(self):
"""
Get the drive containing this device.
The returned Device object is not guaranteed to be a drive.
"""
if self.is_partition:
return self.partition_slave.drive
elif self.is_luks_cleartext:
return self.luks_cleartext_slave.drive
else:
return self
root = drive
@property
def should_automount(self):
"""Check if the device should be automounted."""
return self._proxy.property.DeviceAutomountHint != 'never'
@property
def icon_name(self):
"""Return the recommended device icon name."""
return self._proxy.property.DevicePresentationIconName or 'drive-removable-media'
symbolic_icon_name = icon_name
# ----------------------------------------
# Partition
# ----------------------------------------
# Partition properties
@property
def partition_slave(self):
"""Get the partition slave (container)."""
if not self.is_partition:
return None
return self.udisks[self._proxy.property.PartitionSlave]
# ----------------------------------------
# Filesystem
# ----------------------------------------
# Filesystem properties
@property
def is_mounted(self):
"""Check if the device is mounted."""
return self._proxy.property.DeviceIsMounted
@property
def mount_paths(self):
"""Return list of active mount paths."""
if not self.is_mounted:
return []
raw_paths = self._proxy.property.DeviceMountPaths
return [os.path.normpath(path) for path in raw_paths]
# Filesystem methods
def mount(self,
fstype=None,
options=None,
auth_no_user_interaction=False):
"""Mount filesystem."""
options = (options or []) + filter_opt({
'auth_no_user_interaction': auth_no_user_interaction
})
return self._proxy.method.FilesystemMount(
'(sas)',
fstype or self.id_type,
options)
def unmount(self, force=False):
"""Unmount filesystem."""
return self._proxy.method.FilesystemUnmount(
'(as)',
filter_opt({'force': force}))
# ----------------------------------------
# Encrypted
# ----------------------------------------
# Encrypted properties
@property
def luks_cleartext_holder(self):
"""Get unlocked luks cleartext device."""
if not self.is_luks:
return None
return self.udisks[self._proxy.property.LuksHolder]
@property
def is_unlocked(self):
"""Check if device is already unlocked."""
if not self.is_luks:
return None
return self.luks_cleartext_holder
# Encrypted methods
def unlock(self, password):
"""Unlock Luks device."""
return self.udisks.update(
self._proxy.method.LuksUnlock(
'(sas)',
password,
[]))
def lock(self):
"""Lock Luks device."""
return self._proxy.method.LuksLock('(as)', [])
# ----------------------------------------
# derived properties
# ----------------------------------------
@property
def in_use(self):
"""Check whether this device is in use, i.e. mounted or unlocked."""
if self.is_mounted or self.is_unlocked:
return True
if self.is_partition_table:
for device in self.udisks:
if device.partition_slave == self and device.in_use:
return True
return False
def _CachedDeviceProperty(method):
"""Cache object path and return the current known CachedDevice state."""
key = '_' + method.__name__
def get(self):
return self._daemon[getattr(self, key, None)]
def set(self, device):
setattr(self, key, getattr(device, 'object_path', None))
return property(get, set, doc=method.__doc__)
class CachedDevice(DeviceBase):
"""
Cached device state.
Properties are cached at creation time. Methods will be invoked
dynamically via the associated DBus object.
"""
def __init__(self, device):
"""Cache all properties of the online device."""
self._device = device
self._daemon = device.udisks
def isproperty(obj):
return isinstance(obj, property)
for key, val in getmembers(device.__class__, isproperty):
try:
setattr(self, key, getattr(device, key))
except device.Exception:
setattr(self, key, None)
self.is_valid = device.is_valid
def __getattr__(self, key):
"""Resolve unknown properties and methods via the online device."""
if key.startswith('_'):
raise AttributeError(key)
return getattr(self._device, key)
# Overload properties that return Device objects to return CachedDevice
# instances instead. NOTE: the setters are implemented such that the
# returned devices will be cached at the time the property is accessed
    # rather than at the time the current object was instantiated.
# FIXME: should it be different?
@_CachedDeviceProperty
def luks_cleartext_slave(self):
"""Get luks crypto device."""
pass
@_CachedDeviceProperty
def drive(self):
"""Get the drive."""
pass
@_CachedDeviceProperty
def partition_slave(self):
"""Get the partition slave (container)."""
pass
@_CachedDeviceProperty
def luks_cleartext_holder(self):
"""Get unlocked luks cleartext device."""
pass
def unlock(self, password, options=[]):
"""Unlock Luks device."""
return CachedDevice(self._device.unlock(password))
class UDisks(DBusService):
"""
Base class for UDisks service wrappers.
"""
BusName = 'org.freedesktop.UDisks'
Interface = 'org.freedesktop.UDisks'
ObjectPath = '/org/freedesktop/UDisks'
def __iter__(self):
"""Iterate over all devices."""
return filter(None, map(self.get, self.paths()))
def __getitem__(self, object_path):
return self.get(object_path)
def find(self, path):
"""
Get a device proxy by device name or any mount path of the device.
This searches through all accessible devices and compares device
        path as well as mount paths.
"""
for device in self:
if device.is_file(path):
return device
logger = logging.getLogger(__name__)
        logger.warning(_('Device not found: {0}', path))
return None
class Sniffer(UDisks):
"""
UDisks DBus service wrapper.
This is a wrapper for the DBus API of the UDisks service at
'org.freedesktop.UDisks'. Access to properties and device states is
completely online, meaning the properties are requested from dbus as
they are accessed in the python object.
"""
# Construction
def __init__(self, proxy=None):
"""
Initialize an instance with the given DBus proxy object.
:param common.DBusProxy proxy: proxy to udisks object
"""
self._proxy = proxy or self.connect_service()
# Make sure the proxy object is loaded and usable:
self._proxy.property.DaemonVersion
def paths(self):
return self._proxy.method.EnumerateDevices()
def get(self, object_path):
"""Create a Device instance from object path."""
return OnlineDevice(
self,
self._proxy.object.bus.get_object(object_path))
update = get
class Daemon(Emitter, UDisks):
"""
UDisks listener daemon.
Listens to UDisks events. When a change occurs this class detects what
has changed and triggers an appropriate event. Valid events are:
- device_added / device_removed
- device_unlocked / device_locked
- device_mounted / device_unmounted
- media_added / media_removed
- device_changed / job_failed
A very primitive mechanism that gets along without external
dependencies is used for event dispatching. The methods `connect` and
`disconnect` can be used to add or remove event handlers.
"""
def __init__(self, proxy=None):
"""
Create a Daemon object and start listening to DBus events.
:param common.DBusProxy proxy: proxy to the dbus service object
A default proxy will be created if set to ``None``.
"""
event_names = ['device_added',
'device_removed',
'device_mounted',
'device_unmounted',
'media_added',
'media_removed',
'device_unlocked',
'device_locked',
'device_changed',
'job_failed']
super(Daemon, self).__init__(event_names)
proxy = proxy or self.connect_service()
sniffer = Sniffer(proxy)
self._sniffer = sniffer
self._jobs = {}
self._devices = {}
self._errors = {'mount': {}, 'unmount': {},
'unlock': {}, 'lock': {},
'eject': {}, 'detach': {}}
self.connect('device_changed', self._on_device_changed)
proxy.connect('DeviceAdded', self._device_added)
proxy.connect('DeviceRemoved', self._device_removed)
proxy.connect('DeviceChanged', self._device_changed)
proxy.connect('DeviceJobChanged', self._device_job_changed)
self._sync()
# Sniffer overrides
def paths(self):
"""Iterate over all valid cached devices."""
return (object_path
for object_path, device in self._devices.items()
if device)
def get(self, object_path):
"""Return the current cached state of the device."""
return self._devices.get(object_path)
def update(self, object_path):
device = self._sniffer.get(object_path)
cached = CachedDevice(device)
if cached or object_path not in self._devices:
self._devices[object_path] = cached
else:
self._invalidate(object_path)
return cached
# special methods
def set_error(self, device, action, message):
self._errors[action][device.object_path] = message
# events
def _on_device_changed(self, old_state, new_state):
"""Detect type of event and trigger appropriate event handlers."""
self._detect_toggle('has_media', old_state, new_state,
'media_added', 'media_removed')
self._detect_toggle('is_mounted', old_state, new_state,
'device_mounted', 'device_unmounted')
self._detect_toggle('is_unlocked', old_state, new_state,
'device_unlocked', 'device_locked')
def _detect_toggle(self, property_name, old, new, add_name, del_name):
old_valid = old and bool(getattr(old, property_name))
new_valid = new and bool(getattr(new, property_name))
# If we were notified about a started job we don't want to trigger
# an event when the device is changed, but when the job is
# completed. Otherwise we would show unmount notifications too
# early (when it's not yet safe to remove the drive).
# On the other hand, if the unmount operation is not issued via
# UDisks1, there will be no corresponding job.
cached_job = self._jobs.get(old.object_path)
action_name = self._event_mapping.get(cached_job)
if add_name and new_valid and not old_valid:
if add_name != action_name:
self.trigger(add_name, new)
elif del_name and old_valid and not new_valid:
if del_name != action_name:
self.trigger(del_name, new)
# UDisks event listeners
def _device_added(self, object_path):
"""Internal method."""
new_state = self.update(object_path)
self.trigger('device_added', new_state)
def _device_removed(self, object_path):
"""Internal method."""
old_state = self[object_path]
self._invalidate(object_path)
self.trigger('device_removed', old_state)
def _device_changed(self, object_path):
"""Internal method."""
old_state = self[object_path]
new_state = self.update(object_path)
self.trigger('device_changed', old_state, new_state)
# NOTE: it seems the UDisks1 documentation for DeviceJobChanged is
# fatally incorrect!
def _device_job_changed(self,
object_path,
job_in_progress,
job_id,
job_initiated_by_user,
job_is_cancellable,
job_percentage):
"""
Detect type of event and trigger appropriate event handlers.
Internal method.
"""
try:
if job_id:
action = self._action_mapping[job_id]
else:
action = self._jobs[object_path]
except KeyError:
# this can happen
# a) at startup, when we only see the completion of a job
# b) when we get notified about a job, which we don't handle
return
        # NOTE: The heuristic used here is prone to race conditions.
if job_in_progress:
# Cache the action name for later use:
self._jobs[object_path] = action
else:
del self._jobs[object_path]
device = self[object_path]
if self._check_success[action](device):
event = self._event_mapping[action]
self.trigger(event, device)
else:
# get and delete message, if available:
message = self._errors[action].pop(object_path, "")
self.trigger('job_failed', device, action, message)
log = logging.getLogger(__name__)
log.info(_('{0} operation failed for device: {1}',
job_id, object_path))
# used internally by _device_job_changed:
_action_mapping = {
'FilesystemMount': 'mount',
'FilesystemUnmount': 'unmount',
'LuksUnlock': 'unlock',
'LuksLock': 'lock',
'DriveDetach': 'detach',
'DriveEject': 'eject',
}
_event_mapping = {
'mount': 'device_mounted',
'unmount': 'device_unmounted',
'unlock': 'device_unlocked',
'lock': 'device_locked',
'eject': 'media_removed',
'detach': 'device_removed',
}
_check_success = {
'mount': lambda dev: dev.is_mounted,
'unmount': lambda dev: not dev or not dev.is_mounted,
'unlock': lambda dev: dev.is_unlocked,
'lock': lambda dev: not dev or not dev.is_unlocked,
'detach': lambda dev: not dev,
'eject': lambda dev: not dev or not dev.has_media,
}
# internal state keeping
def _sync(self):
"""Cache all device states."""
online_devices = {dev.object_path: dev for dev in self._sniffer}
self._devices = {
object_path: CachedDevice(device)
for object_path, device in online_devices.items()
}
def _invalidate(self, object_path):
"""Flag the device invalid. This removes it from the iteration."""
if object_path in self._devices:
update = copy(self._devices[object_path])
update.is_valid = False
self._devices[object_path] = update
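# Example (sketch): a minimal consumer of the Daemon event API above. It
# assumes a reachable UDisks1 service and the DBus glue (connect_service,
# Emitter) defined earlier in this module; event loop integration is left out.
if __name__ == '__main__':
    def on_mounted(device):
        print('mounted: %s' % ', '.join(device.mount_paths))

    daemon = Daemon()
    daemon.connect('device_mounted', on_mounted)
    # ... hand control to your DBus main loop here ...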
|
|
from twisted.python import failure
from twisted.internet import defer
from ooni.tasks import BaseTask, TaskWithTimeout
from ooni.managers import TaskManager
class MockMeasurementFailOnce(BaseTask):
def run(self):
f = open('dummyTaskFailOnce.txt', 'w')
f.write('fail')
f.close()
        if self.failures >= 1:
            return defer.succeed(self)
        else:
            return defer.fail(failure.Failure(Exception('fail')))
class MockMeasurementManager(TaskManager):
def __init__(self):
self.successes = []
TaskManager.__init__(self)
def failed(self, failure, task):
pass
def succeeded(self, result, task):
self.successes.append((result, task))
class MockReporter(object):
def __init__(self):
self.created = defer.Deferred()
def writeReportEntry(self, entry):
pass
def createReport(self):
self.created.callback(self)
def finish(self):
pass
class MockFailure(Exception):
pass
## from test_managers
mockFailure = failure.Failure(MockFailure('mock'))
class MockSuccessTask(BaseTask):
def run(self):
return defer.succeed(42)
class MockFailTask(BaseTask):
def run(self):
return defer.fail(mockFailure)
class MockFailOnceTask(BaseTask):
def run(self):
if self.failures >= 1:
return defer.succeed(42)
else:
return defer.fail(mockFailure)
class MockSuccessTaskWithTimeout(TaskWithTimeout):
def run(self):
return defer.succeed(42)
class MockFailTaskThatTimesOut(TaskWithTimeout):
def run(self):
return defer.Deferred()
class MockTimeoutOnceTask(TaskWithTimeout):
def run(self):
if self.failures >= 1:
return defer.succeed(42)
else:
return defer.Deferred()
class MockFailTaskWithTimeout(TaskWithTimeout):
def run(self):
return defer.fail(mockFailure)
class MockNetTest(object):
def __init__(self):
self.successes = []
def succeeded(self, measurement):
self.successes.append(measurement)
class MockMeasurement(TaskWithTimeout):
def __init__(self, net_test):
TaskWithTimeout.__init__(self)
self.netTest = net_test
def succeeded(self, result):
return self.netTest.succeeded(42)
class MockSuccessMeasurement(MockMeasurement):
def run(self):
return defer.succeed(42)
class MockFailMeasurement(MockMeasurement):
def run(self):
return defer.fail(mockFailure)
class MockFailOnceMeasurement(MockMeasurement):
def run(self):
if self.failures >= 1:
return defer.succeed(42)
else:
return defer.fail(mockFailure)
class MockDirector(object):
def __init__(self):
self.successes = []
def measurementFailed(self, failure, measurement):
pass
def measurementSucceeded(self, measurement):
self.successes.append(measurement)
## from test_reporter.py
class MockOReporter(object):
def __init__(self):
self.created = defer.Deferred()
def writeReportEntry(self, entry):
return defer.succeed(42)
def finish(self):
pass
def createReport(self):
from ooni.utils import log
log.debug("Creating report with %s" % self)
self.created.callback(self)
class MockOReporterThatFailsWrite(MockOReporter):
def writeReportEntry(self, entry):
raise MockFailure
class MockOReporterThatFailsOpen(MockOReporter):
def createReport(self):
raise MockFailure
class MockOReporterThatFailsWriteOnce(MockOReporter):
def __init__(self):
self.failure = 0
MockOReporter.__init__(self)
def writeReportEntry(self, entry):
if self.failure >= 1:
return defer.succeed(42)
else:
self.failure += 1
raise MockFailure
class MockTaskManager(TaskManager):
def __init__(self):
self.successes = []
TaskManager.__init__(self)
def failed(self, failure, task):
pass
def succeeded(self, result, task):
self.successes.append((result, task))
class MockOONIBClient(object):
def lookupTestHelpers(self, required_test_helpers):
ret = {
'default': {
'address': '127.0.0.1',
'collector': 'httpo://thirteenchars1234.onion'
}
}
for required_test_helper in required_test_helpers:
ret[required_test_helper] = {
'address': '127.0.0.1',
'collector': 'httpo://thirteenchars1234.onion'
}
return defer.succeed(ret)
|
|
#!/usr/bin/env python
"""PyTorch-specific functionality
"""
import itertools
from functools import reduce
from operator import mul
from wandb import util
from wandb.data_types import Node
import wandb
torch = None
def nested_shape(array_or_tuple, seen=None):
"""Figures out the shape of tensors possibly embedded in tuples
    e.g.
[0,0] returns (2)
([0,0], [0,0]) returns (2,2)
(([0,0], [0,0]),[0,0]) returns ((2,2),2)
"""
if seen is None:
seen = set()
if hasattr(array_or_tuple, "size"):
# pytorch tensors use V.size() to get size of tensor
return list(array_or_tuple.size())
elif hasattr(array_or_tuple, "get_shape"):
# tensorflow uses V.get_shape() to get size of tensor
return array_or_tuple.get_shape().as_list()
elif hasattr(array_or_tuple, "shape"):
return array_or_tuple.shape
seen.add(id(array_or_tuple))
try:
# treat object as iterable
return [
nested_shape(item, seen) if id(item) not in seen else 0
for item in list(array_or_tuple)
]
except TypeError:
# object is not actually iterable
# LB: Maybe we should throw an error?
return []
LOG_TRACK_COUNT, LOG_TRACK_THRESHOLD = range(2)
def log_track_init(log_freq):
"""create tracking structure used by log_track_update"""
l = [0] * 2
l[LOG_TRACK_THRESHOLD] = log_freq
return l
def log_track_update(log_track):
"""count (log_track[0]) up to threshold (log_track[1]), reset count (log_track[0]) and return true when reached"""
log_track[LOG_TRACK_COUNT] += 1
if log_track[LOG_TRACK_COUNT] < log_track[LOG_TRACK_THRESHOLD]:
return False
log_track[LOG_TRACK_COUNT] = 0
return True
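# Example (sketch): with log_freq=3 the tracker fires on every third call.
#
#     track = log_track_init(3)
#     [log_track_update(track) for _ in range(6)]
#     # -> [False, False, True, False, False, True]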
class TorchHistory:
"""History methods specific to PyTorch"""
def __init__(self):
global torch
torch = wandb.util.get_module("torch", "Could not import torch")
self._hook_handles = {}
self._num_bins = 64
self._is_cuda_histc_supported = None
self.hook_torch = TorchGraph.hook_torch
def add_log_hooks_to_pytorch_module(
self,
module,
name=None,
prefix="",
log_parameters=True,
log_gradients=True,
log_freq=0,
):
"""This instuments hooks into the pytorch module
log_parameters - log parameters after a forward pass
log_gradients - log gradients after a backward pass
log_freq - log gradients/parameters every N batches
"""
if name is not None:
prefix = prefix + name
if not hasattr(module, "_wandb_hook_names"):
module._wandb_hook_names = []
if log_parameters:
def parameter_log_hook(module, input_, output, log_track):
if not log_track_update(log_track):
return
for name, parameter in module.named_parameters():
# for pytorch 0.3 Variables
if isinstance(parameter, torch.autograd.Variable):
data = parameter.data
else:
data = parameter
self.log_tensor_stats(data.cpu(), "parameters/" + prefix + name)
log_track_params = log_track_init(log_freq)
hook = module.register_forward_hook(
lambda mod, inp, outp: parameter_log_hook(
mod, inp, outp, log_track_params
)
)
self._hook_handles["parameters/" + prefix] = hook
module._wandb_hook_names.append("parameters/" + prefix)
if log_gradients:
for name, parameter in module.named_parameters():
if parameter.requires_grad:
log_track_grad = log_track_init(log_freq)
module._wandb_hook_names.append("gradients/" + prefix + name)
self._hook_variable_gradient_stats(
parameter, "gradients/" + prefix + name, log_track_grad
)
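    # Example (sketch): this hook wiring is normally reached through the
    # public entry point `wandb.watch`, e.g.
    #
    #     wandb.watch(model, log="all", log_freq=100)
    #
    # rather than by calling add_log_hooks_to_pytorch_module directly.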
def log_tensor_stats(self, tensor, name):
"""Add distribution statistics on a tensor's elements to the current History entry"""
# TODO Handle the case of duplicate names.
if isinstance(tensor, tuple) or isinstance(tensor, list):
while (isinstance(tensor, tuple) or isinstance(tensor, list)) and (
isinstance(tensor[0], tuple) or isinstance(tensor[0], list)
):
tensor = [item for sublist in tensor for item in sublist]
tensor = torch.cat([t.reshape(-1) for t in tensor])
# checking for inheritance from _TensorBase didn't work for some reason
if not hasattr(tensor, "shape"):
cls = type(tensor)
raise TypeError(
"Expected Tensor, not {}.{}".format(cls.__module__, cls.__name__)
)
# HalfTensors on cpu do not support view(), upconvert to 32bit
if isinstance(tensor, torch.HalfTensor):
tensor = tensor.clone().type(torch.FloatTensor).detach()
# Sparse tensors have a bunch of implicit zeros. In order to histo them correctly,
# we have to count them up and add them to the histo ourselves.
sparse_zeros = None
if tensor.is_sparse:
# Have to call this on a sparse tensor before most other ops.
tensor = tensor.cpu().coalesce().clone().detach()
backing_values = tensor._values()
non_zero_values = backing_values.numel()
all_values = tensor.numel()
sparse_zeros = all_values - non_zero_values
tensor = backing_values
flat = tensor.reshape(-1)
# For pytorch 0.3 we use unoptimized numpy histograms (detach is new in 0.4)
if not hasattr(flat, "detach"):
tensor = flat.cpu().clone().numpy()
wandb.run._log({name: wandb.Histogram(tensor)}, commit=False)
return
if flat.is_cuda:
# TODO(jhr): see if pytorch will accept something upstream to check cuda support for ops
# until then, we are going to have to catch a specific exception to check for histc support.
if self._is_cuda_histc_supported is None:
self._is_cuda_histc_supported = True
check = torch.cuda.FloatTensor(1).fill_(0)
try:
check = flat.histc(bins=self._num_bins)
except RuntimeError as e:
# Only work around missing support with specific exception
# if str(e).startswith("_th_histc is not implemented"):
# self._is_cuda_histc_supported = False
# On second thought, 0.4.1 doesnt have support and maybe there are other issues
# lets disable more broadly for now
self._is_cuda_histc_supported = False
if not self._is_cuda_histc_supported:
flat = flat.cpu().clone().detach()
# As of torch 1.0.1.post2+nightly, float16 cuda summary ops are not supported (convert to float32)
if isinstance(flat, torch.cuda.HalfTensor):
flat = flat.clone().type(torch.cuda.FloatTensor).detach()
if isinstance(flat, torch.HalfTensor):
flat = flat.clone().type(torch.FloatTensor).detach()
# Skip logging if all values are nan or inf or the tensor is empty.
if self._no_finite_values(flat):
return
# Remove nans and infs if present. There's no good way to represent that in histograms.
flat = self._remove_infs_nans(flat)
tmin = flat.min().item()
tmax = flat.max().item()
if sparse_zeros:
# If we've got zeros to add in, make sure zero is in the hist range.
tmin = 0 if tmin > 0 else tmin
tmax = 0 if tmax < 0 else tmax
# Anecdotally, this can somehow happen sometimes. Maybe a precision error
# in min()/max() above. Swap here to prevent a runtime error.
if tmin > tmax:
tmin, tmax = tmax, tmin
tensor = flat.histc(bins=self._num_bins, min=tmin, max=tmax)
tensor = tensor.cpu().clone().detach()
bins = torch.linspace(tmin, tmax, steps=self._num_bins + 1)
# Add back zeroes from a sparse tensor.
if sparse_zeros:
bins_np = bins.numpy()
tensor_np = tensor.numpy()
bin_idx = 0
num_buckets = len(bins_np) - 1
for i in range(num_buckets):
start = bins_np[i]
end = bins_np[i + 1]
# There are 3 cases to consider here, all of which mean we've found the right bucket
# 1. The bucket range contains zero.
# 2. The bucket range lower bound *is* zero.
# 3. This is the last bucket and the bucket range upper bound is zero.
if (start <= 0 and end > 0) or (i == num_buckets - 1 and end == 0):
bin_idx = i
break
tensor_np[bin_idx] += sparse_zeros
tensor = torch.Tensor(tensor_np)
bins = torch.Tensor(bins_np)
wandb.run._log(
{name: wandb.Histogram(np_histogram=(tensor.tolist(), bins.tolist()))},
commit=False,
)
def _hook_variable_gradient_stats(self, var, name, log_track):
"""Logs a Variable's gradient's distribution statistics next time backward()
is called on it.
"""
if not isinstance(var, torch.autograd.Variable):
cls = type(var)
raise TypeError(
"Expected torch.Variable, not {}.{}".format(
cls.__module__, cls.__name__
)
)
handle = self._hook_handles.get(name)
if handle is not None and self._torch_hook_handle_is_valid(handle):
raise ValueError('A hook has already been set under name "{}"'.format(name))
def _callback(grad, log_track):
if not log_track_update(log_track):
return
self.log_tensor_stats(grad.data, name)
handle = var.register_hook(lambda grad: _callback(grad, log_track))
self._hook_handles[name] = handle
return handle
def unhook_all(self):
for handle in self._hook_handles.values():
handle.remove()
        self._hook_handles = {}
def unhook(self, name):
handle = self._hook_handles.pop(name)
handle.remove()
def _torch_hook_handle_is_valid(self, handle):
d = handle.hooks_dict_ref()
if d is None:
return False
else:
return handle.id in d
def _no_finite_values(self, tensor: "torch.Tensor") -> bool:
return tensor.shape == torch.Size([0]) or (~torch.isfinite(tensor)).all().item()
def _remove_infs_nans(self, tensor: "torch.Tensor") -> "torch.Tensor":
if not torch.isfinite(tensor).all():
tensor = tensor[torch.isfinite(tensor)]
return tensor
class TorchGraph(wandb.data_types.Graph):
def __init__(self):
super(TorchGraph, self).__init__("torch")
self._graph_hooks = set()
@classmethod
def hook_torch(cls, model, criterion=None, graph_idx=0):
wandb.termlog("logging graph, to disable use `wandb.watch(log_graph=False)`")
graph = TorchGraph()
graph.hook_torch_modules(model, criterion, graph_idx=graph_idx)
return graph
def create_forward_hook(self, name, graph_idx):
graph = self
def after_forward_hook(module, input, output):
if id(module) not in self._graph_hooks:
# hook already processed -> noop
return
if not isinstance(output, tuple):
output = (output,)
parameters = [
(pname, list(param.size()))
for pname, param in module.named_parameters()
]
node = Node(
id=id(module),
name=name,
class_name=str(module),
output_shape=nested_shape(output),
parameters=parameters,
num_parameters=[reduce(mul, size, 1) for (pname, size) in parameters],
)
graph.nodes_by_id[id(module)] = node
for param in module.parameters():
graph.nodes_by_id[id(param)] = node
graph.add_node(node)
if not graph.criterion_passed:
if hasattr(output[0], "grad_fn"):
graph.criterion = output[0].grad_fn
elif (
isinstance(output[0], list)
and output[0]
and hasattr(output[0][0], "grad_fn")
):
graph.criterion = output[0][0].grad_fn
# hook has been processed
self._graph_hooks -= {id(module)}
if not self._graph_hooks:
# we went through the entire graph
wandb.run.summary["graph_%i" % graph_idx] = self
return after_forward_hook
def hook_torch_modules(
self, module, criterion=None, prefix=None, graph_idx=0, parent=None
):
torch = util.get_module("torch", "Could not import torch")
layers = 0
graph = self
if hasattr(module, "_wandb_watch_called") and module._wandb_watch_called:
raise ValueError(
"You can only call `wandb.watch` once per model. Pass a new instance of the model if you need to call wandb.watch again in your code."
)
module._wandb_watch_called = True
if criterion:
graph.criterion = criterion
graph.criterion_passed = True
for name, sub_module in module.named_children():
name = name or str(layers)
if prefix:
name = prefix + "." + name
layers += 1
if not isinstance(sub_module, torch.nn.Module):
# TODO: Why does this happen?
break
# Trying to support torch >0.3 making this code complicated
# We want a list of types that we should recurse into
# Torch 0.3 uses containers
# 0.4 has ModuleList
# 0.4.1 has ModuleDict
module_types = [
getattr(torch.nn, module_classname)
for module_classname in (
"Container",
"Sequential",
"ModuleList",
"ModuleDict",
)
if hasattr(torch.nn, module_classname)
]
if parent is None:
parent = module
if isinstance(sub_module, tuple(module_types)):
self.hook_torch_modules(sub_module, prefix=name, parent=parent)
else:
self._graph_hooks |= {id(sub_module)}
graph_hook = sub_module.register_forward_hook(
self.create_forward_hook(name, graph_idx)
)
wandb.run._torch._hook_handles[
"topology/" + str(id(graph_hook))
] = graph_hook
if not hasattr(parent, "_wandb_hook_names"):
# should never happen but let's be extra safe
parent._wandb_hook_names = []
parent._wandb_hook_names.append("topology/" + str(id(graph_hook)))
@classmethod
def from_torch_layers(cls, module_graph, variable):
"""Recover something like neural net layers from PyTorch Module's and the
compute graph from a Variable.
Example output for a multi-layer RNN. We confusingly assign shared embedding values
to the encoder, but ordered next to the decoder.
rnns.0.linear.module.weight_raw rnns.0
rnns.0.linear.module.bias rnns.0
rnns.1.linear.module.weight_raw rnns.1
rnns.1.linear.module.bias rnns.1
rnns.2.linear.module.weight_raw rnns.2
rnns.2.linear.module.bias rnns.2
rnns.3.linear.module.weight_raw rnns.3
rnns.3.linear.module.bias rnns.3
decoder.weight encoder
decoder.bias decoder
"""
        # TODO: We're currently not using this, but I left it here in case we want to resurrect! - CVP
torch = util.get_module("torch", "Could not import torch")
module_nodes_by_hash = {id(n): n for n in module_graph.nodes}
module_parameter_nodes = [
n for n in module_graph.nodes if isinstance(n.obj, torch.nn.Parameter)
]
names_by_pid = {id(n.obj): n.name for n in module_parameter_nodes}
reachable_param_nodes = module_graph[0].reachable_descendents()
reachable_params = {}
module_reachable_params = {}
names = {}
for pid, reachable_nodes in reachable_param_nodes.items():
node = module_nodes_by_hash[pid]
if not isinstance(node.obj, torch.nn.Module):
continue
module = node.obj
reachable_params = {} # by object id
module_reachable_params[id(module)] = reachable_params
names[node.name] = set()
for reachable_hash in reachable_nodes:
reachable = module_nodes_by_hash[reachable_hash]
if isinstance(reachable.obj, torch.nn.Parameter):
param = reachable.obj
reachable_params[id(param)] = param
names[node.name].add(names_by_pid[id(param)])
# we look for correspondences between sets of parameters used in subtrees of the
# computation graph and sets of parameters contained in subtrees of the module
# graph
node_depths = {id(n): d for n, d in module_graph[0].descendent_bfs()}
parameter_module_names = {}
parameter_modules = {}
for param_node in (
n for n in module_graph.nodes if isinstance(n.obj, torch.nn.Parameter)
):
pid = id(param_node.obj)
best_node = None
best_depth = None
best_reachable_params = None
for node in module_graph.nodes:
if not isinstance(node.obj, torch.nn.Module):
continue
module = node.obj
reachable_params = module_reachable_params[id(module)]
if pid in reachable_params:
depth = node_depths[id(node)]
if best_node is None or (len(reachable_params), depth) <= (
len(best_reachable_params),
best_depth,
):
best_node = node
best_depth = depth
best_reachable_params = reachable_params
parameter_modules[pid] = best_node
parameter_module_names[param_node.name] = best_node.name
# contains all parameters but only a minimal set of modules necessary
# to contain them (and which ideally correspond to conceptual layers)
reduced_module_graph = cls()
rmg_ids = itertools.count()
rmg_root = Node(id=next(rmg_ids), node=module_graph[0])
reduced_module_graph.add_node(rmg_root)
reduced_module_graph.root = rmg_root
rmg_nodes_by_pid = {}
module_nodes_by_pid = {id(n.obj): n for n in module_graph.nodes}
compute_graph, compute_node_vars = cls.from_torch_compute_graph(variable)
for node, _ in reversed(list(compute_graph[0].ancestor_bfs())):
param = compute_node_vars.get(node.id)
pid = id(param)
if not isinstance(param, torch.nn.Parameter):
continue
if pid not in module_nodes_by_pid:
# not all Parameters that occur in the compute graph come from the Module graph
continue
# add the nodes in the order we want to display them on the frontend
mid = id(parameter_modules[pid].obj)
if mid in rmg_nodes_by_pid:
rmg_module = rmg_nodes_by_pid[mid]
else:
rmg_module = rmg_nodes_by_pid[mid] = Node(
id=next(rmg_ids), node=module_nodes_by_pid[mid]
)
reduced_module_graph.add_node(rmg_module)
reduced_module_graph.add_edge(rmg_root, rmg_module)
rmg_param = Node(id=next(rmg_ids), node=module_nodes_by_pid[pid])
rmg_nodes_by_pid[pid] = rmg_param
reduced_module_graph.add_node(rmg_param)
reduced_module_graph.add_edge(rmg_module, rmg_param)
return reduced_module_graph
@classmethod
def node_from_module(cls, nid, module):
numpy = util.get_module("numpy", "Could not import numpy")
node = wandb.Node()
node.id = nid
node.child_parameters = 0
for parameter in module.parameters():
node.child_parameters += numpy.prod(parameter.size())
node.class_name = type(module).__name__
return node
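# Example (sketch): hooking a model for one-off graph logging. An active
# wandb run is assumed; the model and input are illustrative only.
#
#     model = torch.nn.Linear(4, 2)
#     TorchGraph.hook_torch(model)
#     model(torch.zeros(1, 4))  # first forward pass writes summary["graph_0"]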
|
|
import sys
import os
import io
import psycopg2
import xml.etree.ElementTree as ET
from lxml import etree
import math
from collections import Counter
from operator import itemgetter
import datetime
import collections
#For use with Tr-ConLL
def parse_xml(afile):
#print afile
    xmldoc = ET.parse(afile)
root = xmldoc.getroot()
wordref = {}
toporef = {}
i = 0
sid = 0
#print root.tag
#print root.attrib
for child in root.iter('s'):
#print child.attrib
#sid = child.attrib['id']
sid += 1
#print sid
for sub in child:
i += 1
#print sub.tag, sub.attrib
if sub.tag == "w":
#print sub.attrib['tok']
wordref[i] = sub.attrib['tok']
elif sub.tag == "toponym":
#print sub.attrib['term']
wordref[i] = sub.attrib['term']
for sub2 in sub:
for sub3 in sub2:
if "selected" in sub3.attrib:
#print sub3.attrib
toporef[i] = [wordref[i], sub3.attrib]
return wordref, toporef
def getContext_NoteTopos(wordref, i, window, stopwords, toporef):
j = i
contextlist = [[wordref[j], "MainTopo", (i-j)]]
while j > 1:
j = j - 1
if i - window >= j:
break
if j in toporef:
if " " in wordref[j]:
contextlist.append([wordref[j].strip().replace(" ", "|"), "OtherTopo", (i-j)])
else:
contextlist.append([wordref[j], "OtherTopo", (i-j)])
elif wordref[j] not in stopwords:
try:
#u1 = unicode(wordref[j], 'utf-8')
if len(wordref[j]) == 1 and block(wordref[j]) == "General Punctuation":
pass
#print "~~~~Forbidden Character~~~~"
#print wordref[j]
#print "~~~~~~~~~~~~~~~~~~~~~"
#sys.exit()
else:
contextlist.append([wordref[j], "Word", (i-j)])
except:
#print "~~~~Broken String~~~~"
#print wordref[j]
pass
# print "~~~~~~~~~~~~~~~~~~~~~"
#print len(contextlist)
j = i
while j < len(wordref):
j = j + 1
if i + window < j:
break
if j in toporef:
if " " in wordref[j]:
contextlist.append([wordref[j].strip().replace(" ", "|"), "OtherTopo", (i-j)])
else:
contextlist.append([wordref[j], "OtherTopo", (i-j)])
elif wordref[j] not in stopwords:
try:
if len(wordref[j]) == 1 and block(wordref[j]) == "General Punctuation":
pass
#print "~~~~Forbidden Character~~~~"
#print wordref[j]
#print "~~~~~~~~~~~~~~~~~~~~~"
#sys.exit()
else:
contextlist.append([wordref[j], "Word", (i-j)])
except:
pass
#print "~~~~Broken String~~~~"
#print wordref[j]
# print "~~~~~~~~~~~~~~~~~~~~~"
return contextlist
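#Example (sketch) of the structure returned by getContext_NoteTopos: a list
#of [token, role, offset] triples, where role is MainTopo/OtherTopo/Word and
#offset is the signed distance from the main toponym (tokens illustrative):
#   [['Ankara', 'MainTopo', 0], ['visited', 'Word', 2], ['Istanbul', 'OtherTopo', 5]]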
def updateInPlace(a,b):
a.update(b)
return a
def getCorrectTable(word, tab1, tab2, tab3):
tablelist = ['enwiki20130102_ner_final_atoi', 'enwiki20130102_ner_final_jtos', 'enwiki20130102_ner_final_ttoz', 'enwiki20130102_ner_final_other']
table = ""
if len(word) > 0:
if word[0].lower() in tab1:
table = 'enwiki20130102_ner_final_atoi'
elif word[0].lower() in tab2:
table = 'enwiki20130102_ner_final_jtos'
elif word[0].lower() in tab3:
table = 'enwiki20130102_ner_final_ttoz'
else:
table = 'enwiki20130102_ner_final_other'
return table
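#Example (sketch): the first letter of the word picks the table shard, with
#tab1/tab2/tab3 as built in get_solutions below (a-i, j-s, t-z), e.g.
#   getCorrectTable('Paris', tab1, tab2, tab3) -> 'enwiki20130102_ner_final_jtos'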
def calc(in_domain_stat_tbl, out_domain_stat_tbl, test_xml, conn_info, gtbl, window, percentile,
tst_tbl, results_file):
print "In Domain Local Statistics Table Name: ", in_domain_stat_tbl
print "Out of domain Local Statistics Table Name: ", out_domain_stat_tbl
print "Test XML directory/file path: ", test_xml
print "DB conneciton info: ", conn_info
print "Grid table used: ", gtbl
print "Window size", window
print "Percentile: ", percentile
#print "Place name weight: ", place_name_weight
#print "Country table name: ", country_tbl
#print "Region table name: ", region_tbl
#print "State table name: ", state_tbl
#print "Out of Domain Lambda", out_corp_lamb
#print "In Domain Lambda", in_corp_lamb
print "Test table name:", tst_tbl
conn = psycopg2.connect(conn_info)
print "Connection Success"
stopwords = set(['.',',','(',')','-', '--', u'\u2010', u'\u2011', u'\u2012', u'\u2013','=',";",':',"'",'"','$','the','a','an','that','this',
'to', 'be', 'have', 'has', 'is', 'are', 'was', 'am', "'s",
'and', 'or','but',
'by', 'of', 'from','in','after','on','for', 'to', 'TO',
                     'I', 'me', 'he', 'him', 'she', 'her', 'we', 'us', 'you', 'your', 'yours', 'they', 'them', 'their', 'it', 'its'])
#stopwords = set([unicode(w, 'utf-8') for w in sw])
cur = conn.cursor()
lat_long_lookup = {}
SQL2 = "SELECT gid, ST_Y(geog::geometry), ST_X(geog::geometry) from %s ;" % gtbl
cur.execute(SQL2)
lat_long_lookup = dict([(g[0], [g[1],g[2]]) for g in cur.fetchall()])
print len(lat_long_lookup)
point_total_correct = 0
poly_total_correct = 0
m = 0
Observations = {}
start_time = datetime.datetime.now()
    if os.path.isdir(test_xml):
print "Reading as directory"
files = os.listdir(test_xml)
point_bigerror = []
poly_bigerror = []
point_dist_list = []
poly_dist_list = []
total_topo = 0
closest_gids = set([])
opf = io.open(results_file, 'w', encoding='utf-8')
opf.close()
print "Getting closest gids..."
for xml in files:
Observations = {}
print xml
wordref, toporef = parse_xml(test_xml+'/'+xml)
Observations = get_solutions(wordref, toporef, xml, tst_tbl, window, out_domain_stat_tbl, in_domain_stat_tbl, cur, stopwords, lat_long_lookup, results_file, total_topo, Observations)
opf = io.open(results_file, 'a', encoding='utf-8')
for ob in Observations:
#print ob
#for sol in Observations[ob]['solutions']:
# print sol
#s1 = [str.join('', [' ', feat[0], ':', str(feat[1])]) for feat in FeatureList]
#sys.exit()
s1 = [' '.join(['', sol.encode('utf-8')]) for sol in Observations[ob]['solutions']]
#for item in s1:
# print item
s2 = ''.join(s1)
s3 = [' '.join(['', feat.encode('utf-8')]) for feat in Observations[ob]['features']]
s4 = ''.join(s3)
row = (s2 + " |" + s4 + '\r\n').decode('utf-8')
opf.write(row)
opf.close()
#sys.exit()
#point_error_sum, poly_error_sum, total_topo, point_bigerror, poly_bigerror, point_dist_list, poly_dist_list, point_total_correct, poly_total_correct = VectorSum(wordref, toporef, total_topo, point_error_sum, poly_error_sum, cur, lat_long_lookup,
# percentile, window, stopwords, place_name_weight, xml, point_bigerror, poly_bigerror, point_dist_list, poly_dist_list, country_tbl, region_tbl, state_tbl,
# geonames_tbl, point_total_correct, poly_total_correct, tst_tbl, cntry_alt, region_alt, state_alt, pplc_alt, in_domain_stat_tbl, in_corp_lamb, out_corp_lamb)
#error_sum2 = MostOverlap(wordref, toporef, error_sum2, cur, lat_long_lookup, stat_tbl, percentile, window, stopwords, place_name_weight, xml)
conn.close()
def get_solutions(wordref, toporef, xml, tst_tbl, window, table, in_domain_stat_tbl, cur, stopwords, lat_long_lookup, results_file, total_topo, Observations):
tab1 = [chr(item) for item in range(ord('a'), ord('i')+1)]
tab2 = [chr(item) for item in range(ord('j'), ord('s')+1)]
tab3 = [chr(item) for item in range(ord('t'), ord('z')+1)]
for j in toporef:
Observations[str(j)+'-'+xml] = {}
topobase = toporef[j][0]
total_topo += 1
#print topobase, total_topo
topotokens = []
contextlist = getContext_NoteTopos(wordref, j, window, stopwords, toporef)
        #Normalize case: force title case on all place names, except for 2- and 3-letter acronyms
if topobase.title() != topobase and (len(toporef[j][0]) != 2 and len(toporef[j][0]) != 3):
#contextlist.append(topobase.title())
contextlist.append([topobase.title(), "MainTopo", 0])
#topotokens.append(toporef[j][0].title())
#topobase = topobase.title()
#print contextlist
#print "Inside title case changer"
#print topobase
#Change acronyms with periods into regular acronyms
if "." in topobase and ". " not in topobase.strip():
combinedtokens = ""
for token in topobase.split("."):
combinedtokens = combinedtokens + token
#topotokens.append(token)
#contextlist.append(token)
#topotokens.append(topobase.replace('.', ''))
topotokens.append(combinedtokens)
#contextlist.append(combinedtokens)
contextlist.append([combinedtokens, "MainTopo", 0])
else: topotokens.append(topobase)
gazet_topos = topotokens
if " " in topobase:
topotokens.append(topobase.replace(" ", '_'))
#contextlist.append(topobase.replace(" ", '|'))
contextlist.append([topobase.replace(" ", '_'), "MainTopo", 0])
#for token in topobase.split(" "):
# topotokens.append(token)
# contextlist.append(token)
#print toporef[j]
gold_lat = float(toporef[j][1]['lat'])
gold_long = float(toporef[j][1]['long'])
gold_doc = xml
gold_wid = j
#Currently finds closest global grid points to lat long in corpus
#Need a new version that finds a set of grid points within the polygon of the gold reference
#closest_gid_SQL = "Select p1.gid from %s as p1 ORDER BY ST_Distance(p1.geog, ST_GeographyFromText('SRID=4326;POINT(%s %s)'));" % (gtbl ,gold_long, gold_lat)
closest_gid_SQL = "SELECT p2.gid, ST_Distance(p2.geog, p1.polygeog) from %s as p1, %s as p2 where p1.placename = %s and p1.docname = %s and p1.wid = %s ORDER BY ST_Distance(p2.geog, p1.polygeog) ASC;" % (tst_tbl, gtbl, '%s', '%s', '%s')
cur.execute(closest_gid_SQL, (topobase, gold_doc, gold_wid))
m = 0
print topobase
query_results = cur.fetchall()
#print len(query_results)
for gid_point in query_results:
gid = gid_point[0]
distance = gid_point[1]
#print gid, distance
if distance == 0.0:
Observations[str(j)+'-'+xml].setdefault('solutions', list()).append(str(gid)+":0")
elif m < 10:
m += 1
Observations[str(j)+'-'+xml].setdefault('solutions', list()).append(str(gid)+":"+str(m))
if m >= 10:
break
for word in contextlist:
if word[0] not in stopwords:
if word[1] == "MainTopo":
Observations[str(j)+'-'+xml].setdefault('features', list()).append("MainTopo-"+word[0].replace(' ', '_').replace('|', '_'))
elif word[1] == "OtherTopo":
#print word[0].replace(' ', '_')
Observations[str(j)+'-'+xml].setdefault('features', list()).append("OtherTopo-"+word[0].replace(' ', '_').replace('|', '_'))
else:
Observations[str(j)+'-'+xml].setdefault('features', list()).append(word[0])
if 'solutions' not in Observations[str(j)+'-'+xml]:
print "No solution found for", topobase, gold_wid, gold_doc
sys.exit("Error")
print "Number Observations found: ", len(Observations)
return Observations
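#Example (sketch) of one emitted training row ("solutions | features",
#Vowpal Wabbit style); gids and tokens are illustrative only:
#   123:0 456:1 789:2 | MainTopo-Ankara OtherTopo-Istanbul visited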
in_domain_stat_tbl = "trconllf_dev_trainsplit1_kernel100k_epanech_gi"
out_domain_stat_tbl = "enwiki20130102_train_kernel100k_grid5_epanech_allwords_ner_fina"
test_xml = "/home/grant/devel/TopCluster/trconllf/xml/dev_trainsplit1"
conn_info = "dbname=topodb user=postgres host='localhost' port='5433' password='grant'"
gtbl = "globalgrid_5_clip_geog"
window = 15
percentile = 1.0
tst_tbl = "trconllf_dev"
results_file = "/home/grant/devel/TopoCluster_Thetas/TRCONLL/vowpal_wabbit/TrainingFeatures_TRCoNLL_Devsplit1.txt"
opf = io.open(results_file, 'w', encoding='utf-8')
opf.close()
calc(in_domain_stat_tbl, out_domain_stat_tbl, test_xml, conn_info, gtbl, window, percentile,
tst_tbl, results_file)
|
|
# Copyright (c) 2008 Giorgos Verigakis <verigak@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from ctypes import *
from ctypes.util import find_library
from errno import *
from functools import partial
from platform import machine, system
from stat import S_IFDIR
from traceback import print_exc
import logging
class c_timespec(Structure):
_fields_ = [('tv_sec', c_long), ('tv_nsec', c_long)]
class c_utimbuf(Structure):
_fields_ = [('actime', c_timespec), ('modtime', c_timespec)]
class c_stat(Structure):
pass # Platform dependent
_system = system()
if _system in ('Darwin', 'FreeBSD'):
_libiconv = CDLL(find_library("iconv"), RTLD_GLOBAL) # libfuse dependency
ENOTSUP = 45
c_dev_t = c_int32
c_fsblkcnt_t = c_ulong
c_fsfilcnt_t = c_ulong
c_gid_t = c_uint32
c_mode_t = c_uint16
c_off_t = c_int64
c_pid_t = c_int32
c_uid_t = c_uint32
setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
c_size_t, c_int, c_uint32)
getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
c_size_t, c_uint32)
c_stat._fields_ = [
('st_dev', c_dev_t),
('st_ino', c_uint32),
('st_mode', c_mode_t),
('st_nlink', c_uint16),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('st_rdev', c_dev_t),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec),
('st_size', c_off_t),
('st_blocks', c_int64),
('st_blksize', c_int32)]
elif _system == 'Linux':
ENOTSUP = 95
c_dev_t = c_ulonglong
c_fsblkcnt_t = c_ulonglong
c_fsfilcnt_t = c_ulonglong
c_gid_t = c_uint
c_mode_t = c_uint
c_off_t = c_longlong
c_pid_t = c_int
c_uid_t = c_uint
setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t, c_int)
getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t)
_machine = machine()
if _machine == 'x86_64':
c_stat._fields_ = [
('st_dev', c_dev_t),
('st_ino', c_ulong),
('st_nlink', c_ulong),
('st_mode', c_mode_t),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('__pad0', c_int),
('st_rdev', c_dev_t),
('st_size', c_off_t),
('st_blksize', c_long),
('st_blocks', c_long),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec)]
elif _machine == 'ppc':
c_stat._fields_ = [
('st_dev', c_dev_t),
('st_ino', c_ulonglong),
('st_mode', c_mode_t),
('st_nlink', c_uint),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('st_rdev', c_dev_t),
('__pad2', c_ushort),
('st_size', c_off_t),
('st_blksize', c_long),
('st_blocks', c_longlong),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec)]
else:
# i686, use as fallback for everything else
c_stat._fields_ = [
('st_dev', c_dev_t),
('__pad1', c_ushort),
('__st_ino', c_ulong),
('st_mode', c_mode_t),
('st_nlink', c_uint),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('st_rdev', c_dev_t),
('__pad2', c_ushort),
('st_size', c_off_t),
('st_blksize', c_long),
('st_blocks', c_longlong),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec),
('st_ino', c_ulonglong)]
else:
raise NotImplementedError('%s is not supported.' % _system)
class c_statvfs(Structure):
_fields_ = [
('f_bsize', c_ulong),
('f_frsize', c_ulong),
('f_blocks', c_fsblkcnt_t),
('f_bfree', c_fsblkcnt_t),
('f_bavail', c_fsblkcnt_t),
('f_files', c_fsfilcnt_t),
('f_ffree', c_fsfilcnt_t),
('f_favail', c_fsfilcnt_t)]
if _system == 'FreeBSD':
c_fsblkcnt_t = c_uint64
c_fsfilcnt_t = c_uint64
setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t, c_int)
getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t)
class c_statvfs(Structure):
_fields_ = [
('f_bavail', c_fsblkcnt_t),
('f_bfree', c_fsblkcnt_t),
('f_blocks', c_fsblkcnt_t),
('f_favail', c_fsfilcnt_t),
('f_ffree', c_fsfilcnt_t),
('f_files', c_fsfilcnt_t),
('f_bsize', c_ulong),
('f_flag', c_ulong),
('f_frsize', c_ulong)]
class fuse_file_info(Structure):
_fields_ = [
('flags', c_int),
('fh_old', c_ulong),
('writepage', c_int),
('direct_io', c_uint, 1),
('keep_cache', c_uint, 1),
('flush', c_uint, 1),
('padding', c_uint, 29),
('fh', c_uint64),
('lock_owner', c_uint64)]
class fuse_context(Structure):
_fields_ = [
('fuse', c_voidp),
('uid', c_uid_t),
('gid', c_gid_t),
('pid', c_pid_t),
('private_data', c_voidp)]
class fuse_operations(Structure):
_fields_ = [
('getattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat))),
('readlink', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t)),
('getdir', c_voidp), # Deprecated, use readdir
('mknod', CFUNCTYPE(c_int, c_char_p, c_mode_t, c_dev_t)),
('mkdir', CFUNCTYPE(c_int, c_char_p, c_mode_t)),
('unlink', CFUNCTYPE(c_int, c_char_p)),
('rmdir', CFUNCTYPE(c_int, c_char_p)),
('symlink', CFUNCTYPE(c_int, c_char_p, c_char_p)),
('rename', CFUNCTYPE(c_int, c_char_p, c_char_p)),
('link', CFUNCTYPE(c_int, c_char_p, c_char_p)),
('chmod', CFUNCTYPE(c_int, c_char_p, c_mode_t)),
('chown', CFUNCTYPE(c_int, c_char_p, c_uid_t, c_gid_t)),
('truncate', CFUNCTYPE(c_int, c_char_p, c_off_t)),
('utime', c_voidp), # Deprecated, use utimens
('open', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
('read', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t, c_off_t,
POINTER(fuse_file_info))),
('write', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t, c_off_t,
POINTER(fuse_file_info))),
('statfs', CFUNCTYPE(c_int, c_char_p, POINTER(c_statvfs))),
('flush', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
('release', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
('fsync', CFUNCTYPE(c_int, c_char_p, c_int, POINTER(fuse_file_info))),
('setxattr', setxattr_t),
('getxattr', getxattr_t),
('listxattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t)),
('removexattr', CFUNCTYPE(c_int, c_char_p, c_char_p)),
('opendir', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
('readdir', CFUNCTYPE(c_int, c_char_p, c_voidp, CFUNCTYPE(c_int, c_voidp,
c_char_p, POINTER(c_stat), c_off_t), c_off_t, POINTER(fuse_file_info))),
('releasedir', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
('fsyncdir', CFUNCTYPE(c_int, c_char_p, c_int, POINTER(fuse_file_info))),
('init', CFUNCTYPE(c_voidp, c_voidp)),
('destroy', CFUNCTYPE(c_voidp, c_voidp)),
('access', CFUNCTYPE(c_int, c_char_p, c_int)),
('create', CFUNCTYPE(c_int, c_char_p, c_mode_t, POINTER(fuse_file_info))),
('ftruncate', CFUNCTYPE(c_int, c_char_p, c_off_t, POINTER(fuse_file_info))),
('fgetattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat),
POINTER(fuse_file_info))),
('lock', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info), c_int, c_voidp)),
('utimens', CFUNCTYPE(c_int, c_char_p, POINTER(c_utimbuf))),
('bmap', CFUNCTYPE(c_int, c_char_p, c_size_t, POINTER(c_ulonglong)))]
def time_of_timespec(ts):
    return ts.tv_sec + ts.tv_nsec / 10.0 ** 9  # float division keeps the sub-second part under Python 2
def set_st_attrs(st, attrs):
for key, val in attrs.items():
if key in ('st_atime', 'st_mtime', 'st_ctime'):
timespec = getattr(st, key + 'spec')
timespec.tv_sec = int(val)
timespec.tv_nsec = int((val - timespec.tv_sec) * 10 ** 9)
elif hasattr(st, key):
setattr(st, key, val)
_libfuse_path = find_library('fuse')
if not _libfuse_path:
raise EnvironmentError('Unable to find libfuse')
_libfuse = CDLL(_libfuse_path)
_libfuse.fuse_get_context.restype = POINTER(fuse_context)
def fuse_get_context():
"""Returns a (uid, gid, pid) tuple"""
ctxp = _libfuse.fuse_get_context()
ctx = ctxp.contents
return ctx.uid, ctx.gid, ctx.pid
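# Example (sketch): fuse_get_context() is only meaningful while an operation
# is being served, e.g. from inside an Operations method:
#
#     uid, gid, pid = fuse_get_context()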
class FUSE(object):
"""This class is the lower level interface and should not be subclassed
under normal use. Its methods are called by fuse.
Assumes API version 2.6 or later."""
def __init__(self, operations, mountpoint, raw_fi=False, **kwargs):
"""Setting raw_fi to True will cause FUSE to pass the fuse_file_info
class as is to Operations, instead of just the fh field.
This gives you access to direct_io, keep_cache, etc."""
self.operations = operations
self.raw_fi = raw_fi
args = ['fuse']
if kwargs.pop('foreground', False):
args.append('-f')
if kwargs.pop('debug', False):
args.append('-d')
if kwargs.pop('nothreads', False):
args.append('-s')
kwargs.setdefault('fsname', operations.__class__.__name__)
args.append('-o')
args.append(','.join(key if val == True else '%s=%s' % (key, val)
for key, val in kwargs.items()))
args.append(mountpoint)
argv = (c_char_p * len(args))(*args)
fuse_ops = fuse_operations()
for name, prototype in fuse_operations._fields_:
if prototype != c_voidp and getattr(operations, name, None):
op = partial(self._wrapper_, getattr(self, name))
setattr(fuse_ops, name, prototype(op))
_libfuse.fuse_main_real(len(args), argv, pointer(fuse_ops),
sizeof(fuse_ops), None)
del self.operations # Invoke the destructor
def _wrapper_(self, func, *args, **kwargs):
"""Decorator for the methods that follow"""
try:
return func(*args, **kwargs) or 0
except OSError as e:
return -(e.errno or EFAULT)
except:
print_exc()
return -EFAULT
def getattr(self, path, buf):
return self.fgetattr(path, buf, None)
def readlink(self, path, buf, bufsize):
ret = self.operations('readlink', path).encode('utf-8')
data = create_string_buffer(ret[:bufsize - 1])
memmove(buf, data, len(data))
return 0
def mknod(self, path, mode, dev):
return self.operations('mknod', path, mode, dev)
def mkdir(self, path, mode):
return self.operations('mkdir', path, mode)
def unlink(self, path):
return self.operations('unlink', path)
def rmdir(self, path):
return self.operations('rmdir', path)
def symlink(self, source, target):
return self.operations('symlink', target, source)
def rename(self, old, new):
return self.operations('rename', old, new)
def link(self, source, target):
return self.operations('link', target, source)
def chmod(self, path, mode):
return self.operations('chmod', path, mode)
def chown(self, path, uid, gid):
return self.operations('chown', path, uid, gid)
def truncate(self, path, length):
return self.operations('truncate', path, length)
def open(self, path, fip):
fi = fip.contents
if self.raw_fi:
return self.operations('open', path, fi)
else:
fi.fh = self.operations('open', path, fi.flags)
return 0
def read(self, path, buf, size, offset, fip):
fh = fip.contents if self.raw_fi else fip.contents.fh
ret = self.operations('read', path, size, offset, fh)
if not ret:
return 0
data = create_string_buffer(ret[:size], size)
memmove(buf, data, size)
return size
def write(self, path, buf, size, offset, fip):
data = string_at(buf, size)
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('write', path, data, offset, fh)
def statfs(self, path, buf):
stv = buf.contents
attrs = self.operations('statfs', path)
for key, val in attrs.items():
if hasattr(stv, key):
setattr(stv, key, val)
return 0
def flush(self, path, fip):
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('flush', path, fh)
def release(self, path, fip):
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('release', path, fh)
def fsync(self, path, datasync, fip):
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('fsync', path, datasync, fh)
def setxattr(self, path, name, value, size, options, *args):
data = string_at(value, size)
return self.operations('setxattr', path, name, data, options, *args)
def getxattr(self, path, name, value, size, *args):
ret = self.operations('getxattr', path, name, *args)
retsize = len(ret)
buf = create_string_buffer(ret, retsize) # Does not add trailing 0
if bool(value):
if retsize > size:
return -ERANGE
memmove(value, buf, retsize)
return retsize
def listxattr(self, path, namebuf, size):
ret = self.operations('listxattr', path)
buf = create_string_buffer('\x00'.join(ret)) if ret else ''
bufsize = len(buf)
if bool(namebuf):
if bufsize > size:
return -ERANGE
memmove(namebuf, buf, bufsize)
return bufsize
def removexattr(self, path, name):
return self.operations('removexattr', path, name)
def opendir(self, path, fip):
# Ignore raw_fi
fip.contents.fh = self.operations('opendir', path)
return 0
def readdir(self, path, buf, filler, offset, fip):
# Ignore raw_fi
for item in self.operations('readdir', path, fip.contents.fh):
if isinstance(item, str):
name, st, offset = item, None, 0
name = name.encode('utf-8')
else:
name, attrs, offset = item
if attrs:
st = c_stat()
set_st_attrs(st, attrs)
else:
st = None
if filler(buf, name, st, offset) != 0:
break
return 0
def releasedir(self, path, fip):
# Ignore raw_fi
return self.operations('releasedir', path, fip.contents.fh)
def fsyncdir(self, path, datasync, fip):
# Ignore raw_fi
return self.operations('fsyncdir', path, datasync, fip.contents.fh)
def init(self, conn):
return self.operations('init', '/')
def destroy(self, private_data):
return self.operations('destroy', '/')
def access(self, path, amode):
return self.operations('access', path, amode)
def create(self, path, mode, fip):
fi = fip.contents
if self.raw_fi:
return self.operations('create', path, mode, fi)
else:
fi.fh = self.operations('create', path, mode)
return 0
def ftruncate(self, path, length, fip):
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('truncate', path, length, fh)
def fgetattr(self, path, buf, fip):
memset(buf, 0, sizeof(c_stat))
st = buf.contents
fh = fip and (fip.contents if self.raw_fi else fip.contents.fh)
attrs = self.operations('getattr', path, fh)
set_st_attrs(st, attrs)
return 0
def lock(self, path, fip, cmd, lock):
fh = fip.contents if self.raw_fi else fip.contents.fh
return self.operations('lock', path, fh, cmd, lock)
def utimens(self, path, buf):
if buf:
atime = time_of_timespec(buf.contents.actime)
mtime = time_of_timespec(buf.contents.modtime)
times = (atime, mtime)
else:
times = None
return self.operations('utimens', path, times)
def bmap(self, path, blocksize, idx):
return self.operations('bmap', path, blocksize, idx)
class Operations(object):
"""This class should be subclassed and passed as an argument to FUSE on
initialization. All operations should raise an OSError exception on
error.
When in doubt of what an operation should do, check the FUSE header
file or the corresponding system call man page."""
def __call__(self, op, *args):
if not hasattr(self, op):
raise OSError(EFAULT, '')
return getattr(self, op)(*args)
def access(self, path, amode):
return 0
bmap = None
def chmod(self, path, mode):
raise OSError(EROFS, '')
def chown(self, path, uid, gid):
raise OSError(EROFS, '')
def create(self, path, mode, fi=None):
"""When raw_fi is False (default case), fi is None and create should
return a numerical file handle.
When raw_fi is True the file handle should be set directly by create
and return 0."""
raise OSError(EROFS, '')
def destroy(self, path):
"""Called on filesystem destruction. Path is always /"""
pass
def flush(self, path, fh):
return 0
def fsync(self, path, datasync, fh):
return 0
def fsyncdir(self, path, datasync, fh):
return 0
def getattr(self, path, fh=None):
"""Returns a dictionary with keys identical to the stat C structure
of stat(2).
st_atime, st_mtime and st_ctime should be floats.
        NOTE: There is an incompatibility between Linux and Mac OS X concerning
st_nlink of directories. Mac OS X counts all files inside the directory,
while Linux counts only the subdirectories."""
if path != '/':
raise OSError(ENOENT, '')
return dict(st_mode=(S_IFDIR | 0o755), st_nlink=2)
def getxattr(self, path, name, position=0):
raise OSError(ENOTSUP, '')
def init(self, path):
"""Called on filesystem initialization. Path is always /
Use it instead of __init__ if you start threads on initialization."""
pass
def link(self, target, source):
raise OSError(EROFS, '')
def listxattr(self, path):
return []
lock = None
def mkdir(self, path, mode):
raise OSError(EROFS, '')
def mknod(self, path, mode, dev):
raise OSError(EROFS, '')
def open(self, path, flags):
"""When raw_fi is False (default case), open should return a numerical
file handle.
When raw_fi is True the signature of open becomes:
open(self, path, fi)
and the file handle should be set directly."""
return 0
def opendir(self, path):
"""Returns a numerical file handle."""
return 0
def read(self, path, size, offset, fh):
"""Returns a string containing the data requested."""
raise OSError(ENOENT, '')
def readdir(self, path, fh):
"""Can return either a list of names, or a list of (name, attrs, offset)
tuples. attrs is a dict as in getattr."""
return ['.', '..']
def readlink(self, path):
raise OSError(ENOENT, '')
def release(self, path, fh):
return 0
def releasedir(self, path, fh):
return 0
def removexattr(self, path, name):
raise OSError(ENOTSUP, '')
def rename(self, old, new):
raise OSError(EROFS, '')
def rmdir(self, path):
raise OSError(EROFS, '')
def setxattr(self, path, name, value, options, position=0):
raise OSError(ENOTSUP, '')
def statfs(self, path):
"""Returns a dictionary with keys identical to the statvfs C structure
of statvfs(3).
On Mac OS X f_bsize and f_frsize must be a power of 2 (minimum 512)."""
return {}
def symlink(self, target, source):
raise OSError(EROFS, '')
def truncate(self, path, length, fh=None):
raise OSError(EROFS, '')
def unlink(self, path):
raise OSError(EROFS, '')
def utimens(self, path, times=None):
"""Times is a (atime, mtime) tuple. If None use current time."""
return 0
def write(self, path, data, offset, fh):
raise OSError(EROFS, '')
class LoggingMixIn:
def __call__(self, op, path, *args):
logging.debug('-> %s %s %s', op, path, repr(args))
ret = '[Unknown Error]'
try:
ret = getattr(self, op)(path, *args)
return ret
except OSError as e:
ret = str(e)
raise
finally:
logging.debug('<- %s %s', op, repr(ret))
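# Example (sketch): a minimal read-only filesystem built from the classes
# above; the mountpoint in the commented call is an assumption.
if __name__ == '__main__':
    from stat import S_IFREG

    class HelloFS(LoggingMixIn, Operations):
        """Serve a single read-only file at /hello."""
        DATA = 'hello world\n'

        def getattr(self, path, fh=None):
            if path == '/':
                return dict(st_mode=(S_IFDIR | 0o755), st_nlink=2)
            if path == '/hello':
                return dict(st_mode=(S_IFREG | 0o444), st_nlink=1,
                            st_size=len(self.DATA))
            raise OSError(ENOENT, '')

        def readdir(self, path, fh):
            return ['.', '..', 'hello']

        def read(self, path, size, offset, fh):
            return self.DATA[offset:offset + size]

    # FUSE(HelloFS(), '/mnt/hello', foreground=True)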
|
|
import numpy as np
import scipy.sparse as sparse
from menpo.landmark import LandmarkGroup
from . import (PointCloud, UndirectedGraph, DirectedGraph, Tree, TriMesh,
PointUndirectedGraph, PointDirectedGraph, PointTree)
def stencil_grid(stencil, shape, dtype=None, format=None):
"""Construct a sparse matrix form a local matrix stencil
This function is useful for building sparse adjacency matrices according
to a specific connectivity pattern.
This function is borrowed from the PyAMG project, under the permission of
the MIT license:
The MIT License (MIT)
Copyright (c) 2008-2015 PyAMG Developers
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
The original version of this file can be found here:
https://github.com/pyamg/pyamg/blob/621d63411895898660e5ea078840118905bec061/pyamg/gallery/stencil.py
This file has been modified to fit the style standards of the Menpo
project.
Parameters
----------
    stencil : `ndarray`
        Matrix stencil stored in N-d array
    shape : `tuple`
        Tuple containing the N shape dimensions
dtype : `np.dtype`, optional
Numpy data type of the result
format : `str`, optional
Sparse matrix format to return, e.g. "csr", "coo", etc.
Returns
-------
A : sparse matrix
        Sparse matrix which represents the operator given by applying
        the stencil at each vertex of a regular shape with the given dimensions.
Notes
-----
The shape vertices are enumerated as ``arange(prod(shape)).reshape(shape)``.
This implies that the last shape dimension cycles fastest, while the
first dimension cycles slowest. For example, if ``shape=(2,3)`` then the
shape vertices are ordered as ``(0,0), (0,1), (0,2), (1,0), (1,1), (1,2)``.
This coincides with the ordering used by the NumPy functions
``ndenumerate()`` and ``mgrid()``.
Raises
------
ValueError
If the stencil shape is not odd.
ValueError
If the stencil dimension does not equal the number of shape dimensions
ValueError
If the shape dimensions are not all positive
Examples
--------
>>> import numpy as np
>>> from menpo.shape import stencil_grid
>>> stencil = [[0,-1,0],[-1,4,-1],[0,-1,0]] # 2D Poisson stencil
>>> shape = (3, 3) # 2D shape with shape 3x3
    >>> A = stencil_grid(stencil, shape, dtype=float, format='csr')
>>> A.todense()
matrix([[ 4., -1., 0., -1., 0., 0., 0., 0., 0.],
[-1., 4., -1., 0., -1., 0., 0., 0., 0.],
[ 0., -1., 4., 0., 0., -1., 0., 0., 0.],
[-1., 0., 0., 4., -1., 0., -1., 0., 0.],
[ 0., -1., 0., -1., 4., -1., 0., -1., 0.],
[ 0., 0., -1., 0., -1., 4., 0., 0., -1.],
[ 0., 0., 0., -1., 0., 0., 4., -1., 0.],
[ 0., 0., 0., 0., -1., 0., -1., 4., -1.],
[ 0., 0., 0., 0., 0., -1., 0., -1., 4.]])
>>> stencil = [[0,1,0],[1,0,1],[0,1,0]] # 2D Lattice Connectivity
>>> shape = (3, 3) # 2D shape with shape 3x3
    >>> A = stencil_grid(stencil, shape, dtype=float, format='csr')
>>> A.todense()
matrix([[ 0., 1., 0., 1., 0., 0., 0., 0., 0.],
[ 1., 0., 1., 0., 1., 0., 0., 0., 0.],
[ 0., 1., 0., 0., 0., 1., 0., 0., 0.],
[ 1., 0., 0., 0., 1., 0., 1., 0., 0.],
[ 0., 1., 0., 1., 0., 1., 0., 1., 0.],
[ 0., 0., 1., 0., 1., 0., 0., 0., 1.],
[ 0., 0., 0., 1., 0., 0., 0., 1., 0.],
[ 0., 0., 0., 0., 1., 0., 1., 0., 1.],
[ 0., 0., 0., 0., 0., 1., 0., 1., 0.]])
"""
stencil = np.asarray(stencil, dtype=dtype)
shape = tuple(shape)
if not (np.asarray(stencil.shape) % 2 == 1).all():
raise ValueError('all stencil dimensions must be odd')
if len(shape) != np.ndim(stencil):
        raise ValueError('stencil dimension must equal number of shape '
                         'dimensions')
if min(shape) < 1:
raise ValueError('shape dimensions must be positive')
N_v = np.prod(shape) # number of vertices in the mesh
N_s = (stencil != 0).sum() # number of nonzero stencil entries
# diagonal offsets
diags = np.zeros(N_s, dtype=int)
# compute index offset of each dof within the stencil
strides = np.cumprod([1] + list(reversed(shape)))[:-1]
indices = tuple(i.copy() for i in stencil.nonzero())
for i, s in zip(indices, stencil.shape):
i -= s // 2
for stride, coords in zip(strides, reversed(indices)):
diags += stride * coords
data = stencil[stencil != 0].repeat(N_v).reshape(N_s, N_v)
indices = np.vstack(indices).T
# zero boundary connections
for index, diag in zip(indices, data):
diag = diag.reshape(shape)
for n, i in enumerate(index):
if i > 0:
s = [slice(None)] * len(shape)
s[n] = slice(0, i)
                diag[tuple(s)] = 0
elif i < 0:
                s = [slice(None)] * len(shape)
                s[n] = slice(i, None)
                diag[tuple(s)] = 0
# remove diagonals that lie outside matrix
mask = abs(diags) < N_v
if not mask.all():
diags = diags[mask]
data = data[mask]
# sum duplicate diagonals
if len(np.unique(diags)) != len(diags):
new_diags = np.unique(diags)
new_data = np.zeros((len(new_diags), data.shape[1]),
dtype=data.dtype)
for dia, dat in zip(diags, data):
n = np.searchsorted(new_diags, dia)
new_data[n, :] += dat
diags = new_diags
data = new_data
return sparse.dia_matrix((data, diags),
shape=(N_v, N_v)).asformat(format)
def _get_points_and_number_of_vertices(shape):
if isinstance(shape, LandmarkGroup):
return shape.lms.points, shape.n_landmarks
elif isinstance(shape, PointCloud):
return shape.points, shape.n_points
else:
raise ValueError("shape must be either a LandmarkGroup or a "
"PointCloud instance.")
def _get_star_graph_edges(vertices_list, root_vertex):
edges = []
for v in vertices_list:
if v != root_vertex:
edges.append([root_vertex, v])
return edges
def _get_complete_graph_edges(vertices_list):
n_vertices = len(vertices_list)
edges = []
for i in range(n_vertices-1):
k = i + 1
for j in range(k, n_vertices, 1):
v1 = vertices_list[i]
v2 = vertices_list[j]
edges.append([v1, v2])
return edges
def _get_chain_graph_edges(vertices_list, closed):
n_vertices = len(vertices_list)
edges = []
for i in range(n_vertices-1):
k = i + 1
v1 = vertices_list[i]
v2 = vertices_list[k]
edges.append([v1, v2])
if closed:
v1 = vertices_list[-1]
v2 = vertices_list[0]
edges.append([v1, v2])
return edges
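# Illustrative sketch of the edge lists the three private helpers above
# produce for four vertices (root 0 for the star):
#
#     _get_star_graph_edges(range(4), 0)       # [[0, 1], [0, 2], [0, 3]]
#     _get_complete_graph_edges(range(4))      # [[0, 1], [0, 2], [0, 3],
#                                              #  [1, 2], [1, 3], [2, 3]]
#     _get_chain_graph_edges(range(4), False)  # [[0, 1], [1, 2], [2, 3]]
#     _get_chain_graph_edges(range(4), True)   # ...plus the closing [3, 0]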
def empty_graph(shape, return_pointgraph=True):
r"""
Returns an empty graph given the landmarks configuration of a shape
instance.
Parameters
----------
shape : :map:`PointCloud` or :map:`LandmarkGroup` or subclass
The shape instance that defines the landmarks configuration based on
which the graph will be created.
return_pointgraph : `bool`, optional
If ``True``, then a :map:`PointUndirectedGraph` instance will be
returned. If ``False``, then an :map:`UndirectedGraph` instance will be
returned.
Returns
-------
graph : :map:`UndirectedGraph` or :map:`PointUndirectedGraph`
The generated graph.
"""
# get points and number of vertices
points, n_vertices = _get_points_and_number_of_vertices(shape)
# create empty edges
edges = None
# return graph
if return_pointgraph:
return PointUndirectedGraph.init_from_edges(points, edges, n_vertices,
skip_checks=True)
else:
return UndirectedGraph.init_from_edges(edges, n_vertices,
skip_checks=True)
def star_graph(shape, root_vertex, graph_cls=PointTree):
r"""
Returns a star graph given the landmarks configuration of a shape instance.
Parameters
----------
shape : :map:`PointCloud` or :map:`LandmarkGroup` or subclass
The shape instance that defines the landmarks configuration based on
which the graph will be created.
root_vertex : `int`
The root of the star tree.
graph_cls : `Graph` or `PointGraph` subclass
The output graph type.
Possible options are ::
{:map:`UndirectedGraph`, :map:`DirectedGraph`, :map:`Tree`,
:map:`PointUndirectedGraph`, :map:`PointDirectedGraph`,
:map:`PointTree`}
Returns
-------
graph : `Graph` or `PointGraph` subclass
The generated graph.
Raises
------
ValueError
graph_cls must be UndirectedGraph, DirectedGraph, Tree,
PointUndirectedGraph, PointDirectedGraph or PointTree.
"""
# get points and number of vertices
points, n_vertices = _get_points_and_number_of_vertices(shape)
# create star graph edges
edges = _get_star_graph_edges(range(n_vertices), root_vertex)
# return graph
if graph_cls == Tree:
return graph_cls.init_from_edges(edges=edges, n_vertices=n_vertices,
root_vertex=root_vertex,
skip_checks=True)
elif graph_cls == PointTree:
return graph_cls.init_from_edges(points=points, edges=edges,
root_vertex=root_vertex,
skip_checks=True)
elif graph_cls == UndirectedGraph or graph_cls == DirectedGraph:
return graph_cls.init_from_edges(edges=edges, n_vertices=n_vertices,
skip_checks=True)
elif graph_cls == PointUndirectedGraph or graph_cls == PointDirectedGraph:
return graph_cls.init_from_edges(points=points, edges=edges,
skip_checks=True)
else:
raise ValueError("graph_cls must be UndirectedGraph, DirectedGraph, "
"Tree, PointUndirectedGraph, PointDirectedGraph or "
"PointTree.")
def complete_graph(shape, graph_cls=PointUndirectedGraph):
r"""
Returns a complete graph given the landmarks configuration of a shape
instance.
Parameters
----------
shape : :map:`PointCloud` or :map:`LandmarkGroup` or subclass
The shape instance that defines the landmarks configuration based on
which the graph will be created.
graph_cls : `Graph` or `PointGraph` subclass
The output graph type.
Possible options are ::
{:map:`UndirectedGraph`, :map:`DirectedGraph`,
:map:`PointUndirectedGraph`, :map:`PointDirectedGraph`}
Returns
-------
graph : `Graph` or `PointGraph` subclass
The generated graph.
Raises
------
ValueError
graph_cls must be UndirectedGraph, DirectedGraph, PointUndirectedGraph
or PointDirectedGraph.
"""
# get points and number of vertices
points, n_vertices = _get_points_and_number_of_vertices(shape)
# create complete graph edges
edges = _get_complete_graph_edges(range(n_vertices))
# return graph
if graph_cls == UndirectedGraph or graph_cls == DirectedGraph:
return graph_cls.init_from_edges(edges=edges, n_vertices=n_vertices,
skip_checks=True)
elif graph_cls == PointUndirectedGraph or graph_cls == PointDirectedGraph:
return graph_cls.init_from_edges(points=points, edges=edges,
skip_checks=True)
else:
raise ValueError("graph_cls must be UndirectedGraph, DirectedGraph, "
"PointUndirectedGraph or PointDirectedGraph.")
def chain_graph(shape, graph_cls=PointDirectedGraph, closed=False):
r"""
Returns a chain graph given the landmarks configuration of a shape instance.
Parameters
----------
shape : :map:`PointCloud` or :map:`LandmarkGroup` or subclass
The shape instance that defines the landmarks configuration based on
which the graph will be created.
graph_cls : `Graph` or `PointGraph` subclass
The output graph type.
Possible options are ::
{:map:`UndirectedGraph`, :map:`DirectedGraph`, :map:`Tree`,
:map:`PointUndirectedGraph`, :map:`PointDirectedGraph`,
:map:`PointTree`}
closed : `bool`, optional
If ``True``, then the chain will be closed (i.e. edge between the
first and last vertices).
Returns
-------
graph : `Graph` or `PointGraph` subclass
The generated graph.
Raises
------
ValueError
A closed chain graph cannot be a Tree or PointTree instance.
ValueError
graph_cls must be UndirectedGraph, DirectedGraph, Tree,
PointUndirectedGraph, PointDirectedGraph or PointTree.
"""
# get points and number of vertices
points, n_vertices = _get_points_and_number_of_vertices(shape)
# create chain graph edges
edges = _get_chain_graph_edges(range(n_vertices), closed=closed)
# return graph
if graph_cls == Tree:
if closed:
raise ValueError("A closed chain graph cannot be a Tree "
"instance.")
else:
return graph_cls.init_from_edges(edges=edges, n_vertices=n_vertices,
root_vertex=0, skip_checks=True)
elif graph_cls == PointTree:
if closed:
raise ValueError("A closed chain graph cannot be a PointTree "
"instance.")
else:
return graph_cls.init_from_edges(points=points, edges=edges,
root_vertex=0, skip_checks=True)
elif graph_cls == UndirectedGraph or graph_cls == DirectedGraph:
return graph_cls.init_from_edges(edges=edges, n_vertices=n_vertices,
skip_checks=True)
elif graph_cls == PointUndirectedGraph or graph_cls == PointDirectedGraph:
return graph_cls.init_from_edges(points=points, edges=edges,
skip_checks=True)
else:
raise ValueError("graph_cls must be UndirectedGraph, DirectedGraph, "
"Tree, PointUndirectedGraph, PointDirectedGraph or "
"PointTree.")
def delaunay_graph(shape, return_pointgraph=True):
r"""
Returns a graph with the edges being generated by Delaunay triangulation.
Parameters
----------
shape : :map:`PointCloud` or :map:`LandmarkGroup` or subclass
The shape instance that defines the landmarks configuration based on
which the graph will be created.
return_pointgraph : `bool`, optional
If ``True``, then a :map:`PointUndirectedGraph` instance will be
returned. If ``False``, then an :map:`UndirectedGraph` instance will be
returned.
Returns
-------
graph : :map:`UndirectedGraph` or :map:`PointUndirectedGraph`
The generated graph.
"""
# get TriMesh instance that estimates the Delaunay triangulation
if isinstance(shape, LandmarkGroup):
trimesh = TriMesh(shape.lms.points)
n_vertices = shape.n_landmarks
points = shape.lms.points
elif isinstance(shape, PointCloud):
trimesh = TriMesh(shape.points)
n_vertices = shape.n_points
points = shape.points
else:
raise ValueError("shape must be either a LandmarkGroup or a "
"PointCloud instance.")
# get edges
edges = trimesh.edge_indices()
# return graph
if return_pointgraph:
return PointUndirectedGraph.init_from_edges(
points=points, edges=edges, skip_checks=True)
else:
return UndirectedGraph.init_from_edges(
edges=edges, n_vertices=n_vertices, skip_checks=True)
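# Illustrative usage sketch: building each graph type from a small toy
# PointCloud (the point values below are arbitrary).
#
#     import numpy as np
#     from menpo.shape import PointCloud
#
#     shape = PointCloud(np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.]]))
#     g_empty = empty_graph(shape)               # no edges
#     g_star = star_graph(shape, root_vertex=0)  # PointTree by default
#     g_chain = chain_graph(shape, closed=True)  # closed PointDirectedGraph
#     g_full = complete_graph(shape)             # PointUndirectedGraph
#     g_tri = delaunay_graph(shape)              # edges from triangulation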
|
|
# -*- coding: utf-8 -*-
"""Windows EventLog resources database reader."""
import collections
import os
import sqlite3
from plaso.containers import artifacts
from plaso.engine import path_helper
from plaso.helpers.windows import resource_files
from plaso.output import logger
class Sqlite3DatabaseFile(object):
"""Class that defines a sqlite3 database file."""
_HAS_TABLE_QUERY = (
'SELECT name FROM sqlite_master '
'WHERE type = "table" AND name = "{0:s}"')
def __init__(self):
"""Initializes the database file object."""
super(Sqlite3DatabaseFile, self).__init__()
self._connection = None
self._cursor = None
self.filename = None
self.read_only = None
def Close(self):
"""Closes the database file.
Raises:
RuntimeError: if the database is not opened.
"""
if not self._connection:
      raise RuntimeError('Cannot close database: not opened.')
    # We need to commit, otherwise not all data is stored in the database.
self._connection.commit()
self._connection.close()
self._connection = None
self._cursor = None
self.filename = None
self.read_only = None
def HasTable(self, table_name):
"""Determines if a specific table exists.
Args:
table_name (str): table name.
Returns:
bool: True if the table exists.
Raises:
RuntimeError: if the database is not opened.
"""
if not self._connection:
raise RuntimeError(
          'Cannot determine if table exists: database not opened.')
sql_query = self._HAS_TABLE_QUERY.format(table_name)
self._cursor.execute(sql_query)
if self._cursor.fetchone():
return True
return False
def GetValues(self, table_names, column_names, condition):
"""Retrieves values from a table.
Args:
table_names (list[str]): table names.
column_names (list[str]): column names.
condition (str): query condition such as
"log_source == 'Application Error'".
Yields:
sqlite3.row: row.
Raises:
RuntimeError: if the database is not opened.
"""
if not self._connection:
      raise RuntimeError('Cannot retrieve values: database not opened.')
if condition:
condition = ' WHERE {0:s}'.format(condition)
sql_query = 'SELECT {1:s} FROM {0:s}{2:s}'.format(
', '.join(table_names), ', '.join(column_names), condition)
self._cursor.execute(sql_query)
# TODO: have a look at https://docs.python.org/2/library/
# sqlite3.html#sqlite3.Row.
for row in self._cursor:
yield {
column_name: row[column_index]
for column_index, column_name in enumerate(column_names)}
def Open(self, filename, read_only=False):
"""Opens the database file.
Args:
filename (str): filename of the database.
read_only (Optional[bool]): True if the database should be opened in
read-only mode. Since sqlite3 does not support a real read-only
mode we fake it by only permitting SELECT queries.
Returns:
bool: True if successful.
Raises:
RuntimeError: if the database is already opened.
"""
if self._connection:
      raise RuntimeError('Cannot open database: already opened.')
self.filename = filename
self.read_only = read_only
try:
self._connection = sqlite3.connect(filename)
except sqlite3.OperationalError:
return False
if not self._connection:
return False
self._cursor = self._connection.cursor()
if not self._cursor:
return False
return True
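# Illustrative usage sketch for the wrapper above; the database filename is
# hypothetical, while the table and column names mirror those used by the
# reader class below.
#
#   database_file = Sqlite3DatabaseFile()
#   if database_file.Open('winevt-rc.db', read_only=True):
#     if database_file.HasTable('event_log_providers'):
#       for row in database_file.GetValues(
#           ['event_log_providers'], ['log_source'], ''):
#         print(row['log_source'])
#     database_file.Close()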
class WinevtResourcesSqlite3DatabaseReader(object):
"""Windows EventLog resources SQLite database reader."""
def __init__(self):
"""Initializes a Windows EventLog resources SQLite database reader."""
super(WinevtResourcesSqlite3DatabaseReader, self).__init__()
self._database_file = Sqlite3DatabaseFile()
    self._resource_file_helper = resource_files.WindowsResourceFileHelper
self._string_format = 'wrc'
def _GetEventLogProviderKey(self, log_source):
"""Retrieves the EventLog provider key.
Args:
log_source (str): EventLog source.
Returns:
str: EventLog provider key or None if not available.
Raises:
RuntimeError: if more than one value is found in the database.
"""
table_names = ['event_log_providers']
column_names = ['event_log_provider_key']
condition = 'log_source == "{0:s}"'.format(log_source)
values_list = list(self._database_file.GetValues(
table_names, column_names, condition))
number_of_values = len(values_list)
if number_of_values == 0:
return None
if number_of_values == 1:
values = values_list[0]
return values['event_log_provider_key']
raise RuntimeError('More than one value found in database.')
def _GetMessage(self, message_file_key, lcid, message_identifier):
"""Retrieves a specific message from a specific message table.
Args:
message_file_key (int): message file key.
lcid (int): language code identifier (LCID).
message_identifier (int): message identifier.
Returns:
str: message string or None if not available.
Raises:
RuntimeError: if more than one value is found in the database.
"""
table_name = 'message_table_{0:d}_0x{1:08x}'.format(message_file_key, lcid)
has_table = self._database_file.HasTable(table_name)
if not has_table:
return None
column_names = ['message_string']
condition = 'message_identifier == "0x{0:08x}"'.format(message_identifier)
values = list(self._database_file.GetValues(
[table_name], column_names, condition))
number_of_values = len(values)
if number_of_values == 0:
return None
if number_of_values == 1:
return values[0]['message_string']
raise RuntimeError('More than one value found in database.')
def _GetMessageFileKeys(self, event_log_provider_key):
"""Retrieves the message file keys.
Args:
event_log_provider_key (int): EventLog provider key.
Yields:
int: message file key.
"""
table_names = ['message_file_per_event_log_provider']
column_names = ['message_file_key']
condition = 'event_log_provider_key == {0:d}'.format(
event_log_provider_key)
generator = self._database_file.GetValues(
table_names, column_names, condition)
for values in generator:
yield values['message_file_key']
def Close(self):
"""Closes the database reader object."""
self._database_file.Close()
def GetMessage(self, log_source, lcid, message_identifier):
"""Retrieves a specific message for a specific EventLog source.
Args:
log_source (str): EventLog source.
lcid (int): language code identifier (LCID).
message_identifier (int): message identifier.
Returns:
str: message string or None if not available.
"""
event_log_provider_key = self._GetEventLogProviderKey(log_source)
if not event_log_provider_key:
return None
generator = self._GetMessageFileKeys(event_log_provider_key)
if not generator:
return None
message_string = None
for message_file_key in generator:
message_string = self._GetMessage(
message_file_key, lcid, message_identifier)
if message_string:
break
if self._string_format == 'wrc':
      message_string = self._resource_file_helper.FormatMessageStringInPEP3101(
message_string)
return message_string
def GetMetadataAttribute(self, attribute_name):
"""Retrieves the metadata attribute.
Args:
attribute_name (str): name of the metadata attribute.
Returns:
str: the metadata attribute or None.
Raises:
RuntimeError: if more than one value is found in the database.
"""
table_name = 'metadata'
has_table = self._database_file.HasTable(table_name)
if not has_table:
return None
column_names = ['value']
condition = 'name == "{0:s}"'.format(attribute_name)
values = list(self._database_file.GetValues(
[table_name], column_names, condition))
number_of_values = len(values)
if number_of_values == 0:
return None
if number_of_values == 1:
return values[0]['value']
raise RuntimeError('More than one value found in database.')
def Open(self, filename):
"""Opens the database reader object.
Args:
filename (str): filename of the database.
Returns:
bool: True if successful.
Raises:
RuntimeError: if the version or string format of the database
is not supported.
"""
if not self._database_file.Open(filename, read_only=True):
return False
version = self.GetMetadataAttribute('version')
if not version or version != '20150315':
      raise RuntimeError('Unsupported version: {0!s}'.format(version))
string_format = self.GetMetadataAttribute('string_format')
if not string_format:
string_format = 'wrc'
if string_format not in ('pep3101', 'wrc'):
raise RuntimeError('Unsupported string format: {0:s}'.format(
string_format))
self._string_format = string_format
return True
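# Illustrative usage sketch for the reader above; the database path, log
# source and message identifier are hypothetical.
#
#   reader = WinevtResourcesSqlite3DatabaseReader()
#   if reader.Open('/path/to/winevt-rc.db'):
#     message_string = reader.GetMessage(
#         'Application Error', 0x0409, 0x00000064)
#     if message_string:
#       print(message_string)
#     reader.Close()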
class WinevtResourcesHelper(object):
"""Windows EventLog resources helper."""
# LCID 0x0409 is en-US.
DEFAULT_LCID = 0x0409
# The maximum number of cached message strings
_MAXIMUM_CACHED_MESSAGE_STRINGS = 32 * 1024
_WINEVT_RC_DATABASE = 'winevt-rc.db'
def __init__(
self, storage_reader, data_location, lcid, environment_variables):
"""Initializes Windows EventLog resources helper.
Args:
storage_reader (StorageReader): storage reader.
data_location (str): data location of the winevt-rc database.
lcid (int): Windows Language Code Identifier (LCID).
environment_variables (list[EnvironmentVariableArtifact]): environment
variable artifacts.
"""
super(WinevtResourcesHelper, self).__init__()
self._data_location = data_location
self._environment_variables = environment_variables or None
self._lcid = lcid or self.DEFAULT_LCID
self._message_string_cache = collections.OrderedDict()
self._storage_reader = storage_reader
self._windows_eventlog_message_files = None
self._windows_eventlog_providers = None
self._winevt_database_reader = None
def _CacheMessageString(self, log_source, message_identifier, message_string):
"""Caches a specific message string.
Args:
log_source (str): EventLog source, such as "Application Error".
message_identifier (int): message identifier.
message_string (str): message string.
"""
if len(self._message_string_cache) >= self._MAXIMUM_CACHED_MESSAGE_STRINGS:
self._message_string_cache.popitem(last=True)
lookup_key = '{0:s}:0x{1:08x}'.format(log_source, message_identifier)
self._message_string_cache[lookup_key] = message_string
self._message_string_cache.move_to_end(lookup_key, last=False)
def _GetCachedMessageString(self, log_source, message_identifier):
"""Retrieves a specific cached message string.
Args:
log_source (str): EventLog source, such as "Application Error".
message_identifier (int): message identifier.
Returns:
str: message string or None if not available.
"""
lookup_key = '{0:s}:0x{1:08x}'.format(log_source, message_identifier)
message_string = self._message_string_cache.get(lookup_key, None)
if message_string:
self._message_string_cache.move_to_end(lookup_key, last=False)
return message_string
def _GetWinevtRcDatabaseReader(self):
"""Opens the Windows EventLog resource database reader.
Returns:
WinevtResourcesSqlite3DatabaseReader: Windows EventLog resource
database reader or None.
"""
if not self._winevt_database_reader and self._data_location:
logger.warning((
'Falling back to {0:s}. Please make sure the Windows EventLog '
'message strings in the database correspond to those in the '
'EventLog files.').format(self._WINEVT_RC_DATABASE))
database_path = os.path.join(
self._data_location, self._WINEVT_RC_DATABASE)
if not os.path.isfile(database_path):
return None
self._winevt_database_reader = WinevtResourcesSqlite3DatabaseReader()
if not self._winevt_database_reader.Open(database_path):
self._winevt_database_reader = None
return self._winevt_database_reader
def _GetWinevtRcDatabaseMessageString(self, log_source, message_identifier):
"""Retrieves a specific Windows EventLog resource database message string.
Args:
log_source (str): EventLog source, such as "Application Error".
message_identifier (int): message identifier.
Returns:
str: message string or None if not available.
"""
database_reader = self._GetWinevtRcDatabaseReader()
if not database_reader:
return None
if self._lcid != self.DEFAULT_LCID:
message_string = database_reader.GetMessage(
log_source, self._lcid, message_identifier)
if message_string:
return message_string
return database_reader.GetMessage(
log_source, self.DEFAULT_LCID, message_identifier)
def _ReadEnvironmentVariables(self, storage_reader):
"""Reads the Windows EventLog message files.
Args:
storage_reader (StorageReader): storage reader.
"""
# TODO: read environment variables from storage reader.
_ = storage_reader
self._environment_variables = [artifacts.EnvironmentVariableArtifact(
case_sensitive=False, name='SystemRoot', value='C:\\Windows')]
def _ReadWindowsEventLogMessageFiles(self, storage_reader):
"""Reads the Windows EventLog message files.
Args:
storage_reader (StorageReader): storage reader.
"""
self._windows_eventlog_message_files = {}
if storage_reader.HasAttributeContainers('windows_eventlog_message_file'):
for message_file in storage_reader.GetAttributeContainers(
'windows_eventlog_message_file'):
self._windows_eventlog_message_files[message_file.windows_path] = (
message_file.GetIdentifier())
def _ReadWindowsEventLogMessageString(
self, storage_reader, log_source, message_identifier):
"""Reads an Windows EventLog message string.
Args:
storage_reader (StorageReader): storage reader.
log_source (str): EventLog source, such as "Application Error".
message_identifier (int): message identifier.
Returns:
str: message string or None if not available.
"""
if self._environment_variables is None:
self._ReadEnvironmentVariables(storage_reader)
if self._windows_eventlog_providers is None:
self._ReadWindowsEventLogProviders(storage_reader)
if self._windows_eventlog_message_files is None:
self._ReadWindowsEventLogMessageFiles(storage_reader)
provider = self._windows_eventlog_providers.get(
log_source.lower(), None)
if not provider:
return None
if not storage_reader.HasAttributeContainers(
'windows_eventlog_message_string'):
return None
message_file_identifiers = []
for windows_path in provider.event_message_files or []:
path, filename = path_helper.PathHelper.GetWindowsSystemPath(
windows_path, self._environment_variables)
lookup_path = '\\'.join([path.lower(), filename.lower()])
message_file_identifier = self._windows_eventlog_message_files.get(
lookup_path, None)
if message_file_identifier:
message_file_identifier = message_file_identifier.CopyToString()
message_file_identifiers.append(message_file_identifier)
message_strings = []
if message_file_identifiers:
filter_expression = (
'language_identifier == {0:d} and '
'message_identifier == {1:d}').format(
self._lcid, message_identifier)
# TODO: add message_file_identifiers to filter_expression
for message_string in storage_reader.GetAttributeContainers(
'windows_eventlog_message_string',
filter_expression=filter_expression):
identifier = message_string.GetMessageFileIdentifier()
identifier = identifier.CopyToString()
if identifier in message_file_identifiers:
message_strings.append(message_string)
if not message_strings:
logger.error(
'No match for message: 0x{0:08x} of source: {1:s}'.format(
message_identifier, log_source))
# TODO: add support for mappings in the WEVT_TEMPLATE PE/COFF resource
if message_strings:
return message_strings[0].string
return None
def _ReadWindowsEventLogProviders(self, storage_reader):
"""Reads the Windows EventLog providers.
Args:
storage_reader (StorageReader): storage reader.
"""
self._windows_eventlog_providers = {}
if storage_reader.HasAttributeContainers('windows_eventlog_provider'):
for provider in storage_reader.GetAttributeContainers(
'windows_eventlog_provider'):
log_source = provider.log_source.lower()
self._windows_eventlog_providers[log_source] = provider
if provider.log_source_alias:
log_source = provider.log_source_alias.lower()
self._windows_eventlog_providers[log_source] = provider
def GetMessageString(self, log_source, message_identifier):
"""Retrieves a specific Windows EventLog message string.
Args:
log_source (str): EventLog source, such as "Application Error".
message_identifier (int): message identifier.
Returns:
str: message string or None if not available.
"""
message_string = self._GetCachedMessageString(
log_source, message_identifier)
if not message_string:
if self._storage_reader and self._storage_reader.HasAttributeContainers(
'windows_eventlog_provider'):
message_string = self._ReadWindowsEventLogMessageString(
self._storage_reader, log_source, message_identifier)
else:
message_string = self._GetWinevtRcDatabaseMessageString(
log_source, message_identifier)
if message_string:
self._CacheMessageString(log_source, message_identifier, message_string)
return message_string
|
|
"""Each of the data structures relevant to the API (regulations, notices,
etc.), implemented using Django models"""
import collections
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from regcore.db import interface
from regcore.models import Diff, Layer, Notice, Document
def treeify(node, tree_id, pos=1, level=0):
"""Set tree properties in memory.
"""
node['tree_id'] = tree_id
node['level'] = level
node['left'] = pos
for child in node.get('children', []):
pos = treeify(child, tree_id, pos=pos + 1, level=level + 1)
pos = pos + 1
node['right'] = pos
return pos
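# Illustrative sketch of the MPTT-style nested-set values treeify assigns;
# the two-leaf tree below is hypothetical:
#
#     root = {'children': [{'children': []}, {'children': []}]}
#     treeify(root, tree_id=1)
#     # root:         left=1, right=6, level=0
#     # first child:  left=2, right=3, level=1
#     # second child: left=4, right=5, level=1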
def build_adjacency_map(regs):
"""Build mapping from node IDs to child records
:param regs: List of `Document` records
"""
ret = collections.defaultdict(list)
for reg in regs:
if reg.parent_id is not None:
ret[reg.parent_id].append(reg)
return ret
def build_id(reg, version=None):
if version is not None:
return '{}:{}'.format(version, '-'.join(reg['label']))
return '-'.join(reg['label'])
class DMDocuments(interface.Documents):
"""Implementation of Django-models as regulations backend"""
def get(self, doc_type, label, version=None):
"""Find the regulation label + version"""
regs = Document.objects.filter(
doc_type=doc_type,
label_string=label,
version=version,
).get_descendants(
include_self=True,
)
regs = list(regs.all())
if not regs:
return None
adjacency_map = build_adjacency_map(regs)
return self._serialize(regs[0], adjacency_map)
def _serialize(self, reg, adjacency_map):
ret = {
'label': reg.label_string.split('-'),
'text': reg.text,
'node_type': reg.node_type,
'children': [
self._serialize(child, adjacency_map)
for child in adjacency_map.get(reg.id, [])
],
}
if reg.title:
ret['title'] = reg.title
return ret
def _transform(self, reg, doc_type, version=None):
"""Create the Django object"""
return Document(
id=build_id(reg, version),
doc_type=doc_type,
version=version,
parent_id=(
build_id(reg['parent'], version)
if reg.get('parent')
else None
),
tree_id=reg['tree_id'],
level=reg['level'],
lft=reg['left'],
rght=reg['right'],
label_string='-'.join(reg['label']),
text=reg['text'],
title=reg.get('title', ''),
node_type=reg['node_type'],
root=(len(reg['label']) == 1),
)
def bulk_put(self, regs, doc_type, root_label, version):
"""Store all reg objects"""
# This does not handle subparts. Ignoring that for now
Document.objects.filter(
version=version,
doc_type=doc_type,
label_string__startswith=root_label,
).delete()
treeify(regs[0], Document.objects._get_next_tree_id())
Document.objects.bulk_create(
[self._transform(r, doc_type, version) for r in regs],
batch_size=settings.BATCH_SIZE)
def listing(self, doc_type, label=None):
"""List regulation version-label pairs that match this label (or are
root, if label is None)"""
if label is None:
query = Document.objects.filter(doc_type=doc_type, root=True)
else:
query = Document.objects.filter(
doc_type=doc_type, label_string=label)
query = query.only('version', 'label_string').order_by('version')
        # Convert the queryset into a list of (version, label_string) pairs
versions = [v for v in query.values_list('version', 'label_string')]
return versions
class DMLayers(interface.Layers):
"""Implementation of Django-models as layers backend"""
def _transform(self, layer, layer_name, doc_type):
"""Create a Django object"""
layer = dict(layer) # copy
doc_id = layer.pop('doc_id')
return Layer(name=layer_name, layer=layer, doc_type=doc_type,
doc_id=doc_id)
def bulk_put(self, layers, layer_name, doc_type, root_doc_id):
"""Store all layer objects"""
# This does not handle subparts; Ignoring that for now
# @todo - use regex to avoid deleting 222-11 when replacing 22
Layer.objects.filter(name=layer_name, doc_type=doc_type,
doc_id__startswith=root_doc_id).delete()
Layer.objects.bulk_create(
[self._transform(l, layer_name, doc_type) for l in layers],
batch_size=settings.BATCH_SIZE)
def get(self, name, doc_type, doc_id):
"""Find the layer that matches these parameters"""
try:
layer = Layer.objects.get(name=name, doc_type=doc_type,
doc_id=doc_id)
return layer.layer
except ObjectDoesNotExist:
return None
class DMNotices(interface.Notices):
"""Implementation of Django-models as notice backend"""
def put(self, doc_number, notice):
"""Store a single notice"""
Notice.objects.filter(document_number=doc_number).delete()
model = Notice(document_number=doc_number,
fr_url=notice['fr_url'],
publication_date=notice['publication_date'],
notice=notice)
if 'effective_on' in notice:
model.effective_on = notice['effective_on']
model.save()
for cfr_part in notice.get('cfr_parts', []):
model.noticecfrpart_set.create(cfr_part=cfr_part)
def get(self, doc_number):
"""Find the associated notice"""
try:
return Notice.objects.get(
document_number=doc_number).notice
except ObjectDoesNotExist:
return None
def listing(self, part=None):
"""All notices or filtered by cfr_part"""
query = Notice.objects
if part:
query = query.filter(noticecfrpart__cfr_part=part)
results = query.values('document_number', 'effective_on', 'fr_url',
'publication_date')
for result in results:
for key in ('effective_on', 'publication_date'):
if result[key]:
result[key] = result[key].isoformat()
else:
del result[key]
return list(results) # maintain compatibility with other backends
class DMDiffs(interface.Diffs):
"""Implementation of Django-models as diff backend"""
def put(self, label, old_version, new_version, diff):
"""Store a diff between two versions of a regulation node"""
Diff.objects.filter(label=label, old_version=old_version,
new_version=new_version).delete()
Diff(label=label, old_version=old_version, new_version=new_version,
diff=diff).save()
def get(self, label, old_version, new_version):
"""Find the associated diff"""
try:
diff = Diff.objects.get(label=label, old_version=old_version,
new_version=new_version)
return diff.diff
except ObjectDoesNotExist:
return None
|
|
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Quantal Response Equilibrium (QRE) Stochastic Approximate Nash Solver."""
from absl import logging # pylint:disable=unused-import
import numpy as np
from scipy import special
from open_spiel.python.algorithms.adidas_utils.helpers import simplex
from open_spiel.python.algorithms.adidas_utils.helpers.nonsymmetric import exploitability as exp
class Solver(object):
"""QRE Solver."""
def __init__(self, temperature=0., proj_grad=True, euclidean=False,
cheap=False, lrs=(1e-2, 1e-1), rnd_init=False, seed=None,
**kwargs):
"""Ctor."""
del kwargs
if temperature < 0.:
raise ValueError('temperature must be non-negative')
self.num_players = None
self.temperature = temperature
self.proj_grad = proj_grad
self.cheap = cheap
self.rnd_init = rnd_init
self.lrs = lrs
self.has_aux = True
self.aux_errors = []
self.euclidean = euclidean
if euclidean:
self.update = self.euc_descent_step
else:
self.update = self.mirror_descent_step
self.seed = seed
self.random = np.random.RandomState(seed)
def init_vars(self, num_strats, num_players):
"""Initialize solver parameters."""
self.num_players = num_players
if len(num_strats) != num_players:
raise ValueError('Must specify num strategies for each player')
init_dist = []
for num_strats_i in num_strats:
if self.rnd_init:
init_dist_i = self.random.rand(num_strats_i)
else:
init_dist_i = np.ones(num_strats_i)
init_dist_i /= init_dist_i.sum()
init_dist.append(init_dist_i)
init_y = [np.zeros_like(dist_i) for dist_i in init_dist]
return (init_dist, init_y)
def record_aux_errors(self, grads):
"""Record errors for the auxiliary variables."""
grad_y = grads[1]
# call ravel in case use y to track entire payoff matrices in future
grad_y_flat = np.concatenate([np.ravel(g) for g in grad_y])
self.aux_errors.append([np.linalg.norm(grad_y_flat)])
def compute_gradients(self, params, payoff_matrices):
"""Compute and return gradients (and exploitabilities) for all parameters.
Args:
params: tuple of params (dist, y), see ate.gradients
payoff_matrices: dictionary with keys as tuples of agents (i, j) and
values of (2 x A x A) np.arrays, payoffs for each joint action. keys
are sorted and arrays should be indexed in the same order
Returns:
tuple of gradients (grad_dist, grad_y), see ate.gradients
unregularized exploitability (stochastic estimate)
      shannon regularized exploitability (stochastic estimate)
"""
if self.cheap:
return cheap_gradients(self.random, *params, payoff_matrices,
self.num_players, self.temperature, self.proj_grad)
else:
return gradients(*params, payoff_matrices, self.num_players,
self.temperature, self.proj_grad)
def exploitability(self, params, payoff_matrices):
"""Compute and return tsallis entropy regularized exploitability.
Args:
params: tuple of params (dist, y), see ate.gradients
payoff_matrices: dictionary with keys as tuples of agents (i, j) and
values of (2 x A x A) np.arrays, payoffs for each joint action. keys
are sorted and arrays should be indexed in the same order
Returns:
float, exploitability of current dist
"""
return exp.qre_exploitability(params, payoff_matrices, self.temperature)
def euc_descent_step(self, params, grads, t):
"""Projected gradient descent on exploitability using Euclidean projection.
Args:
params: tuple of variables to be updated (dist, y)
grads: tuple of variable gradients (grad_dist, grad_y)
t: int, solver iteration (unused)
Returns:
new_params: tuple of update params (new_dist, new_y)
"""
lr_dist, lr_y = self.lrs
new_dist = []
for dist_i, dist_grad_i in zip(params[0], grads[0]):
new_dist_i = dist_i - lr_dist * dist_grad_i
new_dist_i = simplex.euclidean_projection_onto_simplex(new_dist_i)
new_dist.append(new_dist_i)
lr_y = np.clip(1 / float(t + 1), lr_y, np.inf)
new_y = []
for y_i, y_grad_i in zip(params[1], grads[1]):
new_y_i = y_i - lr_y * y_grad_i
new_y_i = np.clip(new_y_i, 0., np.inf)
new_y.append(new_y_i)
return (new_dist, new_y)
def mirror_descent_step(self, params, grads, t):
"""Entropic mirror descent on exploitability.
Args:
params: tuple of variables to be updated (dist, y)
grads: tuple of variable gradients (grad_dist, grad_y)
t: int, solver iteration (unused)
Returns:
new_params: tuple of update params (new_dist, new_y)
"""
lr_dist, lr_y = self.lrs
new_dist = []
for dist_i, dist_grad_i in zip(params[0], grads[0]):
new_dist_i = np.log(np.clip(dist_i, 0., np.inf)) - lr_dist * dist_grad_i
new_dist_i = special.softmax(new_dist_i)
new_dist.append(new_dist_i)
lr_y = np.clip(1 / float(t + 1), lr_y, np.inf)
new_y = []
for y_i, y_grad_i in zip(params[1], grads[1]):
new_y_i = y_i - lr_y * y_grad_i
new_y_i = np.clip(new_y_i, 0., np.inf)
new_y.append(new_y_i)
return (new_dist, new_y)
def gradients(dist, y, payoff_matrices, num_players, temperature=0.,
proj_grad=True):
"""Computes exploitablity gradient and aux variable gradients.
Args:
dist: list of 1-d np.arrays, current estimate of nash distribution
y: list 1-d np.arrays (same shape as dist), current est. of payoff gradient
payoff_matrices: dictionary with keys as tuples of agents (i, j) and
values of (2 x A x A) np.arrays, payoffs for each joint action. keys
are sorted and arrays should be indexed in the same order
num_players: int, number of players, in case payoff_matrices is abbreviated
temperature: non-negative float, default 0.
proj_grad: bool, if True, projects dist gradient onto simplex
Returns:
gradient of exploitability w.r.t. (dist, y) as tuple
unregularized exploitability (stochastic estimate)
shannon regularized exploitability (stochastic estimate)
"""
# first compute policy gradients and player effects (fx)
policy_gradient = []
other_player_fx = []
grad_y = []
unreg_exp = []
reg_exp = []
for i in range(num_players):
nabla_i = np.zeros_like(dist[i])
for j in range(num_players):
if j == i:
continue
if i < j:
hess_i_ij = payoff_matrices[(i, j)][0]
else:
hess_i_ij = payoff_matrices[(j, i)][1].T
nabla_ij = hess_i_ij.dot(dist[j])
nabla_i += nabla_ij / float(num_players - 1)
grad_y.append(y[i] - nabla_i)
if temperature > 0:
br_i = special.softmax(y[i] / temperature)
br_i_mat = (np.diag(br_i) - np.outer(br_i, br_i)) / temperature
br_i_policy_gradient = nabla_i - temperature * (np.log(br_i) + 1)
else:
power = np.inf
s_i = np.linalg.norm(y[i], ord=power)
br_i = np.zeros_like(dist[i])
maxima_i = (y[i] == s_i)
br_i[maxima_i] = 1. / maxima_i.sum()
br_i_mat = np.zeros((br_i.size, br_i.size))
br_i_policy_gradient = np.zeros_like(br_i)
policy_gradient_i = nabla_i
if temperature > 0:
policy_gradient_i -= temperature * (np.log(dist[i]) + 1)
policy_gradient.append(policy_gradient_i)
unreg_exp.append(np.max(y[i]) - y[i].dot(dist[i]))
entr_br_i = temperature * special.entr(br_i).sum()
entr_dist_i = temperature * special.entr(dist[i]).sum()
reg_exp.append(y[i].dot(br_i - dist[i]) + entr_br_i - entr_dist_i)
other_player_fx_i = (br_i - dist[i]) + br_i_mat.dot(br_i_policy_gradient)
other_player_fx.append(other_player_fx_i)
# then construct exploitability gradient
grad_dist = []
for i in range(num_players):
grad_dist_i = -policy_gradient[i]
for j in range(num_players):
if j == i:
continue
if i < j:
hess_j_ij = payoff_matrices[(i, j)][1]
else:
hess_j_ij = payoff_matrices[(j, i)][0].T
grad_dist_i += hess_j_ij.dot(other_player_fx[j])
if proj_grad:
grad_dist_i = simplex.project_grad(grad_dist_i)
grad_dist.append(grad_dist_i)
return (grad_dist, grad_y), np.mean(unreg_exp), np.mean(reg_exp)
def cheap_gradients(random, dist, y, payoff_matrices, num_players,
temperature=0., proj_grad=True):
"""Computes exploitablity gradient and aux variable gradients with samples.
This implementation takes payoff_matrices as input so technically uses O(d^2)
compute but only a single column of payoff_matrices is used to perform the
update so can be re-implemented in O(d) if needed.
Args:
random: random number generator, np.random.RandomState(seed)
dist: list of 1-d np.arrays, current estimate of nash distribution
y: list 1-d np.arrays (same shape as dist), current est. of payoff gradient
payoff_matrices: dictionary with keys as tuples of agents (i, j) and
values of (2 x A x A) np.arrays, payoffs for each joint action. keys
are sorted and arrays should be indexed in the same order
num_players: int, number of players, in case payoff_matrices is abbreviated
temperature: non-negative float, default 0.
proj_grad: bool, if True, projects dist gradient onto simplex
Returns:
gradient of exploitability w.r.t. (dist, y) as tuple
unregularized exploitability (stochastic estimate)
shannon regularized exploitability (stochastic estimate)
"""
# first compute policy gradients and player effects (fx)
policy_gradient = []
other_player_fx = []
grad_y = []
unreg_exp = []
reg_exp = []
for i in range(num_players):
others = list(range(num_players))
others.remove(i)
    j = random.choice(others)  # use the seeded RandomState for reproducibility
action_j = random.choice(dist[j].size, p=dist[j])
if i < j:
hess_i_ij = payoff_matrices[(i, j)][0]
else:
hess_i_ij = payoff_matrices[(j, i)][1].T
nabla_i = hess_i_ij[:, action_j]
grad_y.append(y[i] - nabla_i)
if temperature > 0:
br_i = special.softmax(y[i] / temperature)
br_i_mat = (np.diag(br_i) - np.outer(br_i, br_i)) / temperature
br_i_policy_gradient = nabla_i - temperature * (np.log(br_i) + 1)
else:
power = np.inf
s_i = np.linalg.norm(y[i], ord=power)
br_i = np.zeros_like(dist[i])
maxima_i = (y[i] == s_i)
br_i[maxima_i] = 1. / maxima_i.sum()
br_i_mat = np.zeros((br_i.size, br_i.size))
br_i_policy_gradient = np.zeros_like(br_i)
policy_gradient_i = nabla_i
if temperature > 0:
policy_gradient_i -= temperature * (np.log(dist[i]) + 1)
policy_gradient.append(policy_gradient_i)
unreg_exp.append(np.max(y[i]) - y[i].dot(dist[i]))
entr_br_i = temperature * special.entr(br_i).sum()
entr_dist_i = temperature * special.entr(dist[i]).sum()
reg_exp.append(y[i].dot(br_i - dist[i]) + entr_br_i - entr_dist_i)
other_player_fx_i = (br_i - dist[i]) + br_i_mat.dot(br_i_policy_gradient)
other_player_fx.append(other_player_fx_i)
# then construct exploitability gradient
grad_dist = []
for i in range(num_players):
grad_dist_i = -policy_gradient[i]
for j in range(num_players):
if j == i:
continue
if i < j:
hess_j_ij = payoff_matrices[(i, j)][1]
else:
hess_j_ij = payoff_matrices[(j, i)][0].T
action_u = random.choice(dist[j].size) # uniform, ~importance sampling
other_player_fx_j = dist[j].size * other_player_fx[j][action_u]
grad_dist_i += hess_j_ij[:, action_u] * other_player_fx_j
if proj_grad:
grad_dist_i = simplex.project_grad(grad_dist_i)
grad_dist.append(grad_dist_i)
return (grad_dist, grad_y), np.mean(unreg_exp), np.mean(reg_exp)
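# Illustrative usage sketch: one descent step on a toy two-player game. The
# payoff values are arbitrary; payoff_matrices[(0, 1)][0] holds player 0's
# payoffs and payoff_matrices[(0, 1)][1] holds player 1's.
#
#   payoff_matrices = {(0, 1): np.stack([np.array([[1., -1.], [-1., 1.]]),
#                                        np.array([[-1., 1.], [1., -1.]])])}
#   solver = Solver(temperature=0.1, seed=0)
#   dist, y = solver.init_vars(num_strats=[2, 2], num_players=2)
#   grads, unreg_exp, reg_exp = solver.compute_gradients(
#       (dist, y), payoff_matrices)
#   solver.record_aux_errors(grads)
#   dist, y = solver.update((dist, y), grads, t=0)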
|
|
'''
List View
===========
.. versionadded:: 1.5
.. note::
ListView is planned to be deprecated once `RecycleView \
<https://github.com/kivy-garden/garden.recycleview>`_ becomes stable.
.. warning::
This code is still experimental, and its API is subject to change in a
future version.
The :class:`~kivy.uix.listview.ListView` implements an
:class:`~kivy.uix.abstractview.AbstractView` as
a vertical, scrollable, pannable list clipped to the scrollview's bounding box
and contains list item view instances.
The :class:`AbstractView` has one property: :class:`~kivy.adapters.adapter`.
The adapter can be one of the following: a
:class:`~kivy.adapters.simplelistadapter.SimpleListAdapter`, a
:class:`~kivy.adapters.listadapter.ListAdapter` or a
:class:`~kivy.adapters.dictadapter.DictAdapter`. The :class:`Adapter` can make
use of :mod:`~kivy.adapters.args_converters` to prepare your data for passing
into the constructor for each item view instantiation.
For an overview of how all these components fit together, please see the
:mod:`~kivy.adapters` module documentation.
Introduction
------------
Lists are central parts of many software projects. Kivy's approach to lists
includes providing solutions for simple lists, along with a substantial
framework for building lists of moderate to advanced complexity. For a new
user, it can be difficult to ramp up from simple to advanced. For
this reason, Kivy provides an extensive set of examples (with the Kivy package)
that you may wish to run first, to get a taste of the range of functionality
offered. You can tell from the names of the examples that they illustrate the
"ramping up" from simple to advanced:
* `kivy/examples/widgets/lists/list_simple.py <https://github.com/\
kivy/kivy/tree/master/examples/widgets/lists/list_simple.py>`_
* `kivy/examples/widgets/lists/list_simple_in_kv.py <https://github.com/\
kivy/kivy/tree/master/examples/widgets/lists/list_simple_in_kv.py>`_
* `kivy/examples/widgets/lists/list_simple_in_kv_2.py <https://github.com/\
kivy/kivy/tree/master/examples/widgets/lists/list_simple_in_kv_2.py>`_
* `kivy/examples/widgets/lists/list_master_detail.py <https://github.com/\
kivy/kivy/tree/master/examples/widgets/lists/list_master_detail.py>`_
* `kivy/examples/widgets/lists/list_two_up.py <https://github.com/\
kivy/kivy/tree/master/examples/widgets/lists/list_two_up.py>`_
* `kivy/examples/widgets/lists/list_kv.py <https://github.com/\
kivy/kivy/tree/master/examples/widgets/lists/list_kv.py>`_
* `kivy/examples/widgets/lists/list_composite.py <https://github.com/\
kivy/kivy/tree/master/examples/widgets/lists/list_composite.py>`_
* `kivy/examples/widgets/lists/list_reset_data.py <https://github.com/\
kivy/kivy/tree/master/examples/widgets/lists/list_reset_data.py>`_
* `kivy/examples/widgets/lists/list_cascade.py <https://github.com/\
kivy/kivy/tree/master/examples/widgets/lists/list_cascade.py>`_
* `kivy/examples/widgets/lists/list_cascade_dict.py <https://github.com/\
kivy/kivy/tree/master/examples/widgets/lists/list_cascade_dict.py>`_
* `kivy/examples/widgets/lists/list_cascade_images.py <https://github.com/\
kivy/kivy/tree/master/examples/widgets/lists/list_cascade_images.py>`_
* `kivy/examples/widgets/lists/list_ops.py <https://github.com/\
kivy/kivy/tree/master/examples/widgets/lists/list_ops.py>`_
Many of the examples feature selection, some restricting selection to single
selection, where only one item at a time can be selected, and others allowing
multiple item selection. Many of the examples illustrate how selection in one
list can be connected to actions and selections in another view or another list.
Find your own way of reading the documentation here, examining the source code
for the example apps and running the examples. Some may prefer to read the
documentation through first, others may want to run the examples and view their
code. No matter what you do, going back and forth will likely be needed.
Basic Example
-------------
In its simplest form, we make a listview with 100 items::
from kivy.uix.listview import ListView
from kivy.base import runTouchApp
class MainView(ListView):
def __init__(self, **kwargs):
super(MainView, self).__init__(
item_strings=[str(index) for index in range(100)])
if __name__ == '__main__':
runTouchApp(MainView())
Or, we could declare the listview using the kv language::
from kivy.uix.boxlayout import BoxLayout
from kivy.lang import Builder
from kivy.base import runTouchApp
Builder.load_string("""
<MyListView>:
ListView:
item_strings: [str(index) for index in range(100)]
""")
class MyListView(BoxLayout):
pass
if __name__ == '__main__':
runTouchApp(MyListView())
Using an Adapter
-------------------
Behind the scenes, the basic example above uses the
:class:`~kivy.adapters.simplelistadapter.SimpleListAdapter`. When the
constructor for the :class:`~kivy.uix.listview.ListView` sees that only a list
of
strings is provided as an argument (called item_strings), it creates a
:class:`~kivy.adapters.simplelistadapter.SimpleListAdapter` using the
list of strings.
"Simple" in :class:`~kivy.adapters.simplelistadapter.SimpleListAdapter` means
*without selection support*. It is a scrollable list of items that does not
respond to touch events.
To use a :class:`SimpleListAdapter` explicitly when creating a ListView
instance, do::
do::
simple_list_adapter = SimpleListAdapter(
data=["Item #{0}".format(i) for i in range(100)],
cls=Label)
list_view = ListView(adapter=simple_list_adapter)
The instance of :class:`~kivy.adapters.simplelistadapter.SimpleListAdapter` has
a required data argument which contains data items to use for instantiating
:class:`~kivy.uix.label.Label` views for the list view (note the cls=Label
argument). The data items are strings. Each item string is set by the
:class:`~kivy.adapters.simplelistadapter.SimpleListAdapter` as the *text*
argument for each Label instantiation.
You can declare a ListView with an adapter in a kv file with special attention
given to the way longer python blocks are indented::
from kivy.uix.boxlayout import BoxLayout
from kivy.base import runTouchApp
from kivy.lang import Builder
# Note the special nature of indentation in the adapter declaration, where
# the adapter: is on one line, then the value side must be given at one
# level of indentation.
Builder.load_string("""
#:import label kivy.uix.label
#:import sla kivy.adapters.simplelistadapter
<MyListView>:
ListView:
adapter:
sla.SimpleListAdapter(
data=["Item #{0}".format(i) for i in range(100)],
cls=label.Label)
""")
class MyListView(BoxLayout):
pass
if __name__ == '__main__':
runTouchApp(MyListView())
ListAdapter and DictAdapter
---------------------------
For most use cases, your data is more complex than a simple list of strings.
Selection functionality is also often needed.
The :class:`~kivy.adapters.listadapter.ListAdapter` and
:class:`~kivy.adapters.dictadapter.DictAdapter` cover these more elaborate
needs.
The :class:`~kivy.adapters.listadapter.ListAdapter` is the base class for
:class:`~kivy.adapters.dictadapter.DictAdapter`, so we can start with it.
Refer to the :class:`~kivy.adapters.listadapter.ListAdapter` docs for details,
but here is a synopsis of its arguments:
* :attr:`~kivy.adapters.adapter.Adapter.data`:
strings, class instances, dicts, etc. that form the base data
for instantiating views.
* :attr:`~kivy.adapters.adapter.Adapter.cls`:
a Kivy view that is to be instantiated for each list item. There
are several built-in types available, including ListItemLabel and
ListItemButton, or you can make your own class that mixes in the
required :class:`~kivy.uix.selectableview.SelectableView`.
* :attr:`~kivy.adapters.adapter.Adapter.template`:
the name of a Kivy language (kv) template that defines the
Kivy view for each list item.
.. note::
Pick only one, cls or template, to provide as an argument.
* :attr:`~kivy.adapters.args_converters`: a function that takes a data item
object as input and
uses it to build and return an args dict, ready
to be used in a call to instantiate item views using the item view cls
or template. In the case of cls, the args dict becomes a kwargs constructor
argument. For a template, it is treated as a context
(ctx) but is essentially similar in form to the kwargs usage.
* :attr:`~kivy.adapters.listadapter.ListAdapter.selection_mode`:
a string with the value 'single',
'multiple' or other.
* :attr:`~kivy.adapters.listadapter.ListAdapter.allow_empty_selection`:
a boolean, which if False (the default), forces
there to always be a selection if there is data
available. If True, selection happens only as a
result of user action.
In narrative, we can summarize as follows:
A listview's adapter takes data items and uses an args_converter
function to transform them into arguments for creating list item view
instances, using either a cls or a kv template.
In a graphic, a summary of the relationship between a listview and its
components can be summarized as follows:
.. image:: images/adapters.png
Please refer to the :mod:`~kivy.adapters` documentation for more details.
A :class:`~kivy.adapters.dictadapter.DictAdapter` has the same arguments and
requirements as a :class:`~kivy.adapters.listadapter.ListAdapter` except for two
things:
1) There is an additional argument, sorted_keys, which must meet the
requirements of normal python dictionary keys.
2) The data argument is, as you would expect, a dict. Keys in the dict
must include the keys in the sorted_keys argument, but they may form a
superset of the keys in sorted_keys. Values may be strings, class
instances, dicts, etc. (The args_converter uses it accordingly).
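As a brief sketch (the data values here are purely illustrative), a
DictAdapter could be constructed as follows::

    from kivy.adapters.dictadapter import DictAdapter
    from kivy.uix.listview import ListItemButton, ListView

    integers_dict = {str(i): {'text': str(i), 'is_selected': False}
                     for i in range(100)}

    dict_adapter = DictAdapter(sorted_keys=[str(i) for i in range(100)],
                               data=integers_dict,
                               args_converter=lambda row_index, rec: {
                                   'text': rec['text'],
                                   'size_hint_y': None,
                                   'height': 25},
                               selection_mode='single',
                               allow_empty_selection=False,
                               cls=ListItemButton)
    list_view = ListView(adapter=dict_adapter)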
Using an Args Converter
-----------------------
A :class:`~kivy.uix.listview.ListView` allows use of built-in list item views,
such as :class:`~kivy.uix.listview.ListItemButton`, your own custom item view
class or a custom kv template. Whichever type of list item view is used, an
:doc:`args_converter <api-kivy.adapters.args_converters>` function is needed to
prepare, per list data item, kwargs for the cls or the ctx for the template.
.. note::
Only the ListItemLabel, ListItemButton or custom classes like them (and
not the simple Label or Button classes) are to be used in the listview
system.
.. warning::
ListItemButton inherits the `background_normal` and `background_down`
properties from the Button widget, so the `selected_color` and
`deselected_color` are not represented faithfully by default.
Here is an args_converter for use with the built-in
:class:`~kivy.uix.listview.ListItemButton` specified as a normal Python
function::
def args_converter(row_index, an_obj):
return {'text': an_obj.text,
'size_hint_y': None,
'height': 25}
and as a lambda::
args_converter = lambda row_index, an_obj: {'text': an_obj.text,
'size_hint_y': None,
'height': 25}
In the args converter example above, the data item is assumed to be an object
(class instance), hence the reference an_obj.text.
Here is an example of an args converter that works with list data items that
are dicts::
args_converter = lambda row_index, obj: {'text': obj['text'],
'size_hint_y': None,
'height': 25}
So, it is the responsibility of the developer to code the args_converter
according to the data at hand. The row_index argument can be useful in some
cases, such as when custom labels are needed.
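For example (a minimal sketch), row_index can be used to build per-row
labels::

    args_converter = lambda row_index, rec: {
        'text': "Row {0}: {1}".format(row_index, rec['text']),
        'size_hint_y': None,
        'height': 25}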
An Example ListView
-------------------
Now, to some example code::
from kivy.adapters.listadapter import ListAdapter
from kivy.uix.listview import ListItemButton, ListView
data = [{'text': str(i), 'is_selected': False} for i in range(100)]
args_converter = lambda row_index, rec: {'text': rec['text'],
'size_hint_y': None,
'height': 25}
list_adapter = ListAdapter(data=data,
args_converter=args_converter,
cls=ListItemButton,
selection_mode='single',
allow_empty_selection=False)
list_view = ListView(adapter=list_adapter)
This listview will show 100 buttons with text from 0 to 99. The args_converter
function converts the dict items in the data and instantiates ListItemButton
views by passing these converted items into its constructor. The
listview will only allow single selection and the first item will already be
selected as allow_empty_selection is False. For a complete discussion on these
arguments, please see the :class:`~kivy.adapters.listadapter.ListAdapter`
documentation.
The :class:`~kivy.uix.listview.ListItemLabel` works in much the same way as the
:class:`~kivy.uix.listview.ListItemButton`.
Using a Custom Item View Class
------------------------------
The data used in an adapter can be any of the normal Python types or custom
classes, as shown below. It is up to the programmer to ensure that the
args_converter performs the appropriate conversions.
Here we make a simple DataItem class that has the required text and
is_selected properties::
from kivy.uix.listview import ListItemButton
from kivy.adapters.listadapter import ListAdapter
class DataItem(object):
def __init__(self, text='', is_selected=False):
self.text = text
self.is_selected = is_selected
data_items = [DataItem(text='cat'),
DataItem(text='dog'),
DataItem(text='frog')]
list_item_args_converter = lambda row_index, obj: {'text': obj.text,
'size_hint_y': None,
'height': 25}
list_adapter = ListAdapter(data=data_items,
args_converter=list_item_args_converter,
propagate_selection_to_data=True,
cls=ListItemButton)
list_view = ListView(adapter=list_adapter)
The data is passed to the :class:`~kivy.adapters.listadapter.ListAdapter` along
with an args_converter function. The propagation setting means that
the is_selected property for each data item will be set and kept in sync with
the list item views. This setting should be set to True if you wish to
initialize the view with item views already selected.
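For example, to start the view with the 'dog' item already selected, you
could mark its data item before constructing the adapter (a minimal sketch
building on the code above)::

    data_items[1].is_selected = True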
You may also use the provided :class:`~kivy.adapters.models.SelectableDataItem`
mixin to make a custom class. Instead of the "manually-constructed" DataItem
class above, we could do::
from kivy.adapters.models import SelectableDataItem
class DataItem(SelectableDataItem):
# Add properties here.
pass
:class:`~kivy.adapters.models.SelectableDataItem` is a simple mixin class that
has an is_selected property.
Using an Item View Template
---------------------------
:class:`~kivy.uix.selectableview.SelectableView` is another simple mixin class that
has the required properties for a list item: text and is_selected. To make
your
own template, mix it in as follows::
from kivy.lang import Builder
Builder.load_string("""
[CustomListItem@SelectableView+BoxLayout]:
size_hint_y: ctx.size_hint_y
height: ctx.height
ListItemButton:
text: ctx.text
is_selected: ctx.is_selected
""")
A class called CustomListItem can then be instantiated for each list item. Note
that it subclasses a :class:`~kivy.uix.boxlayout.BoxLayout` and is thus a type
of :mod:`~kivy.uix.layout`. It contains a
:class:`~kivy.uix.listview.ListItemButton` instance.
Using the power of the Kivy language (kv), you can easily build composite list
items: in addition to ListItemButton, you could have a ListItemLabel or a
custom class you have defined and registered via the
:class:`~kivy.factory.Factory`.
An args_converter needs to be constructed that goes along with such a kv
template. For example, to use the kv template above::
list_item_args_converter = \\
lambda row_index, rec: {'text': rec['text'],
'is_selected': rec['is_selected'],
'size_hint_y': None,
'height': 25}
integers_dict = \\
{ str(i): {'text': str(i), 'is_selected': False} for i in range(100)}
dict_adapter = DictAdapter(sorted_keys=[str(i) for i in range(100)],
data=integers_dict,
args_converter=list_item_args_converter,
template='CustomListItem')
list_view = ListView(adapter=dict_adapter)
A dict adapter is created with 1..100 integer strings as sorted_keys, and an
integers_dict as data. integers_dict has the integer strings as keys and dicts
with text and is_selected properties. The CustomListItem defined above in the
Builder.load_string() call is set as the kv template for the list item views.
The list_item_args_converter lambda function will take each dict in
integers_dict and will return an args dict, ready for passing as the context
(ctx) for the template.
Using CompositeListItem
-----------------------
The class :class:`~kivy.uix.listview.CompositeListItem` is another option for
building advanced composite list items. The kv language approach has its
advantages, but here we build a composite list view using plain Python::
args_converter = lambda row_index, rec: \\
{'text': rec['text'],
'size_hint_y': None,
'height': 25,
'cls_dicts': [{'cls': ListItemButton,
'kwargs': {'text': rec['text']}},
{'cls': ListItemLabel,
'kwargs': {'text': "Middle-{0}".format(rec['text']),
'is_representing_cls': True}},
{'cls': ListItemButton,
'kwargs': {'text': rec['text']}}]}
item_strings = ["{0}".format(index) for index in range(100)]
integers_dict = \\
{str(i): {'text': str(i), 'is_selected': False} for i in range(100)}
dict_adapter = DictAdapter(sorted_keys=item_strings,
data=integers_dict,
args_converter=args_converter,
selection_mode='single',
allow_empty_selection=False,
cls=CompositeListItem)
list_view = ListView(adapter=dict_adapter)
The args_converter is somewhat complicated, so we should go through the
details. Observe in the :class:`~kivy.adapters.dictadapter.DictAdapter`
instantiation that the :class:`~kivy.uix.listview.CompositeListItem` class is
set as the cls to be instantiated for each list item. The args_converter will
make args dicts for this cls. In the args_converter, the first three items,
text, size_hint_y, and height, are arguments for the CompositeListItem itself.
After that you see a cls_dicts list that contains argument sets for each of
the member widgets for this composite: two
:class:`ListItemButtons <kivy.uix.listview.ListItemButton>` and a
:class:`~kivy.uix.listview.ListItemLabel`. This is a similar approach to
using a kv template described above.
For details on how :class:`~kivy.uix.listview.CompositeListItem` works,
examine the code, looking for how parsing of the cls_dicts list and kwargs
processing is done.
Uses for Selection
------------------
What can we do with selection? Combining selection with the system of bindings
in Kivy, we can build a wide range of user interface designs.
We could make data items that contain the names of dog breeds, and connect
the selection of dog breed to the display of details in another view, which
would update automatically on selection. This is done via a binding to the
:attr:`~kivy.adapters.listadapter.ListAdapter.on_selection_change` event::
list_adapter.bind(on_selection_change=callback_function)
where callback_function() gets passed the adapter as an argument and does
whatever is needed for the update. See the
example called list_master_detail.py, and imagine that the list on the left
could be a list of dog breeds, and the detail view on the right could show
details for a selected dog breed.
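A minimal sketch of such a callback, assuming a detail_view Label exists
elsewhere in your interface::

    def callback_function(adapter, *args):
        if adapter.selection:
            detail_view.text = adapter.selection[0].text

    list_adapter.bind(on_selection_change=callback_function)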
In another example, we could set the selection_mode of a listview to
'multiple', and load it with a list of answers to a multiple-choice question.
The question could have several correct answers. A color swatch view could be
bound to selection change, as above, so that it turns green as soon as the
correct choices are made. If the number of touches exceeds a limit, the
answer session could be terminated. See the examples that feature thumbnail
images to get some ideas, e.g., list_cascade_dict.py.
In a more involved example, we could chain together three listviews, where
selection in the first controls the items shown in the second, and selection in
the second controls the items shown in the third. If allow_empty_selection were
set to False for these listviews, a dynamic system of selection "cascading"
from one list to the next would result.
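A sketch of one link in such a chain, assuming a hypothetical
items_by_category dict that maps the selected text to the data for the
second list::

    def update_second_list(adapter, *args):
        if adapter.selection:
            key = adapter.selection[0].text
            list_adapter_2.data = items_by_category[key]

    list_adapter_1.bind(on_selection_change=update_second_list)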
There are so many ways that listviews and Kivy bindings functionality can be
used, that we have only scratched the surface here. For on-disk examples, see::
kivy/examples/widgets/lists/list_*.py
Several examples show the "cascading" behavior described above. Others
demonstrate the use of kv templates and composite list views.
'''
__all__ = ('SelectableView', 'ListItemButton', 'ListItemLabel',
'CompositeListItem', 'ListView', 'ListItemReprMixin')
from kivy.event import EventDispatcher
from kivy.clock import Clock
from kivy.compat import PY2
from kivy.uix.widget import Widget
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.boxlayout import BoxLayout
from kivy.adapters.simplelistadapter import SimpleListAdapter
from kivy.uix.abstractview import AbstractView
from kivy.uix.selectableview import SelectableView
from kivy.properties import ObjectProperty, DictProperty, \
NumericProperty, ListProperty, BooleanProperty
from kivy.lang import Builder
from math import ceil, floor
class ListItemReprMixin(Label):
'''
The :class:`~kivy.uix.listview.ListItemReprMixin` provides a
:class:`~kivy.uix.label.Label` with a Python 2/3 compatible string
representation (*__repr__*). It is intended for internal usage.
'''
if PY2:
def __repr__(self):
text = self.text.encode('utf-8') if isinstance(self.text, unicode) \
else self.text
return '<%s text=%s>' % (self.__class__.__name__, text)
else:
def __repr__(self):
return '<%s text=%s>' % (self.__class__.__name__, self.text)
class ListItemButton(ListItemReprMixin, SelectableView, Button):
''':class:`~kivy.uix.listview.ListItemButton` mixes
:class:`~kivy.uix.selectableview.SelectableView` with
:class:`~kivy.uix.button.Button` to produce a button suitable for use in
:class:`~kivy.uix.listview.ListView`.
'''
selected_color = ListProperty([1., 0., 0., 1])
'''
:attr:`selected_color` is a :class:`~kivy.properties.ListProperty` and
defaults to [1., 0., 0., 1].
'''
deselected_color = ListProperty([0., 1., 0., 1])
'''
:attr:`deselected_color` is a :class:`~kivy.properties.ListProperty` and
defaults to [0., 1., 0., 1].
'''
def __init__(self, **kwargs):
super(ListItemButton, self).__init__(**kwargs)
# Set Button bg color to be deselected_color.
self.background_color = self.deselected_color
def select(self, *args):
self.background_color = self.selected_color
if isinstance(self.parent, CompositeListItem):
self.parent.select_from_child(self, *args)
def deselect(self, *args):
self.background_color = self.deselected_color
if isinstance(self.parent, CompositeListItem):
self.parent.deselect_from_child(self, *args)
def select_from_composite(self, *args):
self.background_color = self.selected_color
def deselect_from_composite(self, *args):
self.background_color = self.deselected_color
# [TODO] Why does this mix in SelectableView -- that makes it work like
# button, which is redundant.
class ListItemLabel(ListItemReprMixin, SelectableView, Label):
''':class:`~kivy.uix.listview.ListItemLabel` mixes
:class:`~kivy.uix.selectableview.SelectableView` with
:class:`~kivy.uix.label.Label` to produce a label suitable for use in
:class:`~kivy.uix.listview.ListView`.
'''
def __init__(self, **kwargs):
super(ListItemLabel, self).__init__(**kwargs)
def select(self, *args):
self.bold = True
if isinstance(self.parent, CompositeListItem):
self.parent.select_from_child(self, *args)
def deselect(self, *args):
self.bold = False
if isinstance(self.parent, CompositeListItem):
self.parent.deselect_from_child(self, *args)
def select_from_composite(self, *args):
self.bold = True
def deselect_from_composite(self, *args):
self.bold = False
class CompositeListItem(SelectableView, BoxLayout):
''':class:`~kivy.uix.listview.CompositeListItem` mixes
:class:`~kivy.uix.selectableview.SelectableView` with :class:`BoxLayout` for a
generic container-style list item, to be used in
:class:`~kivy.uix.listview.ListView`.
'''
background_color = ListProperty([1, 1, 1, 1])
    '''ListItemButton subclasses Button, which has background_color, but
    for a composite list item, we must add this property ourselves.
:attr:`background_color` is a :class:`~kivy.properties.ListProperty` and
defaults to [1, 1, 1, 1].
'''
selected_color = ListProperty([1., 0., 0., 1])
'''
:attr:`selected_color` is a :class:`~kivy.properties.ListProperty` and
defaults to [1., 0., 0., 1].
'''
deselected_color = ListProperty([.33, .33, .33, 1])
'''
:attr:`deselected_color` is a :class:`~kivy.properties.ListProperty` and
defaults to [.33, .33, .33, 1].
'''
representing_cls = ObjectProperty(None)
    '''Which component view class, if any, should represent the
    composite list item in __repr__()?
:attr:`representing_cls` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
def __init__(self, **kwargs):
cls_dicts = kwargs.pop('cls_dicts')
text = kwargs.pop('text', None)
index = kwargs['index']
super(CompositeListItem, self).__init__(**kwargs)
        # Example data:
        #
        # 'cls_dicts': [{'cls': ListItemButton,
        #                'kwargs': {'text': "Left"}},
        #               {'cls': ListItemLabel,
        #                'kwargs': {'text': "Middle",
        #                           'is_representing_cls': True}},
        #               {'cls': ListItemButton,
        #                'kwargs': {'text': "Right"}}]
# There is an index to the data item this composite list item view
# represents. Get it from kwargs and pass it along to children in the
# loop below.
for cls_dict in cls_dicts:
cls = cls_dict['cls']
cls_kwargs = cls_dict.get('kwargs', None)
if cls_kwargs:
cls_kwargs['index'] = index
if 'text' not in cls_kwargs:
cls_kwargs['text'] = text
if 'is_representing_cls' in cls_kwargs:
del cls_kwargs['is_representing_cls']
self.representing_cls = cls
self.add_widget(cls(**cls_kwargs))
else:
cls_kwargs = {}
cls_kwargs['index'] = index
if text is not None:
cls_kwargs['text'] = text
self.add_widget(cls(**cls_kwargs))
def select(self, *args):
self.background_color = self.selected_color
def deselect(self, *args):
self.background_color = self.deselected_color
def select_from_child(self, child, *args):
for c in self.children:
if c is not child:
c.select_from_composite(*args)
def deselect_from_child(self, child, *args):
for c in self.children:
if c is not child:
c.deselect_from_composite(*args)
def __repr__(self):
if self.representing_cls is not None:
return '<%r>, representing <%s>' % (
self.representing_cls, self.__class__.__name__)
else:
return '<%s>' % (self.__class__.__name__)
Builder.load_string('''
<ListView>:
container: container
ScrollView:
pos: root.pos
on_scroll_y: root._scroll(args[1])
do_scroll_x: False
GridLayout:
cols: 1
id: container
size_hint_y: None
''')
class ListView(AbstractView, EventDispatcher):
''':class:`~kivy.uix.listview.ListView` is a primary high-level widget,
handling the common task of presenting items in a scrolling list.
Flexibility is afforded by use of a variety of adapters to interface with
data.
The adapter property comes via the mixed in
:class:`~kivy.uix.abstractview.AbstractView` class.
:class:`~kivy.uix.listview.ListView` also subclasses
:class:`EventDispatcher` for scrolling. The event *on_scroll_complete* is
used in refreshing the main view.
For a simple list of string items, without selection, use
:class:`~kivy.adapters.simplelistadapter.SimpleListAdapter`. For list items
that respond to selection, ranging from simple items to advanced
composites, use :class:`~kivy.adapters.listadapter.ListAdapter`. For an
alternate powerful adapter, use
:class:`~kivy.adapters.dictadapter.DictAdapter`, rounding out the choice
for designing highly interactive lists.
:Events:
`on_scroll_complete`: (boolean, )
Fired when scrolling completes.
'''
divider = ObjectProperty(None)
'''[TODO] Not used.
'''
divider_height = NumericProperty(2)
'''[TODO] Not used.
'''
container = ObjectProperty(None)
'''The container is a :class:`~kivy.uix.gridlayout.GridLayout` widget held
within a :class:`~kivy.uix.scrollview.ScrollView` widget. (See the
associated kv block in the Builder.load_string() setup). Item view
instances managed and provided by the adapter are added to this container.
The container is cleared with a call to clear_widgets() when the list is
rebuilt by the populate() method. A padding
:class:`~kivy.uix.widget.Widget` instance is also added as needed,
depending on the row height calculations.
:attr:`container` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
row_height = NumericProperty(None)
'''The row_height property is calculated on the basis of the height of the
container and the count of items.
:attr:`row_height` is a :class:`~kivy.properties.NumericProperty` and
defaults to None.
'''
item_strings = ListProperty([])
'''If item_strings is provided, create an instance of
:class:`~kivy.adapters.simplelistadapter.SimpleListAdapter` with this list
of strings, and use it to manage a no-selection list.
:attr:`item_strings` is a :class:`~kivy.properties.ListProperty` and
defaults to [].
'''
scrolling = BooleanProperty(False)
'''If the scroll_to() method is called while scrolling operations are
happening, a call recursion error can occur. scroll_to() checks to see that
scrolling is False before calling populate(). scroll_to() dispatches a
scrolling_complete event, which sets scrolling back to False.
:attr:`scrolling` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
_index = NumericProperty(0)
_sizes = DictProperty({})
_count = NumericProperty(0)
_wstart = NumericProperty(0)
_wend = NumericProperty(-1)
__events__ = ('on_scroll_complete', )
def __init__(self, **kwargs):
# Check for an adapter argument. If it doesn't exist, we
# check for item_strings in use with SimpleListAdapter
# to make a simple list.
if 'adapter' not in kwargs:
if 'item_strings' not in kwargs:
# Could be missing, or it could be that the ListView is
# declared in a kv file. If kv is in use, and item_strings is
# declared there, then item_strings will not be set until after
# __init__(). So, the data=[] set will temporarily serve for
# SimpleListAdapter instantiation, with the binding to
# item_strings_changed() handling the eventual set of the
# item_strings property from the application of kv rules.
list_adapter = SimpleListAdapter(data=[],
cls=Label)
else:
list_adapter = SimpleListAdapter(data=kwargs['item_strings'],
cls=Label)
kwargs['adapter'] = list_adapter
super(ListView, self).__init__(**kwargs)
populate = self._trigger_populate = Clock.create_trigger(
self._spopulate, -1)
self._trigger_reset_populate = \
Clock.create_trigger(self._reset_spopulate, -1)
fbind = self.fbind
fbind('size', populate)
fbind('pos', populate)
fbind('item_strings', self.item_strings_changed)
fbind('adapter', populate)
bind_adapter = self._trigger_bind_adapter = Clock.create_trigger(
lambda dt: self.adapter.bind_triggers_to_view(
self._trigger_reset_populate),
-1)
fbind('adapter', bind_adapter)
# The bindings setup above sets self._trigger_populate() to fire
# when the adapter changes, but we also need this binding for when
# adapter.data and other possible triggers change for view updating.
# We don't know that these are, so we ask the adapter to set up the
# bindings back to the view updating function here.
bind_adapter()
# Added to set data when item_strings is set in a kv template, but it will
# be good to have also if item_strings is reset generally.
def item_strings_changed(self, *args):
self.adapter.data = self.item_strings
def _scroll(self, scroll_y):
if self.row_height is None:
return
self._scroll_y = scroll_y
scroll_y = 1 - min(1, max(scroll_y, 0))
container = self.container
mstart = (container.height - self.height) * scroll_y
mend = mstart + self.height
# convert distance to index
rh = self.row_height
istart = int(ceil(mstart / rh))
iend = int(floor(mend / rh))
istart = max(0, istart - 1)
iend = max(0, iend - 1)
if istart < self._wstart:
rstart = max(0, istart - 10)
self.populate(rstart, iend)
self._wstart = rstart
self._wend = iend
elif iend > self._wend:
self.populate(istart, iend + 10)
self._wstart = istart
self._wend = iend + 10
def _spopulate(self, *args):
self.populate()
def _reset_spopulate(self, *args):
self._wend = -1
self.populate()
# simulate the scroll again, only if we already scrolled before
# the position might not be the same, mostly because we don't know the
# size of the new item.
if hasattr(self, '_scroll_y'):
self._scroll(self._scroll_y)
def populate(self, istart=None, iend=None):
container = self.container
sizes = self._sizes
rh = self.row_height
# ensure we know what we want to show
if istart is None:
istart = self._wstart
iend = self._wend
# clear the view
container.clear_widgets()
# guess only ?
if iend is not None and iend != -1:
# fill with a "padding"
fh = 0
for x in range(istart):
fh += sizes[x] if x in sizes else rh
container.add_widget(Widget(size_hint_y=None, height=fh))
# now fill with real item_view
index = istart
            while index <= iend:
                item_view = self.adapter.get_view(index)
                if item_view is not None:
                    # record the height under this view's index, then advance
                    sizes[index] = item_view.height
                    container.add_widget(item_view)
                index += 1
else:
available_height = self.height
real_height = 0
index = self._index
count = 0
while available_height > 0:
item_view = self.adapter.get_view(index)
if item_view is None:
break
sizes[index] = item_view.height
index += 1
count += 1
container.add_widget(item_view)
available_height -= item_view.height
real_height += item_view.height
self._count = count
# extrapolate the full size of the container from the size
# of view instances in the adapter
if count:
container.height = \
real_height / count * self.adapter.get_count()
if self.row_height is None:
self.row_height = real_height / count
def scroll_to(self, index=0):
if not self.scrolling:
self.scrolling = True
self._index = index
self.populate()
self.dispatch('on_scroll_complete')
def on_scroll_complete(self, *args):
self.scrolling = False
|
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.23
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1beta1FlowSchemaCondition(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'last_transition_time': 'datetime',
'message': 'str',
'reason': 'str',
'status': 'str',
'type': 'str'
}
attribute_map = {
'last_transition_time': 'lastTransitionTime',
'message': 'message',
'reason': 'reason',
'status': 'status',
'type': 'type'
}
def __init__(self, last_transition_time=None, message=None, reason=None, status=None, type=None, local_vars_configuration=None): # noqa: E501
"""V1beta1FlowSchemaCondition - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._last_transition_time = None
self._message = None
self._reason = None
self._status = None
self._type = None
self.discriminator = None
if last_transition_time is not None:
self.last_transition_time = last_transition_time
if message is not None:
self.message = message
if reason is not None:
self.reason = reason
if status is not None:
self.status = status
if type is not None:
self.type = type
@property
def last_transition_time(self):
"""Gets the last_transition_time of this V1beta1FlowSchemaCondition. # noqa: E501
`lastTransitionTime` is the last time the condition transitioned from one status to another. # noqa: E501
:return: The last_transition_time of this V1beta1FlowSchemaCondition. # noqa: E501
:rtype: datetime
"""
return self._last_transition_time
@last_transition_time.setter
def last_transition_time(self, last_transition_time):
"""Sets the last_transition_time of this V1beta1FlowSchemaCondition.
`lastTransitionTime` is the last time the condition transitioned from one status to another. # noqa: E501
:param last_transition_time: The last_transition_time of this V1beta1FlowSchemaCondition. # noqa: E501
:type: datetime
"""
self._last_transition_time = last_transition_time
@property
def message(self):
"""Gets the message of this V1beta1FlowSchemaCondition. # noqa: E501
`message` is a human-readable message indicating details about last transition. # noqa: E501
:return: The message of this V1beta1FlowSchemaCondition. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this V1beta1FlowSchemaCondition.
`message` is a human-readable message indicating details about last transition. # noqa: E501
:param message: The message of this V1beta1FlowSchemaCondition. # noqa: E501
:type: str
"""
self._message = message
@property
def reason(self):
"""Gets the reason of this V1beta1FlowSchemaCondition. # noqa: E501
`reason` is a unique, one-word, CamelCase reason for the condition's last transition. # noqa: E501
:return: The reason of this V1beta1FlowSchemaCondition. # noqa: E501
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""Sets the reason of this V1beta1FlowSchemaCondition.
`reason` is a unique, one-word, CamelCase reason for the condition's last transition. # noqa: E501
:param reason: The reason of this V1beta1FlowSchemaCondition. # noqa: E501
:type: str
"""
self._reason = reason
@property
def status(self):
"""Gets the status of this V1beta1FlowSchemaCondition. # noqa: E501
`status` is the status of the condition. Can be True, False, Unknown. Required. # noqa: E501
:return: The status of this V1beta1FlowSchemaCondition. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1beta1FlowSchemaCondition.
`status` is the status of the condition. Can be True, False, Unknown. Required. # noqa: E501
:param status: The status of this V1beta1FlowSchemaCondition. # noqa: E501
:type: str
"""
self._status = status
@property
def type(self):
"""Gets the type of this V1beta1FlowSchemaCondition. # noqa: E501
`type` is the type of the condition. Required. # noqa: E501
:return: The type of this V1beta1FlowSchemaCondition. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this V1beta1FlowSchemaCondition.
`type` is the type of the condition. Required. # noqa: E501
:param type: The type of this V1beta1FlowSchemaCondition. # noqa: E501
:type: str
"""
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1FlowSchemaCondition):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1FlowSchemaCondition):
return True
return self.to_dict() != other.to_dict()
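# Illustrative usage sketch (not part of the generated client): build a
# condition and round-trip it through to_dict(). The field values below are
# made up for demonstration purposes.
def _example_condition_dict():
    condition = V1beta1FlowSchemaCondition(
        message="the referenced PriorityLevelConfiguration was found",
        reason="Found",
        status="False",
        type="Dangling",
    )
    # to_dict() walks openapi_types, recursing into nested models and lists;
    # every field here is a plain string, so the result is a flat dict.
    return condition.to_dict()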
|
|
from __future__ import absolute_import
from __future__ import with_statement
import warnings
from mock import patch
from kombu.connection import BrokerConnection
from kombu.exceptions import StdChannelError
from kombu.transport import virtual
from kombu.utils import uuid
from kombu.tests.compat import catch_warnings
from kombu.tests.utils import TestCase
from kombu.tests.utils import Mock, redirect_stdouts
def client(**kwargs):
return BrokerConnection(transport="kombu.transport.virtual.Transport",
**kwargs)
def memory_client():
return BrokerConnection(transport="memory")
class test_BrokerState(TestCase):
def test_constructor(self):
s = virtual.BrokerState()
self.assertTrue(hasattr(s, "exchanges"))
self.assertTrue(hasattr(s, "bindings"))
t = virtual.BrokerState(exchanges=16, bindings=32)
self.assertEqual(t.exchanges, 16)
self.assertEqual(t.bindings, 32)
class test_QoS(TestCase):
def setUp(self):
self.q = virtual.QoS(client().channel(), prefetch_count=10)
def tearDown(self):
self.q._on_collect.cancel()
def test_constructor(self):
self.assertTrue(self.q.channel)
self.assertTrue(self.q.prefetch_count)
self.assertFalse(self.q._delivered.restored)
self.assertTrue(self.q._on_collect)
@redirect_stdouts
def test_can_consume(self, stdout, stderr):
_restored = []
class RestoreChannel(virtual.Channel):
do_restore = True
def _restore(self, message):
_restored.append(message)
self.assertTrue(self.q.can_consume())
for i in range(self.q.prefetch_count - 1):
self.q.append(i, uuid())
self.assertTrue(self.q.can_consume())
self.q.append(i + 1, uuid())
self.assertFalse(self.q.can_consume())
tag1 = self.q._delivered.keys()[0]
self.q.ack(tag1)
self.assertTrue(self.q.can_consume())
tag2 = uuid()
self.q.append(i + 2, tag2)
self.assertFalse(self.q.can_consume())
self.q.reject(tag2)
self.assertTrue(self.q.can_consume())
self.q.channel = RestoreChannel(self.q.channel.connection)
tag3 = uuid()
self.q.append(i + 3, tag3)
self.q.reject(tag3, requeue=True)
self.q._flush()
self.q.restore_unacked_once()
self.assertListEqual(_restored, [11, 9, 8, 7, 6, 5, 4, 3, 2, 1])
self.assertTrue(self.q._delivered.restored)
self.assertFalse(self.q._delivered)
self.q.restore_unacked_once()
self.q._delivered.restored = False
self.q.restore_unacked_once()
self.assertTrue(stderr.getvalue())
self.assertFalse(stdout.getvalue())
def test_get(self):
self.q._delivered["foo"] = 1
self.assertEqual(self.q.get("foo"), 1)
class test_Message(TestCase):
def test_create(self):
c = client().channel()
data = c.prepare_message("the quick brown fox...")
tag = data["properties"]["delivery_tag"] = uuid()
message = c.message_to_python(data)
self.assertIsInstance(message, virtual.Message)
self.assertIs(message, c.message_to_python(message))
self.assertEqual(message.body,
"the quick brown fox...".encode("utf-8"))
self.assertTrue(message.delivery_tag, tag)
def test_create_no_body(self):
virtual.Message(Mock(), {
"body": None,
"properties": {"delivery_tag": 1}})
def test_serializable(self):
c = client().channel()
data = c.prepare_message("the quick brown fox...")
tag = data["properties"]["delivery_tag"] = uuid()
message = c.message_to_python(data)
dict_ = message.serializable()
self.assertEqual(dict_["body"],
"the quick brown fox...".encode("utf-8"))
self.assertEqual(dict_["properties"]["delivery_tag"], tag)
class test_AbstractChannel(TestCase):
def test_get(self):
with self.assertRaises(NotImplementedError):
virtual.AbstractChannel()._get("queue")
def test_put(self):
with self.assertRaises(NotImplementedError):
virtual.AbstractChannel()._put("queue", "m")
def test_size(self):
self.assertEqual(virtual.AbstractChannel()._size("queue"), 0)
def test_purge(self):
with self.assertRaises(NotImplementedError):
virtual.AbstractChannel()._purge("queue")
def test_delete(self):
with self.assertRaises(NotImplementedError):
virtual.AbstractChannel()._delete("queue")
def test_new_queue(self):
self.assertIsNone(virtual.AbstractChannel()._new_queue("queue"))
def test_has_queue(self):
self.assertTrue(virtual.AbstractChannel()._has_queue("queue"))
def test_poll(self):
class Cycle(object):
called = False
def get(self):
self.called = True
return True
cycle = Cycle()
self.assertTrue(virtual.AbstractChannel()._poll(cycle))
self.assertTrue(cycle.called)
class test_Channel(TestCase):
def setUp(self):
self.channel = client().channel()
def tearDown(self):
if self.channel._qos is not None:
self.channel._qos._on_collect.cancel()
def test_exchange_declare(self):
c = self.channel
c.exchange_declare("test_exchange_declare", "direct",
durable=True, auto_delete=True)
self.assertIn("test_exchange_declare", c.state.exchanges)
# can declare again with same values
c.exchange_declare("test_exchange_declare", "direct",
durable=True, auto_delete=True)
self.assertIn("test_exchange_declare", c.state.exchanges)
# using different values raises NotEquivalentError
with self.assertRaises(virtual.NotEquivalentError):
c.exchange_declare("test_exchange_declare", "direct",
durable=False, auto_delete=True)
def test_exchange_delete(self, ex="test_exchange_delete"):
class PurgeChannel(virtual.Channel):
purged = []
def _purge(self, queue):
self.purged.append(queue)
c = PurgeChannel(self.channel.connection)
c.exchange_declare(ex, "direct", durable=True, auto_delete=True)
self.assertIn(ex, c.state.exchanges)
self.assertNotIn(ex, c.state.bindings) # no bindings yet
c.exchange_delete(ex)
self.assertNotIn(ex, c.state.exchanges)
c.exchange_declare(ex, "direct", durable=True, auto_delete=True)
c.queue_declare(ex)
c.queue_bind(ex, ex, ex)
self.assertTrue(c.state.bindings[ex])
c.exchange_delete(ex)
self.assertNotIn(ex, c.state.bindings)
self.assertIn(ex, c.purged)
def test_queue_delete__if_empty(self, n="test_queue_delete__if_empty"):
class PurgeChannel(virtual.Channel):
purged = []
size = 30
def _purge(self, queue):
self.purged.append(queue)
def _size(self, queue):
return self.size
c = PurgeChannel(self.channel.connection)
c.exchange_declare(n)
c.queue_declare(n)
c.queue_bind(n, n, n)
c.queue_bind(n, n, n) # tests code path that returns
# if queue already bound.
c.queue_delete(n, if_empty=True)
self.assertIn(n, c.state.bindings)
c.size = 0
c.queue_delete(n, if_empty=True)
self.assertNotIn(n, c.state.bindings)
self.assertIn(n, c.purged)
def test_queue_purge(self, n="test_queue_purge"):
class PurgeChannel(virtual.Channel):
purged = []
def _purge(self, queue):
self.purged.append(queue)
c = PurgeChannel(self.channel.connection)
c.exchange_declare(n)
c.queue_declare(n)
c.queue_bind(n, n, n)
c.queue_purge(n)
self.assertIn(n, c.purged)
def test_basic_publish__get__consume__restore(self,
n="test_basic_publish"):
c = memory_client().channel()
c.exchange_declare(n)
c.queue_declare(n)
c.queue_bind(n, n, n)
c.queue_declare(n + "2")
c.queue_bind(n + "2", n, n)
m = c.prepare_message("nthex quick brown fox...")
c.basic_publish(m, n, n)
r1 = c.message_to_python(c.basic_get(n))
self.assertTrue(r1)
self.assertEqual(r1.body,
"nthex quick brown fox...".encode("utf-8"))
self.assertIsNone(c.basic_get(n))
consumer_tag = uuid()
c.basic_consume(n + "2", False, consumer_tag=consumer_tag,
callback=lambda *a: None)
self.assertIn(n + "2", c._active_queues)
r2, _ = c.drain_events()
r2 = c.message_to_python(r2)
self.assertEqual(r2.body,
"nthex quick brown fox...".encode("utf-8"))
self.assertEqual(r2.delivery_info["exchange"], n)
self.assertEqual(r2.delivery_info["routing_key"], n)
with self.assertRaises(virtual.Empty):
c.drain_events()
c.basic_cancel(consumer_tag)
c._restore(r2)
r3 = c.message_to_python(c.basic_get(n))
self.assertTrue(r3)
self.assertEqual(r3.body, "nthex quick brown fox...".encode("utf-8"))
self.assertIsNone(c.basic_get(n))
def test_basic_ack(self):
class MockQoS(virtual.QoS):
was_acked = False
def ack(self, delivery_tag):
self.was_acked = True
self.channel._qos = MockQoS(self.channel)
self.channel.basic_ack("foo")
self.assertTrue(self.channel._qos.was_acked)
def test_basic_recover__requeue(self):
class MockQoS(virtual.QoS):
was_restored = False
def restore_unacked(self):
self.was_restored = True
self.channel._qos = MockQoS(self.channel)
self.channel.basic_recover(requeue=True)
self.assertTrue(self.channel._qos.was_restored)
def test_restore_unacked_raises_BaseException(self):
q = self.channel.qos
q._flush = Mock()
q._delivered = {1: 1}
q.channel._restore = Mock()
q.channel._restore.side_effect = SystemExit
errors = q.restore_unacked()
self.assertIsInstance(errors[0][0], SystemExit)
self.assertEqual(errors[0][1], 1)
self.assertFalse(q._delivered)
@patch("kombu.transport.virtual.emergency_dump_state")
@patch("kombu.transport.virtual.say")
def test_restore_unacked_once_when_unrestored(self, say,
emergency_dump_state):
q = self.channel.qos
q._flush = Mock()
class State(dict):
restored = False
q._delivered = State({1: 1})
ru = q.restore_unacked = Mock()
exc = None
try:
raise KeyError()
        except KeyError as exc_:
exc = exc_
ru.return_value = [(exc, 1)]
self.channel.do_restore = True
q.restore_unacked_once()
self.assertTrue(say.called)
self.assertTrue(emergency_dump_state.called)
def test_basic_recover(self):
with self.assertRaises(NotImplementedError):
self.channel.basic_recover(requeue=False)
def test_basic_reject(self):
class MockQoS(virtual.QoS):
was_rejected = False
def reject(self, delivery_tag, requeue=False):
self.was_rejected = True
self.channel._qos = MockQoS(self.channel)
self.channel.basic_reject("foo")
self.assertTrue(self.channel._qos.was_rejected)
def test_basic_qos(self):
self.channel.basic_qos(prefetch_count=128)
self.assertEqual(self.channel._qos.prefetch_count, 128)
def test_lookup__undeliverable(self, n="test_lookup__undeliverable"):
warnings.resetwarnings()
with catch_warnings(record=True) as log:
self.assertListEqual(self.channel._lookup(n, n, "ae.undeliver"),
["ae.undeliver"])
self.assertTrue(log)
self.assertIn("could not be delivered", log[0].message.args[0])
def test_context(self):
x = self.channel.__enter__()
self.assertIs(x, self.channel)
x.__exit__()
self.assertTrue(x.closed)
def test_cycle_property(self):
self.assertTrue(self.channel.cycle)
def test_flow(self):
with self.assertRaises(NotImplementedError):
self.channel.flow(False)
def test_close_when_no_connection(self):
self.channel.connection = None
self.channel.close()
self.assertTrue(self.channel.closed)
def test_drain_events_has_get_many(self):
c = self.channel
c._get_many = Mock()
c._poll = Mock()
c._consumers = [1]
c._qos = Mock()
c._qos.can_consume.return_value = True
c.drain_events(timeout=10.0)
c._get_many.assert_called_with(c._active_queues, timeout=10.0)
def test_get_exchanges(self):
self.channel.exchange_declare(exchange="foo")
self.assertTrue(self.channel.get_exchanges())
def test_basic_cancel_not_in_active_queues(self):
c = self.channel
c._consumers.add("x")
c._tag_to_queue["x"] = "foo"
c._active_queues = Mock()
c._active_queues.remove.side_effect = ValueError()
c.basic_cancel("x")
c._active_queues.remove.assert_called_with("foo")
def test_basic_cancel_unknown_ctag(self):
self.assertIsNone(self.channel.basic_cancel("unknown-tag"))
def test_list_bindings(self):
c = self.channel
c.exchange_declare(exchange="foo")
c.queue_declare(queue="q")
c.queue_bind(queue="q", exchange="foo", routing_key="rk")
self.assertIn(("q", "foo", "rk"), list(c.list_bindings()))
def test_after_reply_message_received(self):
c = self.channel
c.queue_delete = Mock()
c.after_reply_message_received("foo")
c.queue_delete.assert_called_with("foo")
def test_queue_delete_unknown_queue(self):
self.assertIsNone(self.channel.queue_delete("xiwjqjwel"))
def test_queue_declare_passive(self):
has_queue = self.channel._has_queue = Mock()
has_queue.return_value = False
with self.assertRaises(StdChannelError):
self.channel.queue_declare(queue="21wisdjwqe", passive=True)
class test_Transport(TestCase):
def setUp(self):
self.transport = client().transport
def test_custom_polling_interval(self):
x = client(transport_options=dict(polling_interval=32.3))
self.assertEqual(x.transport.polling_interval, 32.3)
def test_close_connection(self):
c1 = self.transport.create_channel(self.transport)
c2 = self.transport.create_channel(self.transport)
self.assertEqual(len(self.transport.channels), 2)
self.transport.close_connection(self.transport)
self.assertFalse(self.transport.channels)
        del c1  # so pyflakes doesn't complain
        del c2
def test_drain_channel(self):
channel = self.transport.create_channel(self.transport)
with self.assertRaises(virtual.Empty):
self.transport._drain_channel(channel)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
IMPORTANT: This code is taken directly from Tensorflow
(https://github.com/tensorflow/tensorflow) and is copied temporarily
until it is available in a packaged Tensorflow version on pypi.
TODO(dennybritz): Delete this code when it becomes available in TF.
A library of helpers for use with SamplingDecoders.
"""
# pylint: skip-file
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.ops.distributions import bernoulli
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import base as layers_base
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.util import nest
from seq2seq.contrib.seq2seq import decoder
__all__ = [
"Helper",
"TrainingHelper",
"GreedyEmbeddingHelper",
"CustomHelper",
"ScheduledEmbeddingTrainingHelper",
"ScheduledOutputTrainingHelper",
]
_transpose_batch_time = decoder._transpose_batch_time # pylint: disable=protected-access
def _unstack_ta(inp):
return tensor_array_ops.TensorArray(
dtype=inp.dtype, size=array_ops.shape(inp)[0],
element_shape=inp.get_shape()[1:]).unstack(inp)
@six.add_metaclass(abc.ABCMeta)
class Helper(object):
"""Helper interface. Helper instances are used by SamplingDecoder."""
@abc.abstractproperty
def batch_size(self):
"""Returns a scalar int32 tensor."""
raise NotImplementedError("batch_size has not been implemented")
@abc.abstractmethod
def initialize(self, name=None):
"""Returns `(initial_finished, initial_inputs)`."""
pass
@abc.abstractmethod
def sample(self, time, outputs, state, name=None):
"""Returns `sample_ids`."""
pass
@abc.abstractmethod
def next_inputs(self, time, outputs, state, sample_ids, name=None):
"""Returns `(finished, next_inputs, next_state)`."""
pass
class CustomHelper(Helper):
"""Base abstract class that allows the user to customize sampling."""
def __init__(self, initialize_fn, sample_fn, next_inputs_fn):
"""Initializer.
Args:
initialize_fn: callable that returns `(finished, next_inputs)`
for the first iteration.
sample_fn: callable that takes `(time, outputs, state)`
and emits tensor `sample_ids`.
next_inputs_fn: callable that takes `(time, outputs, state, sample_ids)`
and emits `(finished, next_inputs, next_state)`.
"""
self._initialize_fn = initialize_fn
self._sample_fn = sample_fn
self._next_inputs_fn = next_inputs_fn
self._batch_size = None
@property
def batch_size(self):
if self._batch_size is None:
raise ValueError("batch_size accessed before initialize was called")
return self._batch_size
def initialize(self, name=None):
with ops.name_scope(name, "%sInitialize" % type(self).__name__):
(finished, next_inputs) = self._initialize_fn()
if self._batch_size is None:
self._batch_size = array_ops.size(finished)
return (finished, next_inputs)
def sample(self, time, outputs, state, name=None):
with ops.name_scope(
name, "%sSample" % type(self).__name__, (time, outputs, state)):
return self._sample_fn(time=time, outputs=outputs, state=state)
def next_inputs(self, time, outputs, state, sample_ids, name=None):
with ops.name_scope(
name, "%sNextInputs" % type(self).__name__, (time, outputs, state)):
return self._next_inputs_fn(
time=time, outputs=outputs, state=state, sample_ids=sample_ids)
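# Illustrative sketch (not part of the original TF code): wiring CustomHelper
# with callables that greedily feed back embedded argmax ids. `start_inputs`
# (a [batch, dim] float tensor), `embedding_fn` (int32 ids -> embeddings) and
# `end_token` (an int32 scalar) are assumed to be provided by the caller.
def _example_custom_helper(start_inputs, embedding_fn, end_token):
    def initialize_fn():
        batch_size = array_ops.shape(start_inputs)[0]
        finished = array_ops.tile([False], [batch_size])
        return (finished, start_inputs)

    def sample_fn(time, outputs, state):
        return math_ops.cast(math_ops.argmax(outputs, axis=-1), dtypes.int32)

    def next_inputs_fn(time, outputs, state, sample_ids):
        finished = math_ops.equal(sample_ids, end_token)
        return (finished, embedding_fn(sample_ids), state)

    return CustomHelper(initialize_fn, sample_fn, next_inputs_fn)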
class TrainingHelper(Helper):
"""A helper for use during training. Only reads inputs.
Returned sample_ids are the argmax of the RNN output logits.
"""
def __init__(self, inputs, sequence_length, time_major=False, name=None):
"""Initializer.
Args:
inputs: A (structure of) input tensors.
sequence_length: An int32 vector tensor.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
name: Name scope for any created operations.
Raises:
ValueError: if `sequence_length` is not a 1D tensor.
"""
with ops.name_scope(name, "TrainingHelper", [inputs, sequence_length]):
inputs = ops.convert_to_tensor(inputs, name="inputs")
if not time_major:
inputs = nest.map_structure(_transpose_batch_time, inputs)
self._input_tas = nest.map_structure(_unstack_ta, inputs)
self._sequence_length = ops.convert_to_tensor(
sequence_length, name="sequence_length")
if self._sequence_length.get_shape().ndims != 1:
raise ValueError(
"Expected sequence_length to be a vector, but received shape: %s" %
self._sequence_length.get_shape())
self._zero_inputs = nest.map_structure(
lambda inp: array_ops.zeros_like(inp[0, :]), inputs)
self._batch_size = array_ops.size(sequence_length)
@property
def batch_size(self):
return self._batch_size
def initialize(self, name=None):
with ops.name_scope(name, "TrainingHelperInitialize"):
finished = math_ops.equal(0, self._sequence_length)
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished, lambda: self._zero_inputs,
lambda: nest.map_structure(lambda inp: inp.read(0), self._input_tas))
return (finished, next_inputs)
def sample(self, time, outputs, name=None, **unused_kwargs):
with ops.name_scope(name, "TrainingHelperSample", [time, outputs]):
sample_ids = math_ops.cast(
math_ops.argmax(outputs, axis=-1), dtypes.int32)
return sample_ids
def next_inputs(self, time, outputs, state, name=None, **unused_kwargs):
"""next_inputs_fn for TrainingHelper."""
with ops.name_scope(name, "TrainingHelperNextInputs",
[time, outputs, state]):
next_time = time + 1
finished = (next_time >= self._sequence_length)
all_finished = math_ops.reduce_all(finished)
def read_from_ta(inp):
return inp.read(next_time)
next_inputs = control_flow_ops.cond(
all_finished, lambda: self._zero_inputs,
lambda: nest.map_structure(read_from_ta, self._input_tas))
return (finished, next_inputs, state)
class ScheduledEmbeddingTrainingHelper(TrainingHelper):
"""A training helper that adds scheduled sampling.
Returns -1s for sample_ids where no sampling took place; valid sample id
values elsewhere.
"""
def __init__(self, inputs, sequence_length, embedding, sampling_probability,
time_major=False, seed=None, scheduling_seed=None, name=None):
"""Initializer.
Args:
inputs: A (structure of) input tensors.
sequence_length: An int32 vector tensor.
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`.
sampling_probability: A 0D `float32` tensor: the probability of sampling
categorically from the output ids instead of reading directly from the
inputs.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
seed: The sampling seed.
scheduling_seed: The schedule decision rule sampling seed.
name: Name scope for any created operations.
Raises:
ValueError: if `sampling_probability` is not a scalar or vector.
"""
with ops.name_scope(name, "ScheduledEmbeddingSamplingWrapper",
[embedding, sampling_probability]):
if callable(embedding):
self._embedding_fn = embedding
else:
self._embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(embedding, ids))
self._sampling_probability = ops.convert_to_tensor(
sampling_probability, name="sampling_probability")
if self._sampling_probability.get_shape().ndims not in (0, 1):
raise ValueError(
"sampling_probability must be either a scalar or a vector. "
"saw shape: %s" % (self._sampling_probability.get_shape()))
self._seed = seed
self._scheduling_seed = scheduling_seed
super(ScheduledEmbeddingTrainingHelper, self).__init__(
inputs=inputs,
sequence_length=sequence_length,
time_major=time_major,
name=name)
def initialize(self, name=None):
return super(ScheduledEmbeddingTrainingHelper, self).initialize(name=name)
def sample(self, time, outputs, state, name=None):
with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
[time, outputs, state]):
# Return -1s where we did not sample, and sample_ids elsewhere
select_sample_noise = random_ops.random_uniform(
[self.batch_size], seed=self._scheduling_seed)
select_sample = (self._sampling_probability > select_sample_noise)
sample_id_sampler = categorical.Categorical(logits=outputs)
return array_ops.where(
select_sample,
sample_id_sampler.sample(seed=self._seed),
array_ops.tile([-1], [self.batch_size]))
def next_inputs(self, time, outputs, state, sample_ids, name=None):
with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
[time, outputs, state, sample_ids]):
(finished, base_next_inputs, state) = (
super(ScheduledEmbeddingTrainingHelper, self).next_inputs(
time=time,
outputs=outputs,
state=state,
sample_ids=sample_ids,
name=name))
def maybe_sample():
"""Perform scheduled sampling."""
where_sampling = math_ops.cast(
array_ops.where(sample_ids > -1), dtypes.int32)
where_not_sampling = math_ops.cast(
array_ops.where(sample_ids <= -1), dtypes.int32)
where_sampling_flat = array_ops.reshape(where_sampling, [-1])
where_not_sampling_flat = array_ops.reshape(where_not_sampling, [-1])
sample_ids_sampling = array_ops.gather(sample_ids, where_sampling_flat)
inputs_not_sampling = array_ops.gather(
base_next_inputs, where_not_sampling_flat)
sampled_next_inputs = self._embedding_fn(sample_ids_sampling)
base_shape = array_ops.shape(base_next_inputs)
return (array_ops.scatter_nd(indices=where_sampling,
updates=sampled_next_inputs,
shape=base_shape)
+ array_ops.scatter_nd(indices=where_not_sampling,
updates=inputs_not_sampling,
shape=base_shape))
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished, lambda: base_next_inputs, maybe_sample)
return (finished, next_inputs, state)
class ScheduledOutputTrainingHelper(TrainingHelper):
"""A training helper that adds scheduled sampling directly to outputs.
Returns False for sample_ids where no sampling took place; True elsewhere.
"""
def __init__(self, inputs, sequence_length, sampling_probability,
time_major=False, seed=None, next_input_layer=None,
auxiliary_inputs=None, name=None):
"""Initializer.
Args:
      inputs: A (structure of) input tensors.
sequence_length: An int32 vector tensor.
sampling_probability: A 0D `float32` tensor: the probability of sampling
from the outputs instead of reading directly from the inputs.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
seed: The sampling seed.
next_input_layer: (Optional) An instance of `tf.layers.Layer`, i.e.,
`tf.layers.Dense`. Optional layer to apply to the RNN output to create
the next input.
auxiliary_inputs: An optional (structure of) auxiliary input tensors with
a shape that matches `inputs` in all but (potentially) the final
dimension. These tensors will be concatenated to the sampled output or
the `inputs` when not sampling for use as the next input.
name: Name scope for any created operations.
Raises:
ValueError: if `sampling_probability` is not a scalar or vector.
"""
with ops.name_scope(name, "ScheduledOutputTrainingHelper",
[inputs, auxiliary_inputs, sampling_probability]):
self._sampling_probability = ops.convert_to_tensor(
sampling_probability, name="sampling_probability")
if self._sampling_probability.get_shape().ndims not in (0, 1):
raise ValueError(
"sampling_probability must be either a scalar or a vector. "
"saw shape: %s" % (self._sampling_probability.get_shape()))
if auxiliary_inputs is None:
maybe_concatenated_inputs = inputs
else:
inputs = ops.convert_to_tensor(inputs, name="inputs")
auxiliary_inputs = ops.convert_to_tensor(
auxiliary_inputs, name="auxiliary_inputs")
maybe_concatenated_inputs = nest.map_structure(
lambda x, y: array_ops.concat((x, y), -1),
inputs, auxiliary_inputs)
if not time_major:
auxiliary_inputs = nest.map_structure(
_transpose_batch_time, auxiliary_inputs)
self._auxiliary_input_tas = (
nest.map_structure(_unstack_ta, auxiliary_inputs)
if auxiliary_inputs is not None else None)
self._seed = seed
if (next_input_layer is not None and not isinstance(next_input_layer,
layers_base._Layer)): # pylint: disable=protected-access
raise TypeError("next_input_layer must be a Layer, received: %s" %
type(next_input_layer))
self._next_input_layer = next_input_layer
super(ScheduledOutputTrainingHelper, self).__init__(
inputs=maybe_concatenated_inputs,
sequence_length=sequence_length,
time_major=time_major,
name=name)
def initialize(self, name=None):
return super(ScheduledOutputTrainingHelper, self).initialize(name=name)
def sample(self, time, outputs, state, name=None):
with ops.name_scope(name, "ScheduledOutputTrainingHelperSample",
[time, outputs, state]):
sampler = bernoulli.Bernoulli(probs=self._sampling_probability)
return math_ops.cast(
sampler.sample(sample_shape=self.batch_size, seed=self._seed),
dtypes.bool)
def next_inputs(self, time, outputs, state, sample_ids, name=None):
with ops.name_scope(name, "ScheduledOutputTrainingHelperNextInputs",
[time, outputs, state, sample_ids]):
(finished, base_next_inputs, state) = (
super(ScheduledOutputTrainingHelper, self).next_inputs(
time=time,
outputs=outputs,
state=state,
sample_ids=sample_ids,
name=name))
def maybe_sample():
"""Perform scheduled sampling."""
def maybe_concatenate_auxiliary_inputs(outputs_, indices=None):
"""Concatenate outputs with auxiliary inputs, if they exist."""
if self._auxiliary_input_tas is None:
return outputs_
next_time = time + 1
auxiliary_inputs = nest.map_structure(
lambda ta: ta.read(next_time), self._auxiliary_input_tas)
if indices is not None:
auxiliary_inputs = array_ops.gather_nd(auxiliary_inputs, indices)
return nest.map_structure(
lambda x, y: array_ops.concat((x, y), -1),
outputs_, auxiliary_inputs)
if self._next_input_layer is None:
return array_ops.where(
sample_ids, maybe_concatenate_auxiliary_inputs(outputs),
base_next_inputs)
where_sampling = math_ops.cast(
array_ops.where(sample_ids), dtypes.int32)
where_not_sampling = math_ops.cast(
array_ops.where(math_ops.logical_not(sample_ids)), dtypes.int32)
outputs_sampling = array_ops.gather_nd(outputs, where_sampling)
inputs_not_sampling = array_ops.gather_nd(base_next_inputs,
where_not_sampling)
sampled_next_inputs = maybe_concatenate_auxiliary_inputs(
self._next_input_layer(outputs_sampling), where_sampling)
base_shape = array_ops.shape(base_next_inputs)
return (array_ops.scatter_nd(indices=where_sampling,
updates=sampled_next_inputs,
shape=base_shape)
+ array_ops.scatter_nd(indices=where_not_sampling,
updates=inputs_not_sampling,
shape=base_shape))
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished, lambda: base_next_inputs, maybe_sample)
return (finished, next_inputs, state)
class GreedyEmbeddingHelper(Helper):
"""A helper for use during inference.
Uses the argmax of the output (treated as logits) and passes the
result through an embedding layer to get the next input.
"""
def __init__(self, embedding, start_tokens, end_token):
"""Initializer.
Args:
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`.
start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
end_token: `int32` scalar, the token that marks end of decoding.
Raises:
      ValueError: if `start_tokens` is not a 1D `int32` tensor or
        `end_token` is not a scalar `int32` tensor.
"""
if callable(embedding):
self._embedding_fn = embedding
else:
self._embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(embedding, ids))
self._start_tokens = ops.convert_to_tensor(
start_tokens, dtype=dtypes.int32, name="start_tokens")
self._end_token = ops.convert_to_tensor(
end_token, dtype=dtypes.int32, name="end_token")
if self._start_tokens.get_shape().ndims != 1:
raise ValueError("start_tokens must be a vector")
self._batch_size = array_ops.size(start_tokens)
if self._end_token.get_shape().ndims != 0:
raise ValueError("end_token must be a scalar")
self._start_inputs = self._embedding_fn(self._start_tokens)
@property
def batch_size(self):
return self._batch_size
def initialize(self, name=None):
finished = array_ops.tile([False], [self._batch_size])
return (finished, self._start_inputs)
def sample(self, time, outputs, state, name=None):
"""sample for GreedyEmbeddingHelper."""
del time, state # unused by sample_fn
# Outputs are logits, use argmax to get the most probable id
if not isinstance(outputs, ops.Tensor):
raise TypeError("Expected outputs to be a single Tensor, got: %s" %
type(outputs))
sample_ids = math_ops.cast(
math_ops.argmax(outputs, axis=-1), dtypes.int32)
return sample_ids
def next_inputs(self, time, outputs, state, sample_ids, name=None):
"""next_inputs_fn for GreedyEmbeddingHelper."""
del time, outputs # unused by next_inputs_fn
finished = math_ops.equal(sample_ids, self._end_token)
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished,
# If we're finished, the next_inputs value doesn't matter
lambda: self._start_inputs,
lambda: self._embedding_fn(sample_ids))
return (finished, next_inputs, state)
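def _greedy_helper_usage_sketch(cell, embedding_matrix, batch_size, go_id,
                                eos_id, max_steps):
  """Hedged usage sketch, not part of the original file.

  Manually drives GreedyEmbeddingHelper through a decode loop: initialize()
  yields the start inputs, sample() takes the argmax of the logits, and
  next_inputs() embeds the sampled ids. `cell` is assumed to project its
  outputs to vocabulary-sized logits; all arguments here are stand-ins. In
  practice the helper is handed to a seq2seq decoder rather than driven by
  hand.
  """
  helper = GreedyEmbeddingHelper(
      embedding=embedding_matrix,
      start_tokens=array_ops.fill([batch_size], go_id),
      end_token=eos_id)
  finished, inputs = helper.initialize()
  state = cell.zero_state(helper.batch_size, dtypes.float32)
  for time in range(max_steps):
    outputs, state = cell(inputs, state)  # outputs treated as logits
    sample_ids = helper.sample(time, outputs, state)
    finished, inputs, state = helper.next_inputs(
        time, outputs, state, sample_ids)
  return finished, inputs, state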
|
|
"""
cf_stack.py
CloudFormation stack helper methods - currently extending boto3
Note:
Credentials are required to communicate with AWS.
aws cli profile can be passed in using --profile, or
the following ENVIRONMENT VARIABLES can be set before
running this script:
AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY
"""
import sys, os
import boto3, botocore
import argparse
import json, yaml
import logging
import time
logging.getLogger("botocore").setLevel(logging.CRITICAL)
def read_from_file(file_path):
'''
Read from a file and return a dict
:param file_path: path to file
:return: dict
'''
template_body = {}
if os.path.exists(file_path):
with open(file_path, 'r') as stream:
            if file_path.endswith(('.yaml', '.yml')):
template_body = yaml.load(stream, yaml.SafeLoader)
else:
# Assume json if not yaml
template_body = json.load(stream)
else:
logging.error('given file: ' + file_path + ' does not exist')
return template_body
def process_stack_params_arg(stack_params):
'''
stack_params should be a list of key=value pairs
:param stack_params: list of 'key=value' strings
:return: list of dicts
'''
stack_parameters=[]
for param in stack_params:
        key, value = param.split('=', 1)
stack_parameters.append({'ParameterKey': key, 'ParameterValue': value})
return stack_parameters
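# Illustrative example of the shape produced above (values are made up):
#   process_stack_params_arg(['Env=prod', 'InstanceCount=3'])
#   -> [{'ParameterKey': 'Env', 'ParameterValue': 'prod'},
#       {'ParameterKey': 'InstanceCount', 'ParameterValue': '3'}]
# This matches the Parameters argument expected by the boto3
# create_stack/update_stack calls used below.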
def query_stack_status_in_region(region, stack_name, profile=None):
session = boto3.session.Session(profile_name=profile, region_name=region)
cf_client = session.client('cloudformation')
result = None
try:
query = cf_client.describe_stacks(StackName=stack_name)
result = query['Stacks'][0]['StackStatus']
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'ValidationError':
            # Stack doesn't exist in this region
result = 'DOES_NOT_EXIST'
else:
logging.error('Unexpected error: %s' % e)
return result
def query_stack_status(region_list, stack_name, profile=None):
result = {}
for region in region_list:
logging.debug("Querying stack in region: " + region)
result[region] = query_stack_status_in_region(region, stack_name, profile=profile)
return result
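# Illustrative return shape for query_stack_status (regions/values made up):
#   {'us-east-1': 'CREATE_COMPLETE', 'eu-west-1': 'DOES_NOT_EXIST'}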
def get_stack_events_in_region(region, stack_name, profile=None):
session = boto3.session.Session(profile_name=profile, region_name=region)
cf_client = session.client('cloudformation')
events = []
try:
current_event_set = cf_client.describe_stack_events(StackName=stack_name)
events.extend(current_event_set['StackEvents'])
next_token = None
if 'NextToken' in current_event_set:
next_token = current_event_set['NextToken']
while next_token:
current_event_set = cf_client.describe_stack_events(StackName=stack_name, NextToken=next_token)
events.extend(current_event_set['StackEvents'])
next_token = None
if 'NextToken' in current_event_set:
next_token = current_event_set['NextToken']
except botocore.exceptions.ClientError as e:
logging.error("Connection error to AWS. Check your credentials. Error: %s" % e)
return events
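def get_stack_events_in_region_paginated(region, stack_name, profile=None):
    '''
    Hedged alternative sketch of the function above using boto3's built-in
    paginator for describe_stack_events, which replaces the manual NextToken
    loop. Provided for illustration only; error handling is assumed to match.
    '''
    session = boto3.session.Session(profile_name=profile, region_name=region)
    cf_client = session.client('cloudformation')
    events = []
    try:
        paginator = cf_client.get_paginator('describe_stack_events')
        for page in paginator.paginate(StackName=stack_name):
            events.extend(page['StackEvents'])
    except botocore.exceptions.ClientError as e:
        logging.error("Connection error to AWS. Check your credentials. Error: %s" % e)
    return events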
def get_stack_events(region_list, stack_name, profile=None):
result = {}
for region in region_list:
logging.debug("Querying stack in region: " + region)
result[region] = query_stack_status_in_region(region, stack_name, profile=profile)
return result
def delete_stack_in_region(region, stack_name, profile=None):
session = boto3.session.Session(profile_name=profile, region_name=region)
cf_client = session.client('cloudformation')
result = None
response = cf_client.delete_stack(StackName=stack_name)
if 'ResponseMetadata' in response and 'HTTPStatusCode' in response['ResponseMetadata']:
result = (response['ResponseMetadata']['HTTPStatusCode'] == 200)
return result
def delete_stack(region_list, stack_name, profile=None):
result = {}
for region in region_list:
logging.debug("Deleting stack in region: " + region)
result[region] = delete_stack_in_region(region, stack_name, profile=profile)
return result
def update_stack_in_region(region, stack_name, stack_params, template_body, new_stack=False, profile=None, dryrun=False):
    '''
    Create or update a stack in the given region
    :param region: region to create/update the stack in
    :param stack_name: name of the stack
    :param stack_params: stack parameters (list of dicts)
    :param template_body: body of the template as a dict or a raw json/yaml string
    :param new_stack: if true, treat this as a create and fail if the stack already exists
    :return: True on success, False on failure, None if the operation could not be started
    '''
session = boto3.session.Session(profile_name=profile, region_name=region)
cf_client = session.client('cloudformation')
result = None
create = False
if new_stack:
create = True
# TODO: create_change_set
    # Validate the template
    logging.debug("Template body:\n" + str(template_body))
    # template_body may be a dict (read from file) or a raw json/yaml string
    template_json = template_body if isinstance(template_body, str) else json.dumps(template_body)
    try:
        cf_client.validate_template(TemplateBody=template_json)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'ValidationError':
logging.error('Validation of given template failed')
else:
logging.error('Unexpected error: %s' % e)
return result
logging.debug("Stack params:\n" + str(stack_params))
# Make sure stack can be created/updated
stack_status = query_stack_status_in_region(region, stack_name, profile)
if stack_status:
if new_stack:
if stack_status != 'DOES_NOT_EXIST':
logging.error('Cannot create - stack already exists - use update to update it')
return result
else:
if stack_status == 'DOES_NOT_EXIST':
# Stack doesn't exist
create = True
else:
if not stack_status.endswith('_COMPLETE'):
logging.error('Stack is NOT in an updatable state')
                    logging.error('Current stack status is %s' % stack_status)
return result
else:
logging.error('Unable to get stack status')
return result
    if dryrun:
        logging.info("Dryrun - skipping %s of stack '%s' in region %s" %
                     ('create' if create else 'update', stack_name, region))
        return result
    stack_arn = None
if create:
        response = cf_client.create_stack(StackName=stack_name,
                                          TemplateBody=template_json,
                                          Parameters=stack_params)
if 'ResponseMetadata' in response and 'HTTPStatusCode' in response['ResponseMetadata'] \
and response['ResponseMetadata']['HTTPStatusCode'] == 200:
if 'StackId' in response:
stack_arn = response['StackId']
else:
try:
            response = cf_client.update_stack(StackName=stack_name,
                                              TemplateBody=template_json,
                                              Parameters=stack_params)
if 'ResponseMetadata' in response and 'HTTPStatusCode' in response['ResponseMetadata'] \
and response['ResponseMetadata']['HTTPStatusCode'] == 200:
if 'StackId' in response:
stack_arn = response['StackId']
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'ValidationError' and 'No updates are to be performed' in e.response['Error']['Message']:
                logging.warning('No changes detected')
result = True
else:
logging.error('Unexpected error: %s' % e)
return result
if stack_arn:
# Got a stack_arn - query the status of the stack creation
CURRENT_CHECK = 0
MAX_CHECKS = 20
SLEEP_SECONDS = 30
logging.info("Creating stack with name %s in region %s " % (stack_name, region))
logging.info("*** This may take up to %5d seconds..." % (MAX_CHECKS * SLEEP_SECONDS))
stack_status = "Unknown"
while CURRENT_CHECK <= MAX_CHECKS:
stack_status = query_stack_status_in_region(region, stack_name, profile)
            if stack_status and 'ROLLBACK' in stack_status:
logging.error('*** ' + ('create' if create else 'update') + ' failed')
logging.error('*** Waiting 5 minutes to make sure stack rolled back successfully')
time.sleep(300)
stack_status = query_stack_status_in_region(region, stack_name, profile)
                if stack_status and 'FAILED' in stack_status:
logging.critical("*** Rollback has failed")
else:
logging.info("*** Stack rolled back")
# TODO: get list of stack events
logging.error("*** Stack operation failed.")
events = get_stack_events_in_region(region, stack_name, profile)
logging.error("%s" % str(events))
if create:
# This was a new stack - remove the unstable stack
logging.info("*** Removing unstable stack ...")
delete_stack_in_region(region, stack_name, profile)
result = False
break
elif 'COMPLETE' in stack_status:
logging.info('*** ' + ('create' if create else 'update') + ' completed successfully')
break
else:
logging.info("Current stack status: %s" % stack_status)
CURRENT_CHECK += 1
time.sleep(SLEEP_SECONDS)
if CURRENT_CHECK > MAX_CHECKS and 'COMPLETE' not in stack_status:
logging.error("*** Stack has not yet stabilized in %5d seconds - check the cloudformation console or the ECS events tab for more detail" % (MAX_CHECKS * SLEEP_SECONDS))
else:
result = True
return result
def update_stack(region_list, stack_name, stack_params, template_body, profile=None, dryrun=False):
'''
Create/Update a stack in the given regions
:param region_list: list of regions to create the stack in
:param stack_name: name of the stack
:param stack_params: stack parameters (list of dicts)
:param template_body: body of the template as a dict
    :return: dict mapping each region to the result of the operation (True/False/None)
'''
result = {}
for region in region_list:
logging.debug("Creating stack in region: " + region)
result[region] = update_stack_in_region(region, stack_name, stack_params, template_body,
profile=profile, dryrun=dryrun)
return result
def create_stack(region_list, stack_name, stack_params, template_body, profile=None, dryrun=False):
'''
Create a stack in the given regions
:param region_list: list of regions to create the stack in
:param stack_name: name of the stack
:param stack_params: stack parameters (list of dicts)
:param template_body: body of the template as a dict
    :return: dict mapping each region to the result of the operation (True/False/None)
'''
result = {}
for region in region_list:
logging.debug("Creating stack in region: " + region)
result[region] = update_stack_in_region(region, stack_name, stack_params, template_body, new_stack=True,
profile=profile, dryrun=dryrun)
return result
def _update_stack_parameters(region, stack_id, parameters, profile=None, dryrun=False):
"""
Update a given stack with the given parameters
:param region: region that the stack exists in
:param stack_id: the name or ID of the stack
:param parameters: list of parameter objects
:param dryrun: if true, no changes are made
    :return: StackId on success, False otherwise
"""
if not region:
logging.error("ERROR: You must supply a region to scan")
return None
else:
logging.info('Updating Stack: ' + stack_id)
for param in parameters:
if 'PreviousValue' in param:
logging.info(' ' + param['ParameterKey'])
logging.info(' OLD: ' + param['PreviousValue'])
logging.info(' NEW: ' + param['ParameterValue'])
del param['PreviousValue']
if not dryrun:
            stack = get_stack_with_name_or_id(region, stack_id, profile=profile)
session = boto3.session.Session(profile_name=profile, region_name=region)
cf_client = session.client('cloudformation')
try:
if 'Capabilities' in stack:
status = cf_client.update_stack(StackName=stack_id, Parameters=parameters, UsePreviousTemplate=True, Capabilities=stack['Capabilities'])['StackId']
else:
status = cf_client.update_stack(StackName=stack_id, Parameters=parameters, UsePreviousTemplate=True)['StackId']
return status
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'ValidationError' and 'No updates are to be performed' in \
e.response['Error']['Message']:
logging.error(" ERROR: New value matches Old value - no update required")
else:
logging.error('Unexpected error: %s' % e)
return False
def get_stacks_with_given_parameter(region, parameter_list, profile=None):
"""
Get a list of stacks that have at least one parameter with a name in the given list
:param region: the region to scan
:param parameter_list: list of possible parameter names to look for
:return: list of stacks that have a parameter with a name in the given list
"""
stacks_with_given_parameter = []
if not region:
logging.error("ERROR: You must supply a region to scan")
return None
else:
session = boto3.session.Session(profile_name=profile, region_name=region)
cf_client = session.client('cloudformation')
cf_data = cf_client.describe_stacks()
if "Stacks" in cf_data:
for stack in cf_data["Stacks"]:
if "Parameters" in stack:
for parameter in stack["Parameters"]:
if parameter['ParameterKey'] in parameter_list:
logging.debug(
"Found parameter - " + parameter['ParameterKey'] + ' - in stack: ' + stack['StackName'])
stacks_with_given_parameter.append(stack)
break
return stacks_with_given_parameter
def get_stack_with_name_or_id(region, stack_id, profile=None):
"""
Get the stack with the given name or ID
:param region: region where the stack exists
:param stack_id: stack name or ID
:return: stack in question or empty dictionary
"""
stack = {}
if not region:
logging.error("ERROR: You must supply a region to scan")
return None
else:
session = boto3.session.Session(profile_name=profile, region_name=region)
cf_client = session.client('cloudformation')
cf_data = cf_client.describe_stacks(StackName=stack_id)
logging.debug("Getting stack description for stack with name/id: " + stack_id)
if "Stacks" in cf_data:
if len(cf_data["Stacks"]) > 1:
                # Problem - multiple stacks with this name
logging.error("Error: Multiple stacks with given name")
else:
stack = cf_data['Stacks'][0]
return stack
def get_new_parameter_list_for_update(stack, expected_value, new_value, parameter_to_change, force=False):
"""
    Given a list of possible parameter names, generate a new parameter list with
    the existing value changed to new_value wherever it matches expected_value
    :param stack: the stack to mine the parameter list from
    :param expected_value: expected existing value for the parameter
    :param new_value: the new value to use for the parameter
    :param parameter_to_change: list containing possible parameter names
    :param force: if true, update the parameter even when its existing value does not match
    :return: tuple containing a boolean for whether an update is required and the new parameter list
"""
new_parameters_list = []
update_required = False
for parameter in stack['Parameters']:
new_param = {}
new_param['ParameterKey'] = parameter['ParameterKey']
if parameter['ParameterKey'] in parameter_to_change:
if force or (expected_value in parameter['ParameterValue']):
update_required = True
new_param['ParameterValue'] = new_value
new_param['PreviousValue'] = parameter['ParameterValue']
else:
                logging.warning(
                    "Unexpected value detected - Stack will NOT be updated\n  Stack: " + stack[
                        'StackName'] + "\n  Existing value: " + parameter['ParameterValue'])
new_param['UsePreviousValue'] = True
else:
new_param['UsePreviousValue'] = True
new_parameters_list.append(new_param)
return update_required, new_parameters_list
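# Worked example (values are made up): for a stack whose Parameters include
# {'ParameterKey': 'AmiId', 'ParameterValue': 'ami-old'},
#   get_new_parameter_list_for_update(stack, 'ami-old', 'ami-new', ['AmiId'])
# returns (True, [{'ParameterKey': 'AmiId', 'ParameterValue': 'ami-new',
#                  'PreviousValue': 'ami-old'}, ...]) while every other
# parameter comes back as {'ParameterKey': ..., 'UsePreviousValue': True}.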
def list_stacks_with_given_parameter_in_region(region, parameter_list, profile=None):
"""
Print a list of stacks in a given region that contain at least one parameter in the given parameter list
:param region: region to scan
:param parameter_list: list of possible parameter names
"""
if not region:
logging.error("You must supply a region to scan")
else:
logging.info(
"\nCloudFormation Stacks in region: " + region + " with at least one of the following parameters: " + ', '.join(
parameter_list))
stacks_with_given_parameter = get_stacks_with_given_parameter(region, parameter_list, profile=profile)
if len(stacks_with_given_parameter) > 0:
for stack in stacks_with_given_parameter:
for parameter in stack["Parameters"]:
if parameter['ParameterKey'] in parameter_list:
logging.info(
"Stack Name: " + stack["StackName"] + "\n " + parameter['ParameterKey'] + ": " +
parameter[
"ParameterValue"])
break
else:
logging.info("None")
def list_stacks_with_given_parameter(region_list, parameter_list, profile=None):
"""
Print a list of stacks for the given regions that contain at least one parameter in the given parameter list
:param region_list: list of regions
:param parameter_list: list of possible parameter names
"""
for region in region_list:
list_stacks_with_given_parameter_in_region(region, parameter_list, profile=profile)
def update_stack_with_given_parameter(region, stack_name, expected_value, new_value, parameter_to_change, profile=None,
dryrun=False, force=False):
"""
Update the given parameter in the given stack (for the given region) to a new value, provided
the existing value contains the expected_value
:param region: region where the stack exists
:param stack_name: the stack name or ID
:param expected_value: the existing value of the parameter should contain the expected_value
:param new_value: the new value for the parameter
:param parameter_to_change: list of possible names for the parameter
:param dryrun: if true, no changes are made
:returns: True / False
"""
    stack = get_stack_with_name_or_id(region, stack_name, profile=profile)
update_required, new_parameter_list = get_new_parameter_list_for_update(stack, expected_value, new_value,
parameter_to_change, force)
if update_required:
return _update_stack_parameters(region, stack["StackId"], new_parameter_list, profile=profile, dryrun=dryrun)
def update_all_stacks_with_given_parameter(region, expected_value, new_value, parameter_to_change, profile=None,
dryrun=False, force=False):
"""
Update the given parameter in all stacks (for the given region) to a new value, provided
the existing value contains the expected_value
:param region: region where the stacks exist
:param expected_value: the existing value of the parameter should contain the expected_value
:param new_value: the new value for the parameter
:param parameter_to_change: list of possible names for the parameter
:param dryrun: if true, no changes are made
"""
logging.info('\nUpdating all matching Stacks in region: ' + region)
update_status = {}
for stack in get_stacks_with_given_parameter(region, parameter_to_change, profile=profile):
update_required, new_parameter_list = get_new_parameter_list_for_update(stack, expected_value, new_value,
parameter_to_change, force)
if update_required:
update_status[stack['StackId']] = _update_stack_parameters(region, stack["StackId"], new_parameter_list,
profile=profile, dryrun=dryrun)
return update_status
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Script to view/modify CloudFormation Stacks')
me_cmd_group = parser.add_mutually_exclusive_group(required=True)
me_cmd_group.add_argument("--query-status", help="query stack status in given region(s)", dest='query_stack', metavar='STACK_NAME')
me_cmd_group.add_argument("--delete-stack", help="delete stack in given region(s)", dest='delete_stack', metavar='STACK_NAME')
me_cmd_group.add_argument("--create-stack", help="create stack in given region(s)", dest='create_stack', metavar='STACK_NAME')
me_cmd_group.add_argument("--update-stack", help="update stack in given region(s)", dest='update_stack', metavar='STACK_NAME')
me_cmd_group.add_argument("--update",
help="Update Parameter to new value for given stack in the specified region - must supply expected existing value and new value. STACK_ID can be the name or ID of the Stack",
dest='update', nargs=3, metavar=('STACK_ID', 'EXPECTED_VALUE', 'NEW_VALUE'))
me_cmd_group.add_argument("--update-all",
help="Update Parameter to new value for all stacks in the specified region - must supply expected existing value and new value",
dest='update_all', nargs=2, metavar=('EXPECTED_VALUE', 'NEW_VALUE'))
me_cmd_group.add_argument("--list",
help="List all stacks in given region(s) that have a given parameter",
dest='list', action='store_true')
parser.add_argument("--stack-params", help="space separated list of key=value stack parameters", dest='stack_params',
nargs='+', required=False)
parser.add_argument("--template-body", help="CFN template body (as json/yaml - preface with file:// if file)", dest='template_body',
required=False)
parser.add_argument("--param", help="space separated list of possible names for the parameter", dest='param',
nargs='+', required=False)
parser.add_argument("--regions", help="Specify regions (space separated)", dest='regions', nargs='+', required=True)
parser.add_argument("--profile",
help="The name of an aws cli profile to use.", dest='profile', required=False)
parser.add_argument("--dryrun", help="Do a dryrun - no changes will be performed", dest='dryrun',
action='store_true', default=False, required=False)
parser.add_argument("--force", help="Force an update, even if expected value doesn't match", dest='force',
action='store_true', default=False, required=False)
parser.add_argument("--verbose", help="Turn on DEBUG logging", action='store_true', required=False)
args = parser.parse_args()
log_level = logging.INFO
if args.verbose:
log_level = logging.DEBUG
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler('cf_stack.log')
fh.setLevel(logging.DEBUG)
# create console handler using level set in log_level
ch = logging.StreamHandler()
ch.setLevel(log_level)
console_formatter = logging.Formatter('%(levelname)8s: %(message)s')
ch.setFormatter(console_formatter)
file_formatter = logging.Formatter('%(asctime)s - %(levelname)8s: %(message)s')
fh.setFormatter(file_formatter)
# Add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
logging.debug("INIT")
if args.dryrun:
logging.info("***** Dryrun selected - no changes will be made *****")
if args.regions:
logging.info("Regions: " + str(args.regions))
if not args.param:
if args.list or args.update or args.update_all:
logging.critical("Must supply a parameter")
sys.exit(1)
if args.create_stack or args.update_stack:
if not args.template_body:
logging.critical("Must supply a template body")
sys.exit(1)
if args.template_body.startswith('file://'):
# Template is a file - read it in
file_path = args.template_body[7:]
template_body = read_from_file(file_path)
if not template_body:
sys.exit(1)
else:
template_body = args.template_body
        stack_params = process_stack_params_arg(args.stack_params or [])
result = None
if args.create_stack:
result = create_stack(args.regions, args.create_stack, stack_params, template_body, args.profile, args.dryrun)
elif args.update_stack:
result = update_stack(args.regions, args.update_stack, stack_params, template_body, args.profile, args.dryrun)
if not args.dryrun:
if result:
logging.info("Create/Update results:")
for region in result:
logging.info(' ' + region + ': ' + ("Success" if result[region] else "Failed"))
else:
logging.error("Create/Update failed.")
elif args.delete_stack:
result = delete_stack(args.regions, args.delete_stack, args.profile)
elif args.query_stack:
result = query_stack_status(args.regions, args.query_stack, args.profile)
elif args.list:
if not args.param:
logging.critical("Must supply a parameter name")
sys.exit(1)
logging.info("Searching for Stacks with a Parameter with name(s): " + ' '.join(args.param))
list_stacks_with_given_parameter(args.regions, args.param, args.profile)
elif args.update:
        if len(args.regions) > 1:
            logging.critical("Only one region can be specified with update")
            sys.exit(1)
        if not args.param:
            logging.critical("Must supply a parameter name")
            sys.exit(1)
        update_status = update_stack_with_given_parameter(args.regions[0], args.update[0], args.update[1],
                                                          args.update[2], args.param, args.profile, args.dryrun,
                                                          args.force)
if not args.dryrun:
if update_status:
logging.info("Stack Parameter Update Succeeded")
else:
logging.error("Stack Parameter Update Failed")
elif args.update_all:
        if len(args.regions) > 1:
            logging.critical("Only one region can be specified with update-all")
            sys.exit(1)
        if not args.param:
            logging.critical("Must supply a parameter name")
            sys.exit(1)
        update_status = update_all_stacks_with_given_parameter(args.regions[0], args.update_all[0], args.update_all[1],
                                                               args.param, args.profile, args.dryrun, args.force)
if not args.dryrun and update_status:
logging.info("UPDATE STATUS:")
for stack in update_status:
logging.info(stack + ": " + ('Succeeded' if update_status[stack] else 'Failed'))
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the private `_OptimizeDataset` transformation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import warnings
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import grouping
from tensorflow.python.data.experimental.ops import optimization_options
from tensorflow.python.data.experimental.ops import scan_ops
from tensorflow.python.data.experimental.ops import testing
from tensorflow.python.data.experimental.ops import threadpool
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
def _captured_refvar_test_combinations():
def make_map_dataset(var):
return dataset_ops.Dataset.from_tensors(0).map(lambda x: x + var)
def make_flat_map_dataset(var):
return dataset_ops.Dataset.from_tensors(
0).flat_map(lambda _: dataset_ops.Dataset.from_tensors(var))
def make_filter_dataset(var):
return dataset_ops.Dataset.from_tensors(0).filter(lambda x: x < var)
def make_map_and_batch_dataset(var):
def map_fn(x):
return x + var
return dataset_ops.Dataset.from_tensors(0).apply(
batching.map_and_batch(map_fn, 1))
def make_group_by_reducer_dataset(var):
reducer = grouping.Reducer(
init_func=lambda _: 0,
reduce_func=lambda x, y: x,
finalize_func=lambda _: var)
return dataset_ops.Dataset.range(5).apply(
grouping.group_by_reducer(lambda x: x % 2, reducer))
def make_group_by_window_dataset(var):
def reduce_fn(key, bucket):
del key, bucket
return dataset_ops.Dataset.from_tensors(var)
return dataset_ops.Dataset.from_tensors(0).repeat(10).apply(
grouping.group_by_window(lambda _: 0, reduce_fn, 10))
def make_scan_dataset(var):
return dataset_ops.Dataset.from_tensors(0).apply(
scan_ops.scan(
0, lambda old_state, elem: (old_state + 1, elem + old_state + var)))
cases = [
# Core datasets
("Map", make_map_dataset),
("FlatMap", make_flat_map_dataset),
("Filter", make_filter_dataset),
# Experimental datasets
("MapAndBatch", make_map_and_batch_dataset),
("GroupByReducer", make_group_by_reducer_dataset),
("GroupByWindow", make_group_by_window_dataset),
("Scan", make_scan_dataset)
]
def reduce_fn(x, y):
name, dataset_fn = y
return x + combinations.combine(
dataset_fn=combinations.NamedObject(name, dataset_fn))
return functools.reduce(reduce_fn, cases, [])
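# Note (illustrative): the functools.reduce above concatenates one
# combinations.combine(...) entry per named case, yielding a flat list of
# parameter dicts such as [{'dataset_fn': NamedObject('Map', ...)}, ...]
# that @combinations.generate expands into individual test cases.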
def _disable_intra_op_parallelism_test_combinations():
def make_tensor_dataset():
return dataset_ops.Dataset.from_tensors(42)
def make_map_dataset():
return dataset_ops.Dataset.from_tensors(42).map(lambda x: x + 1)
cases = [
("FromTensors", make_tensor_dataset, [42]),
("Map", make_map_dataset, [43]),
]
def reduce_fn(x, y):
name, dataset_fn, expected_output = y
return x + combinations.combine(
dataset_fn=combinations.NamedObject(name, dataset_fn),
expected_output=[expected_output])
return functools.reduce(reduce_fn, cases, [])
class OptimizeDatasetTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testOptimizationStatefulFunction(self):
dataset = dataset_ops.Dataset.range(
10).map(lambda _: random_ops.random_uniform([])).batch(10)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
get_next = self.getNext(dataset)
self.evaluate(get_next())
# TODO(b/123902160)
@combinations.generate(test_base.graph_only_combinations())
def testOptimizationLargeInputFromTensor(self):
input_t = array_ops.placeholder(dtypes.int32, (None, None, None))
dataset = dataset_ops.Dataset.from_tensors(input_t)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
iterator = dataset_ops.make_initializable_iterator(dataset)
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op, {input_t: np.ones([512, 1024, 1025], np.int32)})
self.evaluate(get_next)
# TODO(b/123902160)
@combinations.generate(test_base.graph_only_combinations())
def testOptimizationLargeInputFromTensorSlices(self):
input_t = array_ops.placeholder(dtypes.int32, (None, None, None, None))
dataset = dataset_ops.Dataset.from_tensor_slices(input_t)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
iterator = dataset_ops.make_initializable_iterator(dataset)
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op, {input_t: np.ones([1, 512, 1024, 1025], np.int32)})
self.evaluate(get_next)
@combinations.generate(test_base.default_test_combinations())
def testOptimizationNestedDataset(self):
def flat_map_fn(_):
dataset = dataset_ops.Dataset.from_tensors(0)
dataset = dataset.apply(testing.assert_next(["MemoryCacheImpl"]))
dataset = dataset.skip(0) # Should be removed by noop elimination
dataset = dataset.cache()
return dataset
dataset = dataset_ops.Dataset.range(1)
dataset = dataset.flat_map(flat_map_fn)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.noop_elimination = True
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, expected_output=[0])
@combinations.generate(test_base.default_test_combinations())
def testOptimizationNestedDatasetWithModifiedRetval(self):
def flat_map_fn(_):
dataset = dataset_ops.Dataset.from_tensors(0)
dataset = dataset.apply(testing.assert_next(["MapAndBatch"]))
# Should be fused by map and batch fusion
dataset = dataset.map(lambda x: x)
dataset = dataset.batch(1)
return dataset
dataset = dataset_ops.Dataset.range(1)
dataset = dataset.flat_map(flat_map_fn)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_and_batch_fusion = True
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, expected_output=[[0]])
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
_disable_intra_op_parallelism_test_combinations(),
combinations.combine(apply_autotune=[None, True, False])))
def testOptimizationDisableIntraOpParallelism(self, dataset_fn,
expected_output,
apply_autotune):
dataset = dataset_fn()
dataset = dataset.apply(testing.assert_next(["MaxIntraOpParallelism"]))
if apply_autotune is not None:
options = dataset_ops.Options()
options.experimental_optimization.autotune = apply_autotune
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, expected_output=expected_output)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(autotune=False, autotune_buffers=False) +
combinations.combine(autotune=True, autotune_buffers=False) +
combinations.combine(autotune=True, autotune_buffers=True),
combinations.combine(set_env=[False, True])))
def testOptimizationEnableGradientDescent(self, autotune, autotune_buffers,
set_env):
if set_env:
os.environ["TF_DATA_EXPERIMENT_OPT_IN"] = "enable_gradient_descent"
os.environ["TF_JOB_NAME"] = "test_job"
dataset = dataset_ops.Dataset.range(5)
dataset = dataset.prefetch(buffer_size=-1)
dataset = dataset.map(lambda x: x + 1, num_parallel_calls=2)
dataset = dataset.map(lambda x: x + 1, num_parallel_calls=-1)
dataset = dataset.prefetch(buffer_size=3)
dataset = dataset.map(lambda x: x + 1, num_parallel_calls=-1)
dataset = dataset.prefetch(buffer_size=1)
options = dataset_ops.Options()
options.experimental_optimization.autotune = autotune
options.experimental_optimization.autotune_buffers = autotune_buffers
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, expected_output=list(range(3, 8)))
if set_env:
del os.environ["TF_DATA_EXPERIMENT_OPT_IN"]
del os.environ["TF_JOB_NAME"]
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(autotune=False, autotune_buffers=False) +
combinations.combine(autotune=True, autotune_buffers=False) +
combinations.combine(autotune=True, autotune_buffers=True),
combinations.combine(first_buffer_sizes=[(1, -1, -1, 4),
(2, -1, 3, -1),
(2, 1, -1, -1)]),
combinations.combine(second_buffer_sizes=[(1, -1, -1, 4),
(2, -1, 3, -1),
(2, 1, -1, -1)]))
)
def testOptimizationAutotuneBuffers(self, autotune, autotune_buffers,
first_buffer_sizes, second_buffer_sizes):
dataset = dataset_ops.Dataset.range(10)
for buffer_size in first_buffer_sizes:
dataset = dataset.prefetch(buffer_size=buffer_size)
dataset = dataset.map(lambda x: x + 1)
for buffer_size in second_buffer_sizes:
dataset = dataset.prefetch(buffer_size=buffer_size)
options = dataset_ops.Options()
options.experimental_optimization.autotune = autotune
options.experimental_optimization.autotune_buffers = autotune_buffers
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, expected_output=list(range(1, 11)))
@combinations.generate(test_base.default_test_combinations())
def testOptimizationThreadPoolDataset(self):
dataset = dataset_ops.Dataset.range(10).batch(10)
dataset = threadpool.override_threadpool(
dataset,
threadpool.PrivateThreadPool(
2, display_name="private_thread_pool_%d" % 2))
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
self.assertDatasetProduces(
dataset,
expected_output=[list(range(10))],
requires_initialization=True)
# Reference variables are not supported in eager mode.
@combinations.generate(
combinations.times(test_base.graph_only_combinations(),
_captured_refvar_test_combinations()))
def testOptimizationWithCapturedRefVar(self, dataset_fn):
"""Tests that default optimizations are disabled with ref variables."""
variable = variable_scope.get_variable(
"v", initializer=0, use_resource=False)
assign_op = variable.assign_add(1)
# Check that warning is logged.
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
unoptimized_dataset = dataset_fn(variable)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.noop_elimination = True
options.experimental_optimization.map_and_batch_fusion = True
optimized_dataset = unoptimized_dataset.with_options(options)
optimized_it = dataset_ops.make_initializable_iterator(optimized_dataset)
self.assertGreaterEqual(len(w), 1)
graph_rewrites = options._graph_rewrites()
expected = (
"tf.data graph rewrites are not compatible with "
"tf.Variable. The following rewrites will be disabled: %s."
" To enable rewrites, use resource variables instead by "
"calling `tf.enable_resource_variables()` at the start of the "
"program." %
(", ".join(graph_rewrites.enabled + graph_rewrites.default)))
self.assertTrue(any(expected in str(warning) for warning in w))
# Check that outputs are the same in the optimized and unoptimized cases,
# when the variable value is changing.
unoptimized_it = dataset_ops.make_initializable_iterator(
unoptimized_dataset)
with ops.control_dependencies([assign_op]):
unoptimized_output = unoptimized_it.get_next()
optimized_output = optimized_it.get_next()
self.evaluate(variable.initializer)
self.evaluate((unoptimized_it.initializer, optimized_it.initializer))
while True:
try:
unoptimized, optimized = self.evaluate((unoptimized_output,
optimized_output))
self.assertEqual(unoptimized, optimized)
except errors.OutOfRangeError:
break
@combinations.generate(test_base.default_test_combinations())
def testOptimizationDefault(self):
"""Tests the optimization settings by default."""
options = dataset_ops.Options()
expected_optimizations_enabled = []
expected_optimizations_disabled = []
expected_optimizations_default = [
"map_and_batch_fusion",
"noop_elimination",
"shuffle_and_repeat_fusion",
]
graph_rewrites = options._graph_rewrites()
self.assertEqual(set(graph_rewrites.enabled),
set(expected_optimizations_enabled))
self.assertEqual(set(graph_rewrites.disabled),
set(expected_optimizations_disabled))
self.assertEqual(set(graph_rewrites.default),
set(expected_optimizations_default))
options.experimental_optimization.apply_default_optimizations = True
graph_rewrites = options._graph_rewrites()
self.assertEqual(set(graph_rewrites.enabled),
set(expected_optimizations_enabled))
self.assertEqual(set(graph_rewrites.disabled),
set(expected_optimizations_disabled))
self.assertEqual(set(graph_rewrites.default),
set(expected_optimizations_default))
options.experimental_optimization.apply_default_optimizations = False
expected_optimizations_default = []
graph_rewrites = options._graph_rewrites()
self.assertEqual(set(graph_rewrites.enabled),
set(expected_optimizations_enabled))
self.assertEqual(set(graph_rewrites.disabled),
set(expected_optimizations_disabled))
self.assertEqual(set(graph_rewrites.default),
set(expected_optimizations_default))
@combinations.generate(test_base.default_test_combinations())
def testOptimizationEnabled(self):
"""Tests the optimization settings by enabling all."""
options = dataset_ops.Options()
options.experimental_optimization.filter_fusion = True
options.experimental_optimization.filter_with_random_uniform_fusion = True
options.experimental_optimization.hoist_random_uniform = True
options.experimental_optimization.map_and_batch_fusion = True
options.experimental_optimization.map_and_filter_fusion = True
options.experimental_optimization.map_parallelization = True
options.experimental_optimization.map_fusion = True
options.experimental_optimization.noop_elimination = True
options.experimental_optimization.parallel_batch = True
options.experimental_optimization.shuffle_and_repeat_fusion = True
options.experimental_optimization.map_vectorization.enabled = True
options.experimental_optimization.autotune_buffers = True
options.experimental_deterministic = False
options.experimental_stats.latency_all_edges = True
options.experimental_slack = True
expected_optimizations_enabled = [
"filter_fusion",
"filter_with_random_uniform_fusion",
"hoist_random_uniform",
"map_and_batch_fusion",
"map_and_filter_fusion",
"map_parallelization",
"map_fusion",
"noop_elimination",
"parallel_batch",
"shuffle_and_repeat_fusion",
"map_vectorization",
"autotune_buffer_sizes",
"make_sloppy",
"latency_all_edges",
"slack",
"disable_prefetch_legacy_autotune",
]
expected_optimizations_disabled = []
expected_optimizations_default = []
graph_rewrites = options._graph_rewrites()
self.assertEqual(set(graph_rewrites.enabled),
set(expected_optimizations_enabled))
self.assertEqual(set(graph_rewrites.disabled),
set(expected_optimizations_disabled))
self.assertEqual(set(graph_rewrites.default),
set(expected_optimizations_default))
@combinations.generate(test_base.default_test_combinations())
def testOptimizationDisabled(self):
"""Tests the optimization settings by disabling all."""
options = dataset_ops.Options()
options.experimental_optimization.filter_fusion = False
options.experimental_optimization.filter_with_random_uniform_fusion = False
options.experimental_optimization.hoist_random_uniform = False
options.experimental_optimization.map_and_batch_fusion = False
options.experimental_optimization.map_and_filter_fusion = False
options.experimental_optimization.map_parallelization = False
options.experimental_optimization.map_fusion = False
options.experimental_optimization.noop_elimination = False
options.experimental_optimization.parallel_batch = False
options.experimental_optimization.shuffle_and_repeat_fusion = False
options.experimental_optimization.map_vectorization.enabled = False
options.experimental_optimization.autotune = False
options.experimental_deterministic = True
options.experimental_stats.latency_all_edges = False
options.experimental_slack = False
expected_optimizations_enabled = []
expected_optimizations_disabled = [
"filter_fusion",
"filter_with_random_uniform_fusion",
"hoist_random_uniform",
"map_and_batch_fusion",
"map_and_filter_fusion",
"map_parallelization",
"map_fusion",
"noop_elimination",
"parallel_batch",
"shuffle_and_repeat_fusion",
"map_vectorization",
"autotune_buffer_sizes",
"make_sloppy",
"latency_all_edges",
"slack",
"disable_prefetch_legacy_autotune",
]
expected_optimizations_default = []
graph_rewrites = options._graph_rewrites()
self.assertEqual(set(graph_rewrites.enabled),
set(expected_optimizations_enabled))
self.assertEqual(set(graph_rewrites.disabled),
set(expected_optimizations_disabled))
self.assertEqual(set(graph_rewrites.default),
set(expected_optimizations_default))
@combinations.generate(test_base.default_test_combinations())
def testAutotuningDefaults(self):
options = dataset_ops.Options()
# Check defaults
autotune, algorithm, cpu_budget, ram_budget = options._autotune_settings()
self.assertTrue(autotune)
self.assertEqual(algorithm,
optimization_options._AutotuneAlgorithm.HILL_CLIMB)
self.assertEqual(cpu_budget, 0)
self.assertEqual(ram_budget, 0)
@combinations.generate(test_base.default_test_combinations())
def testAutotuningSettings(self):
options = dataset_ops.Options()
options.experimental_optimization.autotune_cpu_budget = 1000
options.experimental_optimization.autotune_ram_budget = 999999999
options.experimental_optimization.autotune_buffers = True
self.assertIn("autotune_buffer_sizes", options._graph_rewrites().enabled)
self.assertIn("disable_prefetch_legacy_autotune",
options._graph_rewrites().enabled)
autotune, algorithm, cpu_budget, ram_budget = options._autotune_settings()
self.assertTrue(autotune)
self.assertEqual(algorithm,
optimization_options._AutotuneAlgorithm.GRADIENT_DESCENT)
self.assertEqual(cpu_budget, 1000)
self.assertEqual(ram_budget, 999999999)
if __name__ == "__main__":
test.main()
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for Ironic BaseConductorManager."""
import collections
from unittest import mock
import uuid
import eventlet
import futurist
from futurist import periodics
from ironic_lib import mdns
from oslo_config import cfg
from oslo_db import exception as db_exception
from oslo_utils import uuidutils
from ironic.common import driver_factory
from ironic.common import exception
from ironic.common import states
from ironic.conductor import base_manager
from ironic.conductor import manager
from ironic.conductor import notification_utils
from ironic.conductor import task_manager
from ironic.db import api as dbapi
from ironic.drivers import fake_hardware
from ironic.drivers import generic
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import fake
from ironic import objects
from ironic.objects import fields
from ironic.tests import base as tests_base
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.objects import utils as obj_utils
CONF = cfg.CONF
@mgr_utils.mock_record_keepalive
class StartStopTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def test_start_registers_conductor(self):
self.assertRaises(exception.ConductorNotFound,
objects.Conductor.get_by_hostname,
self.context, self.hostname)
self._start_service()
res = objects.Conductor.get_by_hostname(self.context, self.hostname)
self.assertEqual(self.hostname, res['hostname'])
def test_start_clears_conductor_locks(self):
node = obj_utils.create_test_node(self.context,
reservation=self.hostname)
node.save()
self._start_service()
node.refresh()
self.assertIsNone(node.reservation)
def test_stop_clears_conductor_locks(self):
node = obj_utils.create_test_node(self.context,
reservation=self.hostname)
node.save()
self._start_service()
res = objects.Conductor.get_by_hostname(self.context, self.hostname)
self.assertEqual(self.hostname, res['hostname'])
self.service.del_host()
node.refresh()
self.assertIsNone(node.reservation)
def test_stop_unregisters_conductor(self):
self._start_service()
res = objects.Conductor.get_by_hostname(self.context, self.hostname)
self.assertEqual(self.hostname, res['hostname'])
self.service.del_host()
self.assertRaises(exception.ConductorNotFound,
objects.Conductor.get_by_hostname,
self.context, self.hostname)
def test_stop_doesnt_unregister_conductor(self):
self._start_service()
res = objects.Conductor.get_by_hostname(self.context, self.hostname)
self.assertEqual(self.hostname, res['hostname'])
self.service.del_host(deregister=False)
res = objects.Conductor.get_by_hostname(self.context, self.hostname)
self.assertEqual(self.hostname, res['hostname'])
@mock.patch.object(manager.ConductorManager, 'init_host', autospec=True)
def test_stop_uninitialized_conductor(self, mock_init):
self._start_service()
self.service.del_host()
@mock.patch.object(driver_factory.HardwareTypesFactory, '__getitem__',
lambda *args: mock.MagicMock())
@mock.patch.object(driver_factory, 'default_interface', autospec=True)
def test_start_registers_driver_names(self, mock_def_iface):
init_names = ['fake1', 'fake2']
restart_names = ['fake3', 'fake4']
mock_def_iface.return_value = 'fake'
df = driver_factory.HardwareTypesFactory()
with mock.patch.object(df._extension_manager, 'names',
autospec=True) as mock_names:
# verify driver names are registered
self.config(enabled_hardware_types=init_names)
mock_names.return_value = init_names
self._start_service()
res = objects.Conductor.get_by_hostname(self.context,
self.hostname)
self.assertEqual(init_names, res['drivers'])
self._stop_service()
# verify that restart registers new driver names
self.config(enabled_hardware_types=restart_names)
mock_names.return_value = restart_names
self._start_service()
res = objects.Conductor.get_by_hostname(self.context,
self.hostname)
self.assertEqual(restart_names, res['drivers'])
@mock.patch.object(base_manager.BaseConductorManager,
'_register_and_validate_hardware_interfaces',
autospec=True)
@mock.patch.object(driver_factory, 'all_interfaces', autospec=True)
@mock.patch.object(driver_factory, 'hardware_types', autospec=True)
def test_start_registers_driver_specific_tasks(self,
mock_hw_types, mock_ifaces,
mock_reg_hw_ifaces):
class TestHwType(generic.GenericHardware):
@property
def supported_management_interfaces(self):
return []
@property
def supported_power_interfaces(self):
return []
# This should not be collected, since we don't collect periodic
# tasks from hardware types
@periodics.periodic(spacing=100500)
def task(self):
pass
class TestInterface(object):
@periodics.periodic(spacing=100500)
def iface(self):
pass
class TestInterface2(object):
@periodics.periodic(spacing=100500)
def iface(self):
pass
hw_type = TestHwType()
iface1 = TestInterface()
iface2 = TestInterface2()
expected = [iface1.iface, iface2.iface]
mock_hw_types.return_value = {'fake1': hw_type}
mock_ifaces.return_value = {
'management': {'fake1': iface1},
'power': {'fake2': iface2}
}
self._start_service(start_periodic_tasks=True)
tasks = {c[0] for c in self.service._periodic_task_callables}
for item in expected:
self.assertTrue(periodics.is_periodic(item))
self.assertIn(item, tasks)
# no periodic tasks from the hardware type
self.assertTrue(periodics.is_periodic(hw_type.task))
self.assertNotIn(hw_type.task, tasks)
@mock.patch.object(driver_factory.HardwareTypesFactory, '__init__',
autospec=True)
def test_start_fails_on_missing_driver(self, mock_df):
mock_df.side_effect = exception.DriverNotFound('test')
with mock.patch.object(self.dbapi, 'register_conductor',
autospec=True) as mock_reg:
self.assertRaises(exception.DriverNotFound,
self.service.init_host)
self.assertTrue(mock_df.called)
self.assertFalse(mock_reg.called)
def test_start_with_no_enabled_interfaces(self):
self.config(enabled_boot_interfaces=[],
enabled_deploy_interfaces=[],
enabled_hardware_types=['fake-hardware'])
self._start_service()
@mock.patch.object(base_manager, 'LOG', autospec=True)
@mock.patch.object(driver_factory, 'HardwareTypesFactory', autospec=True)
def test_start_fails_on_hw_types(self, ht_mock, log_mock):
driver_factory_mock = mock.MagicMock(names=[])
ht_mock.return_value = driver_factory_mock
self.assertRaises(exception.NoDriversLoaded,
self.service.init_host)
self.assertTrue(log_mock.error.called)
ht_mock.assert_called_once_with()
@mock.patch.object(base_manager, 'LOG', autospec=True)
@mock.patch.object(base_manager.BaseConductorManager,
'_register_and_validate_hardware_interfaces',
autospec=True)
@mock.patch.object(base_manager.BaseConductorManager, 'del_host',
autospec=True)
def test_start_fails_hw_type_register(self, del_mock, reg_mock, log_mock):
reg_mock.side_effect = exception.DriverNotFound('hw-type')
self.assertRaises(exception.DriverNotFound,
self.service.init_host)
self.assertTrue(log_mock.error.called)
del_mock.assert_called_once()
def test_prevent_double_start(self):
self._start_service()
self.assertRaisesRegex(RuntimeError, 'already running',
self.service.init_host)
def test_start_recover_nodes_stuck(self):
state_trans = [
(states.DEPLOYING, states.DEPLOYFAIL),
(states.CLEANING, states.CLEANFAIL),
(states.VERIFYING, states.ENROLL),
(states.INSPECTING, states.INSPECTFAIL),
(states.ADOPTING, states.ADOPTFAIL),
(states.RESCUING, states.RESCUEFAIL),
(states.UNRESCUING, states.UNRESCUEFAIL),
(states.DELETING, states.ERROR),
]
nodes = [obj_utils.create_test_node(self.context, uuid=uuid.uuid4(),
driver='fake-hardware',
provision_state=state[0])
for state in state_trans]
self._start_service()
for node, state in zip(nodes, state_trans):
node.refresh()
self.assertEqual(state[1], node.provision_state,
'Test failed when recovering from %s' % state[0])
@mock.patch.object(base_manager, 'LOG', autospec=True)
def test_warning_on_low_workers_pool(self, log_mock):
CONF.set_override('workers_pool_size', 3, 'conductor')
self._start_service()
self.assertTrue(log_mock.warning.called)
@mock.patch.object(eventlet.greenpool.GreenPool, 'waitall', autospec=True)
def test_del_host_waits_on_workerpool(self, wait_mock):
self._start_service()
self.service.del_host()
self.assertTrue(wait_mock.called)
def test_conductor_shutdown_flag(self):
self._start_service()
self.assertFalse(self.service._shutdown)
self.service.del_host()
self.assertTrue(self.service._shutdown)
@mock.patch.object(deploy_utils, 'get_ironic_api_url', autospec=True)
@mock.patch.object(mdns, 'Zeroconf', autospec=True)
def test_start_with_mdns(self, mock_zc, mock_api_url):
CONF.set_override('debug', False)
CONF.set_override('enable_mdns', True, 'conductor')
self._start_service()
res = objects.Conductor.get_by_hostname(self.context, self.hostname)
self.assertEqual(self.hostname, res['hostname'])
mock_zc.return_value.register_service.assert_called_once_with(
'baremetal',
mock_api_url.return_value,
params={})
@mock.patch.object(deploy_utils, 'get_ironic_api_url', autospec=True)
@mock.patch.object(mdns, 'Zeroconf', autospec=True)
def test_start_with_mdns_and_debug(self, mock_zc, mock_api_url):
CONF.set_override('debug', True)
CONF.set_override('enable_mdns', True, 'conductor')
self._start_service()
res = objects.Conductor.get_by_hostname(self.context, self.hostname)
self.assertEqual(self.hostname, res['hostname'])
mock_zc.return_value.register_service.assert_called_once_with(
'baremetal',
mock_api_url.return_value,
params={'ipa_debug': True})
def test_del_host_with_mdns(self):
mock_zc = mock.Mock(spec=mdns.Zeroconf)
self.service._zeroconf = mock_zc
self._start_service()
self.service.del_host()
mock_zc.close.assert_called_once_with()
self.assertIsNone(self.service._zeroconf)
@mock.patch.object(dbapi, 'get_instance', autospec=True)
def test_start_dbapi_single_call(self, mock_dbapi):
self._start_service()
        # NOTE(TheJulia): This seems like it should only be 1, but
        # the hash ring initialization pulls its own database connection
        # instance, which is likely a good thing, thus this is 2 instead of
        # 3 without reuse of the database connection.
self.assertEqual(2, mock_dbapi.call_count)
class KeepAliveTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def test__conductor_service_record_keepalive(self):
self._start_service()
# avoid wasting time at the event.wait()
CONF.set_override('heartbeat_interval', 0, 'conductor')
with mock.patch.object(self.dbapi, 'touch_conductor',
autospec=True) as mock_touch:
with mock.patch.object(self.service._keepalive_evt,
'is_set', autospec=True) as mock_is_set:
mock_is_set.side_effect = [False, True]
self.service._conductor_service_record_keepalive()
mock_touch.assert_called_once_with(self.hostname)
def test__conductor_service_record_keepalive_failed_db_conn(self):
self._start_service()
# avoid wasting time at the event.wait()
CONF.set_override('heartbeat_interval', 0, 'conductor')
with mock.patch.object(self.dbapi, 'touch_conductor',
autospec=True) as mock_touch:
mock_touch.side_effect = [None, db_exception.DBConnectionError(),
None]
with mock.patch.object(self.service._keepalive_evt,
'is_set', autospec=True) as mock_is_set:
mock_is_set.side_effect = [False, False, False, True]
self.service._conductor_service_record_keepalive()
self.assertEqual(3, mock_touch.call_count)
def test__conductor_service_record_keepalive_failed_error(self):
self._start_service()
# avoid wasting time at the event.wait()
CONF.set_override('heartbeat_interval', 0, 'conductor')
with mock.patch.object(self.dbapi, 'touch_conductor',
autospec=True) as mock_touch:
mock_touch.side_effect = [None, Exception(),
None]
with mock.patch.object(self.service._keepalive_evt,
'is_set', autospec=True) as mock_is_set:
mock_is_set.side_effect = [False, False, False, True]
self.service._conductor_service_record_keepalive()
self.assertEqual(3, mock_touch.call_count)
class ManagerSpawnWorkerTestCase(tests_base.TestCase):
def setUp(self):
super(ManagerSpawnWorkerTestCase, self).setUp()
self.service = manager.ConductorManager('hostname', 'test-topic')
self.executor = mock.Mock(spec=futurist.GreenThreadPoolExecutor)
self.service._executor = self.executor
def test__spawn_worker(self):
self.service._spawn_worker('fake', 1, 2, foo='bar', cat='meow')
self.executor.submit.assert_called_once_with(
'fake', 1, 2, foo='bar', cat='meow')
def test__spawn_worker_none_free(self):
self.executor.submit.side_effect = futurist.RejectedSubmission()
self.assertRaises(exception.NoFreeConductorWorker,
self.service._spawn_worker, 'fake')
@mock.patch.object(objects.Conductor, 'unregister_all_hardware_interfaces',
autospec=True)
@mock.patch.object(objects.Conductor, 'register_hardware_interfaces',
autospec=True)
@mock.patch.object(driver_factory, 'default_interface', autospec=True)
@mock.patch.object(driver_factory, 'enabled_supported_interfaces',
autospec=True)
@mgr_utils.mock_record_keepalive
class RegisterInterfacesTestCase(mgr_utils.ServiceSetUpMixin,
db_base.DbTestCase):
def setUp(self):
super(RegisterInterfacesTestCase, self).setUp()
self._start_service()
def test__register_and_validate_hardware_interfaces(self,
esi_mock,
default_mock,
reg_mock,
unreg_mock):
# these must be same order as esi_mock side effect
hardware_types = collections.OrderedDict((
('fake-hardware', fake_hardware.FakeHardware()),
('manual-management', generic.ManualManagementHardware),
))
esi_mock.side_effect = [
collections.OrderedDict((
('management', ['fake', 'noop']),
('deploy', ['direct', 'ansible']),
)),
collections.OrderedDict((
('management', ['fake']),
('deploy', ['direct', 'fake']),
)),
]
default_mock.side_effect = ('fake', 'direct', 'fake', 'direct')
expected_calls = [
mock.call(
mock.ANY,
[{'hardware_type': 'fake-hardware',
'interface_type': 'management',
'interface_name': 'fake',
'default': True},
{'hardware_type': 'fake-hardware',
'interface_type': 'management',
'interface_name': 'noop',
'default': False},
{'hardware_type': 'fake-hardware',
'interface_type': 'deploy',
'interface_name': 'direct',
'default': True},
{'hardware_type': 'fake-hardware',
'interface_type': 'deploy',
'interface_name': 'ansible',
'default': False},
{'hardware_type': 'manual-management',
'interface_type': 'management',
'interface_name': 'fake',
'default': True},
{'hardware_type': 'manual-management',
'interface_type': 'deploy',
'interface_name': 'direct',
'default': True},
{'hardware_type': 'manual-management',
'interface_type': 'deploy',
'interface_name': 'fake',
'default': False}]
)
]
self.service._register_and_validate_hardware_interfaces(hardware_types)
unreg_mock.assert_called_once_with(mock.ANY)
# we're iterating over dicts, don't worry about order
reg_mock.assert_has_calls(expected_calls)
def test__register_and_validate_no_valid_default(self,
esi_mock,
default_mock,
reg_mock,
unreg_mock):
# these must be same order as esi_mock side effect
hardware_types = collections.OrderedDict((
('fake-hardware', fake_hardware.FakeHardware()),
))
esi_mock.side_effect = [
collections.OrderedDict((
('management', ['fake', 'noop']),
('deploy', ['direct', 'ansible']),
)),
]
default_mock.side_effect = exception.NoValidDefaultForInterface("boo")
self.assertRaises(
exception.NoValidDefaultForInterface,
self.service._register_and_validate_hardware_interfaces,
hardware_types)
default_mock.assert_called_once_with(
hardware_types['fake-hardware'],
mock.ANY, driver_name='fake-hardware')
unreg_mock.assert_called_once_with(mock.ANY)
self.assertFalse(reg_mock.called)
@mock.patch.object(fake.FakeConsole, 'start_console', autospec=True)
@mock.patch.object(notification_utils, 'emit_console_notification',
autospec=True)
class StartConsolesTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def test__start_consoles(self, mock_notify, mock_start_console):
obj_utils.create_test_node(self.context,
driver='fake-hardware',
console_enabled=True)
obj_utils.create_test_node(
self.context,
uuid=uuidutils.generate_uuid(),
driver='fake-hardware',
console_enabled=True
)
obj_utils.create_test_node(
self.context,
uuid=uuidutils.generate_uuid(),
driver='fake-hardware'
)
self._start_service()
self.service._start_consoles(self.context)
self.assertEqual(2, mock_start_console.call_count)
mock_notify.assert_has_calls(
[mock.call(mock.ANY, 'console_restore',
fields.NotificationStatus.START),
mock.call(mock.ANY, 'console_restore',
fields.NotificationStatus.END)])
def test__start_consoles_no_console_enabled(self, mock_notify,
mock_start_console):
obj_utils.create_test_node(self.context,
driver='fake-hardware',
console_enabled=False)
self._start_service()
self.service._start_consoles(self.context)
self.assertFalse(mock_start_console.called)
self.assertFalse(mock_notify.called)
def test__start_consoles_failed(self, mock_notify, mock_start_console):
test_node = obj_utils.create_test_node(self.context,
driver='fake-hardware',
console_enabled=True)
self._start_service()
mock_start_console.side_effect = Exception()
self.service._start_consoles(self.context)
mock_start_console.assert_called_once_with(mock.ANY, mock.ANY)
test_node.refresh()
self.assertFalse(test_node.console_enabled)
self.assertIsNotNone(test_node.last_error)
mock_notify.assert_has_calls(
[mock.call(mock.ANY, 'console_restore',
fields.NotificationStatus.START),
mock.call(mock.ANY, 'console_restore',
fields.NotificationStatus.ERROR)])
history = objects.NodeHistory.list_by_node_id(self.context,
test_node.id)
entry = history[0]
self.assertEqual('startup failure', entry['event_type'])
self.assertEqual('ERROR', entry['severity'])
self.assertIsNotNone(entry['event'])
@mock.patch.object(base_manager, 'LOG', autospec=True)
def test__start_consoles_node_locked(self, log_mock, mock_notify,
mock_start_console):
test_node = obj_utils.create_test_node(self.context,
driver='fake-hardware',
console_enabled=True,
reservation='fake-host')
self._start_service()
self.service._start_consoles(self.context)
self.assertFalse(mock_start_console.called)
test_node.refresh()
self.assertTrue(test_node.console_enabled)
self.assertIsNone(test_node.last_error)
self.assertTrue(log_mock.warning.called)
self.assertFalse(mock_notify.called)
@mock.patch.object(base_manager, 'LOG', autospec=True)
def test__start_consoles_node_not_found(self, log_mock, mock_notify,
mock_start_console):
test_node = obj_utils.create_test_node(self.context,
driver='fake-hardware',
console_enabled=True)
self._start_service()
with mock.patch.object(task_manager, 'acquire',
autospec=True) as mock_acquire:
mock_acquire.side_effect = exception.NodeNotFound(node='not found')
self.service._start_consoles(self.context)
self.assertFalse(mock_start_console.called)
test_node.refresh()
self.assertTrue(test_node.console_enabled)
self.assertIsNone(test_node.last_error)
self.assertTrue(log_mock.warning.called)
self.assertFalse(mock_notify.called)
class MiscTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def setUp(self):
super(MiscTestCase, self).setUp()
self._start_service()
def test__fail_transient_state(self):
node = obj_utils.create_test_node(self.context,
driver='fake-hardware',
provision_state=states.DEPLOYING)
self.service._fail_transient_state(states.DEPLOYING, 'unknown err')
node.refresh()
self.assertEqual(states.DEPLOYFAIL, node.provision_state)
def test__fail_transient_state_maintenance(self):
node = obj_utils.create_test_node(self.context,
driver='fake-hardware',
maintenance=True,
provision_state=states.DEPLOYING)
self.service._fail_transient_state(states.DEPLOYING, 'unknown err')
node.refresh()
self.assertEqual(states.DEPLOYFAIL, node.provision_state)
history = objects.NodeHistory.list_by_node_id(self.context,
node.id)
entry = history[0]
self.assertEqual('transition', entry['event_type'])
self.assertEqual('ERROR', entry['severity'])
self.assertEqual('unknown err', entry['event'])
|
|
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
from functools import reduce
from docker.errors import APIError
from docker.errors import NotFound
from . import parallel
from .config import ConfigurationError
from .config.sort_services import get_service_name_from_net
from .const import DEFAULT_TIMEOUT
from .const import LABEL_ONE_OFF
from .const import LABEL_PROJECT
from .const import LABEL_SERVICE
from .container import Container
from .service import ContainerNet
from .service import ConvergenceStrategy
from .service import Net
from .service import Service
from .service import ServiceNet
log = logging.getLogger(__name__)
class Project(object):
"""
A collection of services.
"""
def __init__(self, name, services, client, use_networking=False, network_driver=None):
self.name = name
self.services = services
self.client = client
self.use_networking = use_networking
self.network_driver = network_driver
def labels(self, one_off=False):
return [
'{0}={1}'.format(LABEL_PROJECT, self.name),
'{0}={1}'.format(LABEL_ONE_OFF, "True" if one_off else "False"),
]
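    # Illustrative sketch (label values come from compose's const module and
    # are shown here as an assumption): for a project named "web",
    #   labels() -> ['com.docker.compose.project=web',
    #                'com.docker.compose.oneoff=False']
    # This is the same label filter containers() uses below.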
@classmethod
def from_dicts(cls, name, service_dicts, client, use_networking=False, network_driver=None):
"""
        Construct a Project from a list of dicts representing services.
"""
project = cls(name, [], client, use_networking=use_networking, network_driver=network_driver)
if use_networking:
remove_links(service_dicts)
for service_dict in service_dicts:
links = project.get_links(service_dict)
volumes_from = project.get_volumes_from(service_dict)
net = project.get_net(service_dict)
project.services.append(
Service(
client=client,
project=name,
use_networking=use_networking,
links=links,
net=net,
volumes_from=volumes_from,
**service_dict))
return project
@property
def service_names(self):
return [service.name for service in self.services]
def get_service(self, name):
"""
Retrieve a service by name. Raises NoSuchService
if the named service does not exist.
"""
for service in self.services:
if service.name == name:
return service
raise NoSuchService(name)
def validate_service_names(self, service_names):
"""
Validate that the given list of service names only contains valid
services. Raises NoSuchService if one of the names is invalid.
"""
valid_names = self.service_names
for name in service_names:
if name not in valid_names:
raise NoSuchService(name)
def get_services(self, service_names=None, include_deps=False):
"""
Returns a list of this project's services filtered
by the provided list of names, or all services if service_names is None
or [].
If include_deps is specified, returns a list including the dependencies for
service_names, in order of dependency.
Preserves the original order of self.services where possible,
reordering as needed to resolve dependencies.
Raises NoSuchService if any of the named services do not exist.
"""
if service_names is None or len(service_names) == 0:
return self.get_services(
service_names=self.service_names,
include_deps=include_deps
)
else:
unsorted = [self.get_service(name) for name in service_names]
services = [s for s in self.services if s in unsorted]
if include_deps:
services = reduce(self._inject_deps, services, [])
            uniques = []
            for s in services:
                if s not in uniques:
                    uniques.append(s)
            return uniques
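    # Usage sketch (hypothetical services): with services db and web, where
    # web depends on db, get_services(['web'], include_deps=True) returns
    # [db, web], since _inject_deps places each service's dependencies
    # ahead of it.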
def get_links(self, service_dict):
links = []
if 'links' in service_dict:
for link in service_dict.get('links', []):
if ':' in link:
service_name, link_name = link.split(':', 1)
else:
service_name, link_name = link, None
try:
links.append((self.get_service(service_name), link_name))
except NoSuchService:
raise ConfigurationError(
'Service "%s" has a link to service "%s" which does not '
'exist.' % (service_dict['name'], service_name))
del service_dict['links']
return links
def get_volumes_from(self, service_dict):
volumes_from = []
if 'volumes_from' in service_dict:
for volume_from_spec in service_dict.get('volumes_from', []):
# Get service
try:
service = self.get_service(volume_from_spec.source)
volume_from_spec = volume_from_spec._replace(source=service)
except NoSuchService:
try:
container = Container.from_id(self.client, volume_from_spec.source)
volume_from_spec = volume_from_spec._replace(source=container)
except APIError:
raise ConfigurationError(
'Service "%s" mounts volumes from "%s", which is '
'not the name of a service or container.' % (
service_dict['name'],
volume_from_spec.source))
volumes_from.append(volume_from_spec)
del service_dict['volumes_from']
return volumes_from
def get_net(self, service_dict):
net = service_dict.pop('net', None)
if not net:
if self.use_networking:
return Net(self.name)
return Net(None)
net_name = get_service_name_from_net(net)
if not net_name:
return Net(net)
try:
return ServiceNet(self.get_service(net_name))
except NoSuchService:
pass
try:
return ContainerNet(Container.from_id(self.client, net_name))
except APIError:
raise ConfigurationError(
'Service "%s" is trying to use the network of "%s", '
'which is not the name of a service or container.' % (
service_dict['name'],
net_name))
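    # Resolution sketch: an explicit mode such as "host" becomes Net("host");
    # a "container:db" value (assuming get_service_name_from_net extracts
    # "db") resolves to ServiceNet(db) when "db" is a service in this project,
    # and otherwise to ContainerNet for a raw container named or ID'd "db".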
def start(self, service_names=None, **options):
for service in self.get_services(service_names):
service.start(**options)
def stop(self, service_names=None, **options):
parallel.parallel_stop(self.containers(service_names), options)
def pause(self, service_names=None, **options):
parallel.parallel_pause(reversed(self.containers(service_names)), options)
def unpause(self, service_names=None, **options):
parallel.parallel_unpause(self.containers(service_names), options)
def kill(self, service_names=None, **options):
parallel.parallel_kill(self.containers(service_names), options)
def remove_stopped(self, service_names=None, **options):
parallel.parallel_remove(self.containers(service_names, stopped=True), options)
def restart(self, service_names=None, **options):
parallel.parallel_restart(self.containers(service_names, stopped=True), options)
def build(self, service_names=None, no_cache=False, pull=False, force_rm=False):
for service in self.get_services(service_names):
if service.can_be_built():
service.build(no_cache, pull, force_rm)
else:
log.info('%s uses an image, skipping' % service.name)
def up(self,
service_names=None,
start_deps=True,
strategy=ConvergenceStrategy.changed,
do_build=True,
timeout=DEFAULT_TIMEOUT,
detached=False):
services = self.get_services(service_names, include_deps=start_deps)
for service in services:
service.remove_duplicate_containers()
plans = self._get_convergence_plans(services, strategy)
if self.use_networking and self.uses_default_network():
self.ensure_network_exists()
return [
container
for service in services
for container in service.execute_convergence_plan(
plans[service.name],
do_build=do_build,
timeout=timeout,
detached=detached
)
]
def _get_convergence_plans(self, services, strategy):
plans = {}
for service in services:
updated_dependencies = [
name
for name in service.get_dependency_names()
if name in plans
and plans[name].action in ('recreate', 'create')
]
if updated_dependencies and strategy.allows_recreate:
log.debug('%s has upstream changes (%s)',
service.name,
", ".join(updated_dependencies))
plan = service.convergence_plan(ConvergenceStrategy.always)
else:
plan = service.convergence_plan(strategy)
plans[service.name] = plan
return plans
def pull(self, service_names=None, ignore_pull_failures=False):
for service in self.get_services(service_names, include_deps=False):
service.pull(ignore_pull_failures)
def containers(self, service_names=None, stopped=False, one_off=False):
if service_names:
self.validate_service_names(service_names)
else:
service_names = self.service_names
containers = list(filter(None, [
Container.from_ps(self.client, container)
for container in self.client.containers(
all=stopped,
filters={'label': self.labels(one_off=one_off)})]))
def matches_service_names(container):
return container.labels.get(LABEL_SERVICE) in service_names
return [c for c in containers if matches_service_names(c)]
def get_network(self):
try:
return self.client.inspect_network(self.name)
except NotFound:
return None
def ensure_network_exists(self):
# TODO: recreate network if driver has changed?
if self.get_network() is None:
log.info(
'Creating network "{}" with driver "{}"'
.format(self.name, self.network_driver)
)
self.client.create_network(self.name, driver=self.network_driver)
def remove_network(self):
network = self.get_network()
if network:
self.client.remove_network(network['Id'])
def uses_default_network(self):
return any(service.net.mode == self.name for service in self.services)
def _inject_deps(self, acc, service):
dep_names = service.get_dependency_names()
if len(dep_names) > 0:
dep_services = self.get_services(
service_names=list(set(dep_names)),
include_deps=True
)
else:
dep_services = []
dep_services.append(service)
return acc + dep_services
def remove_links(service_dicts):
services_with_links = [s for s in service_dicts if 'links' in s]
if not services_with_links:
return
if len(services_with_links) == 1:
prefix = '"{}" defines'.format(services_with_links[0]['name'])
else:
prefix = 'Some services ({}) define'.format(
", ".join('"{}"'.format(s['name']) for s in services_with_links))
    log.warning(
        '\n{} links, which are not compatible with Docker networking and will be ignored.\n'
        'Future versions of Docker will not support links - you should remove them for '
        'forwards-compatibility.\n'.format(prefix))
for s in services_with_links:
del s['links']
class NoSuchService(Exception):
def __init__(self, name):
self.name = name
self.msg = "No such service: %s" % self.name
def __str__(self):
return self.msg
|
|
#!/usr/bin/env python
# MBUtil: a tool for MBTiles files
# Supports importing, exporting, and more
#
# (c) Development Seed 2012
# Licensed under BSD
# for additional reference on schema see:
# https://github.com/mapbox/node-mbtiles/blob/master/lib/schema.sql
import sqlite3, sys, logging, time, os, json, zlib, re
logger = logging.getLogger(__name__)
def flip_y(zoom, y):
return (2**zoom-1) - y
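# flip_y converts between XYZ ("slippy map") and TMS row numbering, which
# count rows from opposite edges of the grid. At zoom 3 there are 2**3 = 8
# rows (0..7), so flip_y(3, 1) == 6; the function is its own inverse.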
def mbtiles_setup(cur):
cur.execute("""
create table tiles (
zoom_level integer,
tile_column integer,
tile_row integer,
tile_data blob);
""")
cur.execute("""create table metadata
(name text, value text);""")
cur.execute("""CREATE TABLE grids (zoom_level integer, tile_column integer,
tile_row integer, grid blob);""")
cur.execute("""CREATE TABLE grid_data (zoom_level integer, tile_column
integer, tile_row integer, key_name text, key_json text);""")
cur.execute("""create unique index name on metadata (name);""")
cur.execute("""create unique index tile_index on tiles
(zoom_level, tile_column, tile_row);""")
def mbtiles_connect(mbtiles_file, silent):
try:
con = sqlite3.connect(mbtiles_file)
return con
except Exception as e:
if not silent:
logger.error("Could not connect to database")
logger.exception(e)
sys.exit(1)
def optimize_connection(cur):
cur.execute("""PRAGMA synchronous=0""")
cur.execute("""PRAGMA locking_mode=EXCLUSIVE""")
cur.execute("""PRAGMA journal_mode=DELETE""")
def compression_prepare(cur, silent):
if not silent:
logger.debug('Prepare database compression.')
cur.execute("""
CREATE TABLE if not exists images (
tile_data blob,
tile_id integer);
""")
cur.execute("""
CREATE TABLE if not exists map (
zoom_level integer,
tile_column integer,
tile_row integer,
tile_id integer);
""")
def optimize_database(con, silent):
    # Takes a connection rather than a cursor: isolation_level only exists on
    # the connection, and sqlite3 connections also expose execute() directly.
    if not silent:
        logger.debug('analyzing db')
    con.execute("""ANALYZE;""")
    if not silent:
        logger.debug('cleaning db')
    # Workaround for python>=3.6.0,python<3.6.2
    # https://bugs.python.org/issue28518
    con.isolation_level = None
    con.execute("""VACUUM;""")
    con.isolation_level = ''  # reset default value of isolation_level
def compression_do(cur, con, chunk, silent):
if not silent:
logger.debug('Making database compression.')
overlapping = 0
unique = 0
total = 0
cur.execute("select count(zoom_level) from tiles")
res = cur.fetchone()
total_tiles = res[0]
last_id = 0
    if not silent:
        logger.debug("%d total tiles to fetch" % total_tiles)
    for i in range(total_tiles // chunk + 1):
        if not silent:
            logger.debug("%d / %d rounds done" % (i, total_tiles // chunk))
ids = []
files = []
start = time.time()
cur.execute("""select zoom_level, tile_column, tile_row, tile_data
from tiles where rowid > ? and rowid <= ?""", ((i * chunk), ((i + 1) * chunk)))
if not silent:
logger.debug("select: %s" % (time.time() - start))
rows = cur.fetchall()
for r in rows:
total = total + 1
if r[3] in files:
overlapping = overlapping + 1
start = time.time()
query = """insert into map
(zoom_level, tile_column, tile_row, tile_id)
values (?, ?, ?, ?)"""
if not silent:
logger.debug("insert: %s" % (time.time() - start))
cur.execute(query, (r[0], r[1], r[2], ids[files.index(r[3])]))
else:
unique = unique + 1
last_id += 1
ids.append(last_id)
files.append(r[3])
start = time.time()
query = """insert into images
(tile_id, tile_data)
values (?, ?)"""
cur.execute(query, (str(last_id), sqlite3.Binary(r[3])))
if not silent:
logger.debug("insert into images: %s" % (time.time() - start))
start = time.time()
query = """insert into map
(zoom_level, tile_column, tile_row, tile_id)
values (?, ?, ?, ?)"""
cur.execute(query, (r[0], r[1], r[2], last_id))
if not silent:
logger.debug("insert into map: %s" % (time.time() - start))
con.commit()
def compression_finalize(cur, con, silent):
if not silent:
logger.debug('Finalizing database compression.')
cur.execute("""drop table tiles;""")
cur.execute("""create view tiles as
select map.zoom_level as zoom_level,
map.tile_column as tile_column,
map.tile_row as tile_row,
images.tile_data as tile_data FROM
map JOIN images on images.tile_id = map.tile_id;""")
cur.execute("""
CREATE UNIQUE INDEX map_index on map
(zoom_level, tile_column, tile_row);""")
cur.execute("""
CREATE UNIQUE INDEX images_id on images
(tile_id);""")
# Workaround for python>=3.6.0,python<3.6.2
# https://bugs.python.org/issue28518
con.isolation_level = None
cur.execute("""vacuum;""")
con.isolation_level = '' # reset default value of isolation_level
cur.execute("""analyze;""")
def get_dirs(path):
return [name for name in os.listdir(path)
if os.path.isdir(os.path.join(path, name))]
def disk_to_mbtiles(directory_path, mbtiles_file, **kwargs):
silent = kwargs.get('silent')
if not silent:
logger.info("Importing disk to MBTiles")
logger.debug("%s --> %s" % (directory_path, mbtiles_file))
con = mbtiles_connect(mbtiles_file, silent)
cur = con.cursor()
optimize_connection(cur)
mbtiles_setup(cur)
    image_format = kwargs.get('format', 'png')
    try:
        metadata = json.load(open(os.path.join(directory_path, 'metadata.json'), 'r'))
        image_format = kwargs.get('format', image_format)
for name, value in metadata.items():
cur.execute('insert into metadata (name, value) values (?, ?)',
(name, value))
if not silent:
logger.info('metadata from metadata.json restored')
except IOError:
if not silent:
logger.warning('metadata.json not found')
count = 0
start_time = time.time()
for zoom_dir in get_dirs(directory_path):
if kwargs.get("scheme") == 'ags':
if not "L" in zoom_dir:
if not silent:
logger.warning("You appear to be using an ags scheme on an non-arcgis Server cache.")
z = int(zoom_dir.replace("L", ""))
elif kwargs.get("scheme") == 'gwc':
z=int(zoom_dir[-2:])
else:
if "L" in zoom_dir:
if not silent:
logger.warning("You appear to be using a %s scheme on an arcgis Server cache. Try using --scheme=ags instead" % kwargs.get("scheme"))
z = int(zoom_dir)
for row_dir in get_dirs(os.path.join(directory_path, zoom_dir)):
if kwargs.get("scheme") == 'ags':
y = flip_y(z, int(row_dir.replace("R", ""), 16))
elif kwargs.get("scheme") == 'gwc':
pass
elif kwargs.get("scheme") == 'zyx':
y = flip_y(int(z), int(row_dir))
else:
x = int(row_dir)
for current_file in os.listdir(os.path.join(directory_path, zoom_dir, row_dir)):
if current_file == ".DS_Store" and not silent:
logger.warning("Your OS is MacOS,and the .DS_Store file will be ignored.")
else:
file_name, ext = current_file.split('.',1)
f = open(os.path.join(directory_path, zoom_dir, row_dir, current_file), 'rb')
file_content = f.read()
f.close()
if kwargs.get('scheme') == 'xyz':
y = flip_y(int(z), int(file_name))
elif kwargs.get("scheme") == 'ags':
x = int(file_name.replace("C", ""), 16)
elif kwargs.get("scheme") == 'gwc':
x, y = file_name.split('_')
x = int(x)
y = int(y)
elif kwargs.get("scheme") == 'zyx':
x = int(file_name)
else:
y = int(file_name)
if (ext == image_format):
if not silent:
logger.debug(' Read tile from Zoom (z): %i\tCol (x): %i\tRow (y): %i' % (z, x, y))
cur.execute("""insert into tiles (zoom_level,
tile_column, tile_row, tile_data) values
(?, ?, ?, ?);""",
(z, x, y, sqlite3.Binary(file_content)))
count = count + 1
if (count % 100) == 0 and not silent:
logger.info(" %s tiles inserted (%d tiles/sec)" % (count, count / (time.time() - start_time)))
elif (ext == 'grid.json'):
if not silent:
logger.debug(' Read grid from Zoom (z): %i\tCol (x): %i\tRow (y): %i' % (z, x, y))
# Remove potential callback with regex
file_content = file_content.decode('utf-8')
has_callback = re.match(r'[\w\s=+-/]+\(({(.|\n)*})\);?', file_content)
if has_callback:
file_content = has_callback.group(1)
utfgrid = json.loads(file_content)
data = utfgrid.pop('data')
compressed = zlib.compress(json.dumps(utfgrid).encode())
cur.execute("""insert into grids (zoom_level, tile_column, tile_row, grid) values (?, ?, ?, ?) """, (z, x, y, sqlite3.Binary(compressed)))
grid_keys = [k for k in utfgrid['keys'] if k != ""]
for key_name in grid_keys:
key_json = data[key_name]
cur.execute("""insert into grid_data (zoom_level, tile_column, tile_row, key_name, key_json) values (?, ?, ?, ?, ?);""", (z, x, y, key_name, json.dumps(key_json)))
if not silent:
logger.debug('tiles (and grids) inserted.')
if kwargs.get('compression', False):
compression_prepare(cur, silent)
compression_do(cur, con, 256, silent)
compression_finalize(cur, con, silent)
optimize_database(con, silent)
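# Directory layouts handled above, per scheme (illustrative paths):
#   xyz:            {z}/{x}/{y}.png, y flipped from the slippy-map row
#   tms (default):  {z}/{x}/{y}.png, stored as-is
#   zyx:            {z}/{y}/{x}.png, y flipped
#   ags:            L{zz}/R{rrrrrrrr}/C{cccccccc}.png, hex row/column numbers
#   gwc:            {..zz}/{x}_{y}.png, zoom from the zoom dir's last two chars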
def mbtiles_metadata_to_disk(mbtiles_file, **kwargs):
silent = kwargs.get('silent')
if not silent:
logger.debug("Exporting MBTiles metatdata from %s" % (mbtiles_file))
con = mbtiles_connect(mbtiles_file, silent)
metadata = dict(con.execute('select name, value from metadata;').fetchall())
if not silent:
logger.debug(json.dumps(metadata, indent=2))
def mbtiles_to_disk(mbtiles_file, directory_path, **kwargs):
silent = kwargs.get('silent')
if not silent:
logger.debug("Exporting MBTiles to disk")
logger.debug("%s --> %s" % (mbtiles_file, directory_path))
con = mbtiles_connect(mbtiles_file, silent)
os.mkdir("%s" % directory_path)
metadata = dict(con.execute('select name, value from metadata;').fetchall())
json.dump(metadata, open(os.path.join(directory_path, 'metadata.json'), 'w'), indent=4)
count = con.execute('select count(zoom_level) from tiles;').fetchone()[0]
done = 0
base_path = directory_path
if not os.path.isdir(base_path):
os.makedirs(base_path)
# if interactivity
formatter = metadata.get('formatter')
if formatter:
layer_json = os.path.join(base_path, 'layer.json')
formatter_json = {"formatter":formatter}
open(layer_json, 'w').write(json.dumps(formatter_json))
tiles = con.execute('select zoom_level, tile_column, tile_row, tile_data from tiles;')
t = tiles.fetchone()
while t:
z = t[0]
x = t[1]
y = t[2]
if kwargs.get('scheme') == 'xyz':
y = flip_y(z,y)
if not silent:
logger.debug('flipping')
tile_dir = os.path.join(base_path, str(z), str(x))
elif kwargs.get('scheme') == 'wms':
tile_dir = os.path.join(base_path,
"%02d" % (z),
"%03d" % (int(x) / 1000000),
"%03d" % ((int(x) / 1000) % 1000),
"%03d" % (int(x) % 1000),
"%03d" % (int(y) / 1000000),
"%03d" % ((int(y) / 1000) % 1000))
else:
tile_dir = os.path.join(base_path, str(z), str(x))
if not os.path.isdir(tile_dir):
os.makedirs(tile_dir)
if kwargs.get('scheme') == 'wms':
tile = os.path.join(tile_dir,'%03d.%s' % (int(y) % 1000, kwargs.get('format', 'png')))
else:
tile = os.path.join(tile_dir,'%s.%s' % (y, kwargs.get('format', 'png')))
f = open(tile, 'wb')
f.write(t[3])
f.close()
done = done + 1
if not silent:
logger.info('%s / %s tiles exported' % (done, count))
t = tiles.fetchone()
# grids
callback = kwargs.get('callback')
done = 0
try:
count = con.execute('select count(zoom_level) from grids;').fetchone()[0]
grids = con.execute('select zoom_level, tile_column, tile_row, grid from grids;')
g = grids.fetchone()
except sqlite3.OperationalError:
g = None # no grids table
while g:
zoom_level = g[0] # z
tile_column = g[1] # x
y = g[2] # y
grid_data_cursor = con.execute('''select key_name, key_json FROM
grid_data WHERE
zoom_level = %(zoom_level)d and
tile_column = %(tile_column)d and
tile_row = %(y)d;''' % locals() )
if kwargs.get('scheme') == 'xyz':
y = flip_y(zoom_level,y)
grid_dir = os.path.join(base_path, str(zoom_level), str(tile_column))
if not os.path.isdir(grid_dir):
os.makedirs(grid_dir)
grid = os.path.join(grid_dir,'%s.grid.json' % (y))
f = open(grid, 'w')
grid_json = json.loads(zlib.decompress(g[3]).decode('utf-8'))
# join up with the grid 'data' which is in pieces when stored in mbtiles file
grid_data = grid_data_cursor.fetchone()
data = {}
while grid_data:
data[grid_data[0]] = json.loads(grid_data[1])
grid_data = grid_data_cursor.fetchone()
grid_json['data'] = data
if callback in (None, "", "false", "null"):
f.write(json.dumps(grid_json))
else:
f.write('%s(%s);' % (callback, json.dumps(grid_json)))
f.close()
done = done + 1
if not silent:
logger.info('%s / %s grids exported' % (done, count))
g = grids.fetchone()
|
|
import numpy as np
from numpy.random import rand
import scipy.linalg as la
from numpy.testing import assert_, assert_equal, run_module_suite
import scipy
from qutip import (rand_dm, rand_unitary, spre, spost, vector_to_operator,
operator_to_vector, mat2vec, vec2mat, vec2mat_index,
mat2vec_index, tensor, sprepost, to_super, reshuffle,
identity, destroy, create, qeye, QobjEvo, Qobj)
from qutip.superoperator import liouvillian, liouvillian_ref, \
lindblad_dissipator
def f(t, args):
return t*(1-0.5j)
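# f is used below as a QobjEvo coefficient: QobjEvo([H0, [H1, f]]) represents
# H0 + f(t, args)*H1, so evaluating the result at time t scales the second
# term by t*(1-0.5j).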
class TestMatVec:
"""
A test class for the QuTiP function for matrix/vector conversion.
"""
def testOperatorVector(self):
"""
Superoperator: Operator - vector - operator conversion.
"""
N = 3
rho1 = rand_dm(N)
rho2 = vector_to_operator(operator_to_vector(rho1))
assert_((rho1 - rho2).norm() < 1e-8)
def testOperatorVectorTensor(self):
"""
Superoperator: Operator - vector - operator conversion with a tensor product state.
"""
Na = 3
Nb = 2
rhoa = rand_dm(Na)
rhob = rand_dm(Nb)
rho1 = tensor(rhoa, rhob)
rho2 = vector_to_operator(operator_to_vector(rho1))
assert_((rho1 - rho2).norm() < 1e-8)
def testOperatorVectorNotSquare(self):
"""
Superoperator: Operator - vector - operator conversion for non-square matrix.
"""
op1 = Qobj(np.random.rand(6).reshape((3, 2)))
op2 = vector_to_operator(operator_to_vector(op1))
assert_((op1 - op2).norm() < 1e-8)
def testOperatorSpreAppl(self):
"""
Superoperator: apply operator and superoperator from left (spre)
"""
N = 3
rho = rand_dm(N)
U = rand_unitary(N)
rho1 = U * rho
rho2_vec = spre(U) * operator_to_vector(rho)
rho2 = vector_to_operator(rho2_vec)
assert_((rho1 - rho2).norm() < 1e-8)
def testOperatorSpostAppl(self):
"""
Superoperator: apply operator and superoperator from right (spost)
"""
N = 3
rho = rand_dm(N)
U = rand_unitary(N)
rho1 = rho * U
rho2_vec = spost(U) * operator_to_vector(rho)
rho2 = vector_to_operator(rho2_vec)
assert_((rho1 - rho2).norm() < 1e-8)
def testOperatorUnitaryTransform(self):
"""
Superoperator: Unitary transformation with operators and superoperators
"""
N = 3
rho = rand_dm(N)
U = rand_unitary(N)
rho1 = U * rho * U.dag()
rho2_vec = spre(U) * spost(U.dag()) * operator_to_vector(rho)
rho2 = vector_to_operator(rho2_vec)
assert_((rho1 - rho2).norm() < 1e-8)
def testMatrixVecMat(self):
"""
Superoperator: Conversion matrix to vector to matrix
"""
M = rand(10, 10)
V = mat2vec(M)
M2 = vec2mat(V)
assert_(la.norm(M - M2) == 0.0)
def testVecMatVec(self):
"""
Superoperator: Conversion vector to matrix to vector
"""
V = rand(100) # a row vector
M = vec2mat(V)
V2 = mat2vec(M).T # mat2vec returns a column vector
assert_(la.norm(V - V2) == 0.0)
def testVecMatIndexConversion(self):
"""
Superoperator: Conversion between matrix and vector indices
"""
N = 10
for I in range(N * N):
i, j = vec2mat_index(N, I)
I2 = mat2vec_index(N, i, j)
assert_(I == I2)
    def testVecMatIndexCompatibility(self):
"""
Superoperator: Compatibility between matrix/vector and
corresponding index conversions.
"""
N = 10
M = rand(N, N)
V = mat2vec(M)
for I in range(N * N):
i, j = vec2mat_index(N, I)
assert_(V[I][0] == M[i, j])
def test_reshuffle(self):
U1 = rand_unitary(2)
U2 = rand_unitary(3)
U3 = rand_unitary(4)
U = tensor(U1, U2, U3)
S = to_super(U)
S_col = reshuffle(S)
assert_equal(S_col.dims[0], [[2, 2], [3, 3], [4, 4]])
assert_(reshuffle(S_col) == S)
def test_sprepost(self):
U1 = rand_unitary(3)
U2 = rand_unitary(3)
S1 = spre(U1) * spost(U2)
S2 = sprepost(U1, U2)
assert_(S1 == S2)
def testLiouvillianImplem(self):
"""
Superoperator: Randomized comparison of standard and reference
Liouvillian functions.
"""
N1 = 3
N2 = 4
N3 = 5
a1 = tensor(rand_dm(N1, density=0.75), identity(N2), identity(N3))
a2 = tensor(identity(N1), rand_dm(N2, density=0.75), identity(N3))
a3 = tensor(identity(N1), identity(N2), rand_dm(N3, density=0.75))
H = a1.dag() * a1 + a2.dag() * a2 + a3.dag() * a3
c_ops = [np.sqrt(0.01) * a1, np.sqrt(0.025) * a2, np.sqrt(0.05) * a3]
L1 = liouvillian(H, c_ops)
L2 = liouvillian_ref(H, c_ops)
assert_((L1 - L2).norm('max') < 1e-8)
class TestSuper_td:
"""
A test class for the QuTiP superoperator functions.
"""
def setup_method(self):
N = 3
self.t1 = QobjEvo([qeye(N)*(1.+0.1j),[create(N)*(1.-0.1j),f]])
self.t2 = QobjEvo([destroy(N)*(1.-0.2j)])
self.t3 = QobjEvo([[destroy(N)*create(N)*(1.+0.2j),f]])
self.q1 = qeye(N)*(1.+0.3j)
self.q2 = destroy(N)*(1.-0.3j)
self.q3 = destroy(N)*create(N)*(1.+0.4j)
def test_spre_td(self):
"Superoperator: spre, time-dependent"
assert_(spre(self.t1)(.5) == spre(self.t1(.5)))
def test_spost_td(self):
"Superoperator: spre, time-dependent"
assert_(spost(self.t1)(.5) == spost(self.t1(.5)))
def test_sprepost_td(self):
"Superoperator: sprepost, time-dependent"
# left QobjEvo
assert_(sprepost(self.t1, self.q2)(.5) ==
sprepost(self.t1(.5), self.q2))
        # right QobjEvo
assert_(sprepost(self.q2, self.t1)(.5) ==
sprepost(self.q2, self.t1(.5)))
        # both QobjEvo, one of them constant
assert_(sprepost(self.t1, self.t2)(.5) ==
sprepost(self.t1(.5), self.t2(.5)))
def test_operator_vector_td(self):
"Superoperator: operator_to_vector, time-dependent"
assert_(operator_to_vector(self.t1)(.5) ==
operator_to_vector(self.t1(.5)))
vec = operator_to_vector(self.t1)
assert_(vector_to_operator(vec)(.5) == vector_to_operator(vec(.5)))
def test_liouvillian_td(self):
"Superoperator: liouvillian, time-dependent"
assert_(liouvillian(self.t1)(0.5) == liouvillian(self.t1(0.5)))
assert_(liouvillian(None, [self.t2])(0.5) ==
liouvillian(None, [self.t2(0.5)]))
assert_(liouvillian(self.t1, [self.t2, self.q1, self.t3],
chi=[1,2,3])(0.5) ==
liouvillian(self.t1(0.5), [self.t2(0.5), self.q1, self.t3(0.5)],
chi=[1,2,3]))
def test_lindblad_dissipator_td(self):
"Superoperator: lindblad_dissipator, time-dependent"
assert_(lindblad_dissipator(self.t2)(.5) ==
lindblad_dissipator(self.t2(.5)))
assert_(lindblad_dissipator(self.t2, self.q1)(.5) ==
lindblad_dissipator(self.t2(.5), self.q1))
assert_(lindblad_dissipator(self.q1, self.t2)(.5) ==
lindblad_dissipator(self.q1, self.t2(.5)))
if __name__ == "__main__":
run_module_suite()
|
|
"""
bughipster.website.views
~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2014 by Xavier Ordoquy, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from django import http
from django.views import generic
from django.core.urlresolvers import reverse
from bughipster.project import models, filters
from .login import LoginMixin
class ToDo(generic.TemplateView):
template_name = 'todo.html'
class Error(generic.TemplateView):
template_name = 'error.html'
message = None
def get_context_data(self, **kwargs):
result = super(Error, self).get_context_data(**kwargs)
result['message'] = self.message
return result
class Home(LoginMixin, generic.TemplateView):
template_name = 'home.html'
def get(self, request, *args, **kwargs):
if 'logout' in self.request.GET:
return http.HttpResponseRedirect(reverse('logout'))
# if self.request.GET:
# raise ValueError(self.request.GET)
return super(Home, self).get(request, *args, **kwargs)
class UnconfiguredHome(generic.TemplateView):
template_name = 'unconfigured.html'
class CreateAccount(generic.TemplateView):
template_name = 'create_account.html'
class SimpleQuery(generic.TemplateView):
template_name = 'simple_search.html'
def get_context_data(self, **kwargs):
result = super(SimpleQuery, self).get_context_data(**kwargs)
# f = ProductFilter(request.GET, queryset=Product.objects.all())
result['form'] = filters.SimpleQuery({}, queryset=models.Bug.objects.all()).form
return result
#
# Complex query page helpers
#
SUMMARY_TYPE = (
("allwordssubstr", "contains all of the strings"),
("anywordssubstr", "contains any of the strings"),
("substring", "contains the string"),
("casesubstring", "contains the string (exact case)"),
("allwords", "contains all of the words"),
("anywords", "contains any of the words"),
("regexp", "matches regular expression"),
("notregexp", "does not match regular expression"),
)
BUG_INCLUSION_TYPE = (
("anyexact", "only included in"),
("nowords", "excluded from"),
)
EMAIL_TYPE = (
("substring", "contains"),
("notsubstring", "doesn't contain"),
("exact", "is"),
("notequals", "is not"),
("regexp", "matches regexp"),
("notregexp", "doesn't match regexp"),
)
BUG_TYPE = (
("noop", "---"),
("equals", "is equal to"),
("notequals", "is not equal to"),
("anyexact", "is equal to any of the strings"),
("substring", "contains the string"),
("casesubstring", "contains the string (exact case)"),
("notsubstring", "does not contain the string"),
("anywordssubstr", "contains any of the strings"),
("allwordssubstr", "contains all of the strings"),
("nowordssubstr", "contains none of the strings"),
("regexp", "matches regular expression"),
("notregexp", "does not match regular expression"),
("lessthan", "is less than"),
("lessthaneq", "is less than or equal to"),
("greaterthan", "is greater than"),
("greaterthaneq", "is greater than or equal to"),
("anywords", "contains any of the words"),
("allwords", "contains all of the words"),
("nowords", "contains none of the words"),
("changedbefore", "changed before"),
("changedafter", "changed after"),
("changedfrom", "changed from"),
("changedto", "changed to"),
("changedby", "changed by"),
("matches", "matches"),
("notmatches", "does not match"),
)
CUSTOM_SEARCH = (
("noop", "---"),
("alias", "Alias"),
("assigned_to", "Assignee"),
("attachments.submitter", "Attachment creator"),
("attach_data.thedata", "Attachment data"),
("attachments.description", "Attachment description"),
("attachments.filename", "Attachment filename"),
("attachments.isobsolete", "Attachment is obsolete"),
("attachments.ispatch", "Attachment is patch"),
("attachments.isprivate", "Attachment is private"),
("attachments.mimetype", "Attachment mime type"),
("blocked", "Blocks"),
("bug_id", "Bug ID"),
("cc", "CC"),
("cclist_accessible", "CC list accessible"),
("classification", "Classification"),
("longdesc", "Comment"),
("longdescs.isprivate", "Comment is private"),
("commenter", "Commenter"),
("component", "Component"),
("content", "Content"),
("creation_ts", "Creation date"),
("days_elapsed", "Days since bug changed"),
("dependson", "Depends on"),
("everconfirmed", "Ever confirmed"),
("requestees.login_name", "Flag Requestee"),
("setters.login_name", "Flag Setter"),
("flagtypes.name", "Flags"),
("bug_group", "Group"),
("keywords", "Keywords"),
("delta_ts", "Changed"),
("longdescs.count", "Number of Comments"),
("op_sys", "OS"),
("rep_platform", "Hardware"),
("priority", "Priority"),
("product", "Product"),
("qa_contact", "QA Contact"),
("reporter", "Reporter"),
("reporter_accessible", "Reporter accessible"),
("resolution", "Resolution"),
("see_also", "See Also"),
("bug_severity", "Severity"),
("bug_status", "Status"),
("status_whiteboard", "Whiteboard"),
("short_desc", "Summary"),
("tag", "Tags"),
("target_milestone", "Target Milestone"),
("owner_idle_time", "Time Since Assignee Touched"),
("bug_file_loc", "URL"),
("version", "Version"),
)
def item_per_project(items):
"""
Create a dictionary of {project_id: [item_ids]} which allows the page
to show/hide items according to the project id.
- items is a list of item
- item must have an id
- item must have an associated project
"""
result = {}
for item in items:
result.setdefault(item.product_id, []).append(item.id)
return result
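# Worked example (illustrative): given items with (id, product_id) pairs
# (1, 10), (2, 10) and (3, 11), item_per_project(items) returns
# {10: [1, 2], 11: [3]}.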
def remove_duplicates(items):
"""
Remove duplicated names from the available choices.
Returns the cleaned list and the duplicates dictionary
"""
items_map = {}
duplicates = {}
duplicates_count = {}
for item in items:
items_map.setdefault(item.value, []).append(item.id)
for grouped_items in items_map.values():
if len(grouped_items) > 1:
reference = grouped_items.pop()
local_dict = dict((item, reference) for item in grouped_items)
duplicates.update(local_dict)
duplicates_count[reference] = len(local_dict)
return {
'duplicates': duplicates,
'count': duplicates_count,
}
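# Worked example (illustrative): for items with (id, value) pairs
# (1, 'a'), (2, 'a'), (3, 'b'), the 'a' group [1, 2] pops id 2 as the
# reference, giving {'duplicates': {1: 2}, 'count': {2: 1}}; 'b' is untouched.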
class ComplexQuery(generic.TemplateView):
template_name = 'complex_search.html'
def get_context_data(self, **kwargs):
result = super(ComplexQuery, self).get_context_data(**kwargs)
# Generic data
result['statuses'] = list(models.Status.objects.all().order_by('sortkey', 'value'))
result['resolutions'] = list(models.Resolution.objects.all().order_by('sortkey', 'value').distinct())
result['severities'] = list(models.Severity.objects.all().order_by('sortkey', 'value'))
result['priorities'] = list(models.Priority.objects.all().order_by('sortkey', 'value'))
result['hardwares'] = list(models.Hardware.objects.all().order_by('sortkey', 'value'))
result['osses'] = list(models.OS.objects.all().order_by('sortkey', 'value'))
# TODO: Check project's permission.
result['projects'] = list(models.Product.objects.all().order_by('name'))
result['components'] = list(models.Component.objects.all().order_by('product__id', 'name').distinct())
result['versions'] = list(models.Version.objects.all().order_by('id').distinct())
result['milestones'] = list(models.Milestone.objects.all().order_by('sortkey', 'value').distinct())
# Sort the components/version/other per project
result['per_project'] = {}
result['duplicates'] = {}
for key in ['components', 'versions', 'milestones']:
duplicates = remove_duplicates(result[key])
result['per_project'][key] = item_per_project(result[key])
result['duplicates'][key] = duplicates
result[key] = [v for v in result[key] if v.id not in duplicates['duplicates']]
result['SUMMARY_TYPE'] = SUMMARY_TYPE
result['BUG_TYPE'] = BUG_TYPE
result['EMAIL_TYPE'] = EMAIL_TYPE
result['BUG_INCLUSION_TYPE'] = BUG_INCLUSION_TYPE
result['CUSTOM_SEARCH'] = CUSTOM_SEARCH
# Filter the various values based on authorized projects.
return result
def query(request, *args, **kwargs):
view_type = request.GET.get('format', 'specific')
if view_type == 'specific':
return SimpleQuery.as_view()(request, *args, **kwargs)
elif view_type == 'advanced':
return ComplexQuery.as_view()(request, *args, **kwargs)
error_message = """The requested format <em>%s</em> does not exist with
a content type of <em>html</em>.""" % (view_type,)
return Error.as_view(message=error_message)(request, *args, **kwargs)
|
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*- #
import twitterbot
import yaml
import random
import sys
import re
import time
import identify_contests as ic
import tweepy
from httplib import IncompleteRead
class MyTwitterBot(twitterbot.TwitterBot):
def bot_init(self, home='./'):
"""
Initialize and configure your bot!
Use this function to set options and initialize your own custom bot
state (if any).
"""
# REQUIRED: LOGIN DETAILS! Load these from file. #
with self.config['storage'].read('config.yaml') as f:
tokens = yaml.load(f)
self.config.update(tokens)
# SEMI-OPTIONAL: OTHER CONFIG STUFF! #
        self.config['tweet_interval'] = 5*60*60  # 5 hours
self.config['tweet_interval_range'] = (5*60*60, 10*60*60)
self.config['reply_direct_mention_only'] = False
self.config['reply_followers_only'] = False
self.config['autofav_mentions'] = False
self.config['autofav_keywords'] = []
self.config['autofollow'] = False
###########################################
# CUSTOM: your bot's own state variables! #
###########################################
# If you'd like to save variables with the bot's state, use the
# self.state dictionary. These will only be initialized if the bot is
# not loading a previous saved state.
# self.state['butt_counter'] = 0
# You can also add custom functions that run at regular intervals
# using self.register_custom_handler(function, interval).
#
# For instance, if your normal timeline tweet interval is every 30
# minutes, but you'd also like to post something different every 24
# hours, you would implement self.my_function and add the following
# line here:
# self.register_custom_handler(self.my_function, 60 * 60 * 24)
self.register_custom_handler(self.drop_old_follows, 12*60*60)
self.register_custom_handler(self.reset_follow_count, 3*60*60)
self.register_custom_handler(self.reload_ignored_users, 2*60*60)
self.state['rejected_tweets_count'] = 0
self.state['recent_follow_count'] = 0
self.ignored_users = []
def bot_init2(self):
"""
Super hacky. Call this in the bot init, after the API has been set up.
"""
# Start the streaming API!
self.listener = twitterbot.BotStreamListener(method=self.on_stream)
try:
self.stream = tweepy.Stream(auth=self.api.auth, listener=self.listener)
keyphrases = ["RT to win", "RT to enter", "retweet to win", "retweet to enter"]
self.stream.filter(track=keyphrases, async=True)
        except Exception:
            # Streaming is best-effort; failing to start it shouldn't kill the bot.
            pass
def on_scheduled_tweet(self):
"""
Make a public tweet to the bot's own timeline.
It's up to you to ensure that it's less than 140 characters.
Set tweet frequency in seconds with TWEET_INTERVAL in config.py.
"""
# text = function_that_returns_a_string_goes_here()
# self.post_tweet(text)
luck_texts = ["Fingers crossed.", "Where's my rabbit foot?", "My lucky number is irrational.",
"A sweepstakes a day brings some junk to your door.", "Garlic protects from evil spirits."]
self.post_tweet(random.choice(luck_texts))
def on_mention(self, tweet, prefix):
"""
Defines actions to take when a mention is received.
tweet - a tweepy.Status object. You can access the text with
tweet.text
prefix - the @-mentions for this reply. No need to include this in the
reply string; it's provided so you can use it to make sure the value
you return is within the 140 character limit with this.
It's up to you to ensure that the prefix and tweet are less than 140
characters.
When calling post_tweet, you MUST include reply_to=tweet, or
Twitter won't count it as a reply.
"""
winning_words = ['won', 'winner', 'congrats', 'congratulations', 'prize', 'win']
warnings = ['fake', 'scam', 'cheat', 'fraud', 'hoax', 'sucker']
exclamations = ['Sweet', 'Awesome', 'Wahoo', 'OMG', 'Hooray', 'Hoorah',
'Wow', 'ZOMGZ', 'Huzzah', 'Holy cannoli', 'Woohoo', 'Woot' ]
        if any(w in tweet.text.lower() for w in winning_words):
            text = random.choice(exclamations) + '!' * random.randrange(1, 4)
        elif any(w in tweet.text.lower() for w in warnings):
text = "I'm just a bot, I don't have to be very smart."
else:
text = "Sorry, interactivity hasn't really been implemented yet."
text = prefix + ' ' + text
self.post_tweet(text, reply_to=tweet)
def on_timeline(self, tweet, prefix):
"""
Defines actions to take on a timeline tweet.
tweet - a tweepy.Status object. You can access the text with
tweet.text
prefix - the @-mentions for this reply. No need to include this in the
reply string; it's provided so you can use it to make sure the value
you return is within the 140 character limit with this.
It's up to you to ensure that the prefix and tweet are less than 140
characters.
When calling post_tweet, you MUST include reply_to=tweet, or
Twitter won't count it as a reply.
"""
# text = function_that_returns_a_string_goes_here()
# prefixed_text = prefix + ' ' + text
# self.post_tweet(prefix + ' ' + text, reply_to=tweet)
# call this to fav the tweet!
# if something:
# self.favorite_tweet(tweet)
pass
def on_stream(self, tweet):
"""
Defines action to take when streaming API returns a status obj.
"""
# Does the tweet contain a URL/reference pointing to another tweet?
tweet = self.chase_embedded_tweet_url(tweet)
self.check_and_retweet(tweet)
def chase_embedded_tweet_url(self, tweet):
"""
Checks to see if the tweet is just linking to another tweet.
If so, return that tweet. Or return the tweet to which the tweet is replying.
If not, return the original tweet.
"""
if len(tweet.entities['urls']) == 0 and tweet.in_reply_to_status_id_str is None:
return tweet
if len(tweet.entities['urls']) == 0:
try:
tweet = self.api.get_status(id=tweet.in_reply_to_status_id_str)
return tweet
except tweepy.TweepError as e:
return tweet
for u in tweet.entities['urls']:
twitter = re.compile('https?://twitter\.com/', re.IGNORECASE)
if twitter.search(u['expanded_url']) is not None:
# pull the status ID from the URL & retrieve it
re_status = re.compile('/status/(\d+)/?', re.IGNORECASE)
tweet_id = re_status.search(u['expanded_url']).groups()[0]
try:
tweet = self.api.get_status(id=tweet_id)
return tweet
except tweepy.TweepError as e:
pass
return tweet
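    # Extraction sketch: for an embedded URL such as
    # 'https://twitter.com/someuser/status/123456', re_status captures
    # '123456', which is then re-fetched via api.get_status(id=...).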
def check_and_retweet(self, tweet):
"""
Takes a tweet, decides if it's a contest tweet, RTs it.
"""
# Check the ignored users list
if tweet.user.id in self.ignored_users:
return None
# Check if it's really a contest
(rt_me, reject_reason) = ic.is_contest(tweet)
        if not rt_me:
self.state['rejected_tweets_count'] += 1
return None
# TODO: Do something with the results.
def drop_old_follows(self):
"""
Unfollow the oldest-followed accounts (except those on a safe list).
"""
max_following = random.randrange(1910,1960)
safe_users = []
for user in tweepy.Cursor(self.api.list_members, 'luckysqrt2', 'cool-brands').items():
safe_users.append(user.id)
        # The FIRST entries in the friends list are the most recently followed
        # accounts, so drop from the END of the list, skipping safe-listed users.
following = self.api.friends_ids()
num_following = len(following)
# loop from end to beginning of user list
i = -1
while num_following > max_following:
user = following[i]
if user in safe_users:
pass
else:
self.api.destroy_friendship(user_id=user)
num_following += -1
i += -1
# break if we loop back around to the beginning.
# TODO: This means the safe users list is too long & requires human action!
if i + len(following) == 0:
break
def reset_follow_count(self):
text = "I've rejected {} not-quite-contest tweets in the past 3 hours.".format(self.state['rejected_tweets_count'])
self.post_tweet(text)
self.state['recent_follow_count'] = 0
self.state['rejected_tweets_count'] = 0
def reload_ignored_users(self):
self.ignored_users = []
for user in tweepy.Cursor(self.api.list_members, 'luckysqrt2', 'ignore').items():
self.ignored_users.append(user.id)
if __name__ == '__main__':
# get home directory from command line argument
if len(sys.argv) > 1:
rootdir = sys.argv[1]
else:
rootdir = './'
bot = MyTwitterBot(home=rootdir)
bot.run()
|
|
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The General Language Understanding Evaluation (GLUE) benchmark."""
import csv
import os
import textwrap
import numpy as np
import six
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
_GLUE_CITATION = """\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
Note that each GLUE dataset has its own citation. Please see the source to see
the correct citation for each contained dataset."""
_GLUE_DESCRIPTION = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
"""
_MRPC_DEV_IDS = "https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2Fmrpc_dev_ids.tsv?alt=media&token=ec5c0836-31d5-48f4-b431-7480817f1adc"
_MRPC_TRAIN = "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt"
_MRPC_TEST = "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt"
_MNLI_BASE_KWARGS = dict(
text_features={
"premise": "sentence1",
"hypothesis": "sentence2",
},
label_classes=["entailment", "neutral", "contradiction"],
label_column="gold_label",
data_url="https://dl.fbaipublicfiles.com/glue/data/MNLI.zip",
data_dir="MNLI",
citation=textwrap.dedent("""\
@InProceedings{N18-1101,
author = "Williams, Adina
and Nangia, Nikita
and Bowman, Samuel",
title = "A Broad-Coverage Challenge Corpus for
Sentence Understanding through Inference",
booktitle = "Proceedings of the 2018 Conference of
the North American Chapter of the
Association for Computational Linguistics:
Human Language Technologies, Volume 1 (Long
Papers)",
year = "2018",
publisher = "Association for Computational Linguistics",
pages = "1112--1122",
location = "New Orleans, Louisiana",
url = "http://aclweb.org/anthology/N18-1101"
}
@article{bowman2015large,
title={A large annotated corpus for learning natural language inference},
author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},
journal={arXiv preprint arXiv:1508.05326},
year={2015}
}"""),
url="http://www.nyu.edu/projects/bowman/multinli/")
class GlueConfig(tfds.core.BuilderConfig):
"""BuilderConfig for GLUE."""
def __init__(self,
*,
text_features,
label_column,
data_url,
data_dir,
citation,
url,
label_classes=None,
process_label=lambda x: x,
**kwargs):
"""BuilderConfig for GLUE.
Args:
text_features: `dict[string, string]`, map from the name of the feature
dict for each text field to the name of the column in the tsv file
label_column: `string`, name of the column in the tsv file corresponding
to the label
data_url: `string`, url to download the zip file from
data_dir: `string`, the path to the folder containing the tsv files in the
downloaded zip
citation: `string`, citation for the data set
url: `string`, url for information about the data set
label_classes: `list[string]`, the list of classes if the label is
categorical. If not provided, then the label will be of type
`tf.float32`.
process_label: `Function[string, any]`, function taking in the raw value
of the label and processing it to the form required by the label feature
**kwargs: keyword arguments forwarded to super.
"""
super(GlueConfig, self).__init__(
version=tfds.core.Version("2.0.0"),
supported_versions=[
tfds.core.Version("1.0.0"),
tfds.core.Version("1.0.1"),
],
release_notes={
"1.0.0": "New split API (https://tensorflow.org/datasets/splits)",
"1.0.1": "Update dead URL links.",
"2.0.0": "Update data source for glue/qqp.",
},
**kwargs) # pytype: disable=wrong-arg-types # gen-stub-imports
self.text_features = text_features
self.label_column = label_column
self.label_classes = label_classes
self.data_url = data_url
self.data_dir = data_dir
self.citation = citation
self.url = url
self.process_label = process_label
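# Construction sketch (hypothetical config, mirroring the real entries below):
#   GlueConfig(name="my_task",
#              text_features={"sentence": "sentence"},
#              label_classes=["neg", "pos"],
#              label_column="label",
#              data_url="https://example.com/data.zip",
#              data_dir="MyTask",
#              citation="...",
#              url="https://example.com")
# When label_classes is omitted (as in stsb below), the label feature falls
# back to tf.float32 and process_label (e.g. np.float32) coerces raw values.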
class Glue(tfds.core.GeneratorBasedBuilder):
"""The General Language Understanding Evaluation (GLUE) benchmark."""
BUILDER_CONFIGS = [
GlueConfig(
name="cola",
description=textwrap.dedent("""\
The Corpus of Linguistic Acceptability consists of English
acceptability judgments drawn from books and journal articles on
linguistic theory. Each example is a sequence of words annotated
with whether it is a grammatical English sentence."""),
text_features={"sentence": "sentence"},
label_classes=["unacceptable", "acceptable"],
label_column="is_acceptable",
data_url="https://dl.fbaipublicfiles.com/glue/data/CoLA.zip",
data_dir="CoLA",
citation=textwrap.dedent("""\
@article{warstadt2018neural,
title={Neural Network Acceptability Judgments},
author={Warstadt, Alex and Singh, Amanpreet and Bowman, Samuel R},
journal={arXiv preprint arXiv:1805.12471},
year={2018}
}"""),
url="https://nyu-mll.github.io/CoLA/"),
GlueConfig(
name="sst2",
description=textwrap.dedent("""\
The Stanford Sentiment Treebank consists of sentences from movie reviews and
human annotations of their sentiment. The task is to predict the sentiment of a
given sentence. We use the two-way (positive/negative) class split, and use only
sentence-level labels."""),
text_features={"sentence": "sentence"},
label_classes=["negative", "positive"],
label_column="label",
data_url="https://dl.fbaipublicfiles.com/glue/data/SST-2.zip",
data_dir="SST-2",
citation=textwrap.dedent("""\
@inproceedings{socher2013recursive,
title={Recursive deep models for semantic compositionality over a sentiment treebank},
author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},
booktitle={Proceedings of the 2013 conference on empirical methods in natural language processing},
pages={1631--1642},
year={2013}
}"""),
url="https://nlp.stanford.edu/sentiment/index.html"),
GlueConfig(
name="mrpc",
description=textwrap.dedent("""\
The Microsoft Research Paraphrase Corpus (Dolan & Brockett, 2005) is a corpus of
sentence pairs automatically extracted from online news sources, with human annotations
for whether the sentences in the pair are semantically equivalent."""
), # pylint: disable=line-too-long
text_features={
"sentence1": "",
"sentence2": ""
},
label_classes=["not_equivalent", "equivalent"],
label_column="Quality",
data_url="", # MRPC isn't hosted by GLUE.
data_dir="MRPC",
citation=textwrap.dedent("""\
@inproceedings{dolan2005automatically,
title={Automatically constructing a corpus of sentential paraphrases},
author={Dolan, William B and Brockett, Chris},
booktitle={Proceedings of the Third International Workshop on Paraphrasing (IWP2005)},
year={2005}
}"""),
url="https://www.microsoft.com/en-us/download/details.aspx?id=52398"),
GlueConfig(
name="qqp",
description=textwrap.dedent("""\
          The Quora Question Pairs (QQP) dataset is a collection of question pairs from the
community question-answering website Quora. The task is to determine whether a
pair of questions are semantically equivalent."""),
text_features={
"question1": "question1",
"question2": "question2",
},
label_classes=["not_duplicate", "duplicate"],
label_column="is_duplicate",
data_url="https://dl.fbaipublicfiles.com/glue/data/QQP.zip",
data_dir="QQP",
citation=textwrap.dedent("""\
@online{WinNT,
author = {Iyer, Shankar and Dandekar, Nikhil and Csernai, Kornel},
title = {First Quora Dataset Release: Question Pairs},
year = 2017,
url = {https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs},
urldate = {2019-04-03}
}"""),
url="https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs"
),
GlueConfig(
name="stsb",
description=textwrap.dedent("""\
The Semantic Textual Similarity Benchmark (Cer et al., 2017) is a collection of
sentence pairs drawn from news headlines, video and image captions, and natural
language inference data. Each pair is human-annotated with a similarity score
from 0 to 5."""),
text_features={
"sentence1": "sentence1",
"sentence2": "sentence2",
},
label_column="score",
data_url="https://dl.fbaipublicfiles.com/glue/data/STS-B.zip",
data_dir="STS-B",
citation=textwrap.dedent("""\
@article{cer2017semeval,
title={Semeval-2017 task 1: Semantic textual similarity-multilingual and cross-lingual focused evaluation},
author={Cer, Daniel and Diab, Mona and Agirre, Eneko and Lopez-Gazpio, Inigo and Specia, Lucia},
journal={arXiv preprint arXiv:1708.00055},
year={2017}
}"""),
url="http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark",
process_label=np.float32),
GlueConfig(
name="mnli",
description=textwrap.dedent("""\
The Multi-Genre Natural Language Inference Corpus is a crowdsourced
collection of sentence pairs with textual entailment annotations. Given a premise sentence
and a hypothesis sentence, the task is to predict whether the premise entails the hypothesis
(entailment), contradicts the hypothesis (contradiction), or neither (neutral). The premise sentences are
gathered from ten different sources, including transcribed speech, fiction, and government reports.
We use the standard test set, for which we obtained private labels from the authors, and evaluate
          on both the matched (in-domain) and mismatched (cross-domain) sections. We also use and recommend
the SNLI corpus as 550k examples of auxiliary training data."""),
**_MNLI_BASE_KWARGS),
GlueConfig(
name="mnli_mismatched",
description=textwrap.dedent("""\
The mismatched validation and test splits from MNLI.
See the "mnli" BuilderConfig for additional information."""),
**_MNLI_BASE_KWARGS),
GlueConfig(
name="mnli_matched",
description=textwrap.dedent("""\
The matched validation and test splits from MNLI.
See the "mnli" BuilderConfig for additional information."""),
**_MNLI_BASE_KWARGS),
GlueConfig(
name="qnli",
description=textwrap.dedent("""\
The Stanford Question Answering Dataset is a question-answering
dataset consisting of question-paragraph pairs, where one of the sentences in the paragraph (drawn
from Wikipedia) contains the answer to the corresponding question (written by an annotator). We
convert the task into sentence pair classification by forming a pair between each question and each
sentence in the corresponding context, and filtering out pairs with low lexical overlap between the
question and the context sentence. The task is to determine whether the context sentence contains
the answer to the question. This modified version of the original task removes the requirement that
the model select the exact answer, but also removes the simplifying assumptions that the answer
is always present in the input and that lexical overlap is a reliable cue."""
), # pylint: disable=line-too-long
text_features={
"question": "question",
"sentence": "sentence",
},
label_classes=["entailment", "not_entailment"],
label_column="label",
data_url="https://dl.fbaipublicfiles.com/glue/data/QNLIv2.zip",
data_dir="QNLI",
citation=textwrap.dedent("""\
@article{rajpurkar2016squad,
title={Squad: 100,000+ questions for machine comprehension of text},
author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},
journal={arXiv preprint arXiv:1606.05250},
year={2016}
}"""),
url="https://rajpurkar.github.io/SQuAD-explorer/"),
GlueConfig(
name="rte",
description=textwrap.dedent("""\
The Recognizing Textual Entailment (RTE) datasets come from a series of annual textual
entailment challenges. We combine the data from RTE1 (Dagan et al., 2006), RTE2 (Bar Haim
          et al., 2006), RTE3 (Giampiccolo et al., 2007), and RTE5 (Bentivogli et al., 2009). Examples are
constructed based on news and Wikipedia text. We convert all datasets to a two-class split, where
for three-class datasets we collapse neutral and contradiction into not entailment, for consistency."""
), # pylint: disable=line-too-long
text_features={
"sentence1": "sentence1",
"sentence2": "sentence2",
},
label_classes=["entailment", "not_entailment"],
label_column="label",
data_url="https://dl.fbaipublicfiles.com/glue/data/RTE.zip",
data_dir="RTE",
citation=textwrap.dedent("""\
@inproceedings{dagan2005pascal,
title={The PASCAL recognising textual entailment challenge},
author={Dagan, Ido and Glickman, Oren and Magnini, Bernardo},
booktitle={Machine Learning Challenges Workshop},
pages={177--190},
year={2005},
organization={Springer}
}
@inproceedings{bar2006second,
title={The second pascal recognising textual entailment challenge},
author={Bar-Haim, Roy and Dagan, Ido and Dolan, Bill and Ferro, Lisa and Giampiccolo, Danilo and Magnini, Bernardo and Szpektor, Idan},
booktitle={Proceedings of the second PASCAL challenges workshop on recognising textual entailment},
volume={6},
number={1},
pages={6--4},
year={2006},
organization={Venice}
}
@inproceedings{giampiccolo2007third,
title={The third pascal recognizing textual entailment challenge},
author={Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill},
booktitle={Proceedings of the ACL-PASCAL workshop on textual entailment and paraphrasing},
pages={1--9},
year={2007},
organization={Association for Computational Linguistics}
}
@inproceedings{bentivogli2009fifth,
title={The Fifth PASCAL Recognizing Textual Entailment Challenge.},
author={Bentivogli, Luisa and Clark, Peter and Dagan, Ido and Giampiccolo, Danilo},
booktitle={TAC},
year={2009}
}"""),
url="https://aclweb.org/aclwiki/Recognizing_Textual_Entailment"),
GlueConfig(
name="wnli",
description=textwrap.dedent("""\
The Winograd Schema Challenge (Levesque et al., 2011) is a reading comprehension task
in which a system must read a sentence with a pronoun and select the referent of that pronoun from
a list of choices. The examples are manually constructed to foil simple statistical methods: Each
one is contingent on contextual information provided by a single word or phrase in the sentence.
To convert the problem into sentence pair classification, we construct sentence pairs by replacing
the ambiguous pronoun with each possible referent. The task is to predict if the sentence with the
pronoun substituted is entailed by the original sentence. We use a small evaluation set consisting of
new examples derived from fiction books that was shared privately by the authors of the original
corpus. While the included training set is balanced between two classes, the test set is imbalanced
between them (65% not entailment). Also, due to a data quirk, the development set is adversarial:
hypotheses are sometimes shared between training and development examples, so if a model memorizes the
          training examples, it will predict the wrong label on the corresponding development set
example. As with QNLI, each example is evaluated separately, so there is not a systematic correspondence
between a model's score on this task and its score on the unconverted original task. We
          call the converted dataset WNLI (Winograd NLI)."""),
text_features={
"sentence1": "sentence1",
"sentence2": "sentence2",
},
label_classes=["not_entailment", "entailment"],
label_column="label",
data_url="https://dl.fbaipublicfiles.com/glue/data/WNLI.zip",
data_dir="WNLI",
citation=textwrap.dedent("""\
@inproceedings{levesque2012winograd,
title={The winograd schema challenge},
author={Levesque, Hector and Davis, Ernest and Morgenstern, Leora},
booktitle={Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning},
year={2012}
}"""),
url="https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WS.html"
),
GlueConfig(
name="ax",
description=textwrap.dedent("""\
A manually-curated evaluation dataset for fine-grained analysis of
system performance on a broad range of linguistic phenomena. This
dataset evaluates sentence understanding through Natural Language
          Inference (NLI) problems. Use a model trained on MultiNLI to produce
predictions for this dataset."""),
text_features={
"premise": "sentence1",
"hypothesis": "sentence2",
},
label_classes=["entailment", "neutral", "contradiction"],
label_column="", # No label since we only have test set.
# We must use a URL shortener since the URL from GLUE is very long and
# causes issues in TFDS.
data_url="https://bit.ly/2BOtOJ7",
data_dir="", # We are downloading a tsv.
citation="", # The GLUE citation is sufficient.
url="https://gluebenchmark.com/diagnostics"),
]
def _info(self):
features = {
text_feature: tfds.features.Text()
for text_feature in six.iterkeys(self.builder_config.text_features)
}
if self.builder_config.label_classes:
features["label"] = tfds.features.ClassLabel(
names=self.builder_config.label_classes)
else:
features["label"] = tf.float32
features["idx"] = tf.int32
return tfds.core.DatasetInfo(
builder=self,
description=_GLUE_DESCRIPTION,
features=tfds.features.FeaturesDict(features),
homepage=self.builder_config.url,
citation=self.builder_config.citation + "\n" + _GLUE_CITATION,
)
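  # Illustrative note (not from the original source): for "cola" the features
  # built above are {"sentence": Text, "label": ClassLabel(["unacceptable",
  # "acceptable"]), "idx": tf.int32}; for "stsb", which sets no label_classes,
  # "label" is a plain tf.float32 instead.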
def _split_generators(self, dl_manager):
if self.builder_config.name == "ax":
data_file = dl_manager.download(self.builder_config.data_url)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
gen_kwargs={
"data_file": data_file,
"split": "test",
})
]
if self.builder_config.name == "mrpc":
data_dir = None
mrpc_files = dl_manager.download({
"dev_ids": _MRPC_DEV_IDS,
"train": _MRPC_TRAIN,
"test": _MRPC_TEST,
})
else:
dl_dir = dl_manager.download_and_extract(self.builder_config.data_url)
data_dir = os.path.join(dl_dir, self.builder_config.data_dir)
mrpc_files = None
train_split = tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={
"data_file": os.path.join(data_dir or "", "train.tsv"),
"split": "train",
"mrpc_files": mrpc_files,
})
if self.builder_config.name == "mnli":
return [
train_split,
_mnli_split_generator(
"validation_matched", data_dir, "dev", matched=True),
_mnli_split_generator(
"validation_mismatched", data_dir, "dev", matched=False),
_mnli_split_generator("test_matched", data_dir, "test", matched=True),
_mnli_split_generator(
"test_mismatched", data_dir, "test", matched=False)
]
elif self.builder_config.name == "mnli_matched":
return [
_mnli_split_generator("validation", data_dir, "dev", matched=True),
_mnli_split_generator("test", data_dir, "test", matched=True)
]
elif self.builder_config.name == "mnli_mismatched":
return [
_mnli_split_generator("validation", data_dir, "dev", matched=False),
_mnli_split_generator("test", data_dir, "test", matched=False)
]
else:
return [
train_split,
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs={
"data_file": os.path.join(data_dir or "", "dev.tsv"),
"split": "dev",
"mrpc_files": mrpc_files,
}),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
gen_kwargs={
"data_file": os.path.join(data_dir or "", "test.tsv"),
"split": "test",
"mrpc_files": mrpc_files,
}),
]
def _generate_examples(self, data_file, split, mrpc_files=None):
if self.builder_config.name == "mrpc":
# We have to prepare the MRPC dataset from the original sources ourselves.
examples = self._generate_example_mrpc_files(
mrpc_files=mrpc_files, split=split)
for example in examples:
yield example["idx"], example
else:
process_label = self.builder_config.process_label
label_classes = self.builder_config.label_classes
# The train and dev files for CoLA are the only tsv files without a
# header.
is_cola_non_test = self.builder_config.name == "cola" and split != "test"
with tf.io.gfile.GFile(data_file) as f:
reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
if is_cola_non_test:
reader = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
for n, row in enumerate(reader):
if is_cola_non_test:
row = {
"sentence": row[3],
"is_acceptable": row[1],
}
example = {
feat: row[col]
for feat, col in six.iteritems(self.builder_config.text_features)
}
example["idx"] = n
if self.builder_config.label_column in row:
label = row[self.builder_config.label_column]
# For some tasks, the label is represented as 0 and 1 in the tsv
# files and needs to be cast to integer to work with the feature.
if label_classes and label not in label_classes:
label = int(label) if label else None
example["label"] = process_label(label)
else:
example["label"] = process_label(-1)
# Filter out corrupted rows.
for value in six.itervalues(example):
if value is None:
break
else:
yield example["idx"], example
def _generate_example_mrpc_files(self, mrpc_files, split):
if split == "test":
with tf.io.gfile.GFile(mrpc_files["test"]) as f:
reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
for n, row in enumerate(reader):
yield {
"sentence1": row["#1 String"],
"sentence2": row["#2 String"],
"label": -1,
"idx": n,
}
else:
with tf.io.gfile.GFile(mrpc_files["dev_ids"]) as f:
reader = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
dev_ids = [[row[0], row[1]] for row in reader]
with tf.io.gfile.GFile(mrpc_files["train"]) as f:
# The first 3 bytes are the utf-8 BOM \xef\xbb\xbf, which messes with
# the Quality key.
f.seek(3)
reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
for n, row in enumerate(reader):
is_row_in_dev = [row["#1 ID"], row["#2 ID"]] in dev_ids
if is_row_in_dev == (split == "dev"):
yield {
"sentence1": row["#1 String"],
"sentence2": row["#2 String"],
"label": int(row["Quality"]),
"idx": n,
}
def _mnli_split_generator(name, data_dir, split, matched):
return tfds.core.SplitGenerator(
name=name,
gen_kwargs={
"data_file":
os.path.join(
data_dir, "%s_%s.tsv" %
(split, "matched" if matched else "mismatched")),
"split":
split,
"mrpc_files":
None,
})
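# Usage sketch (not part of the original module): how one of the configs above
# might be loaded through TFDS once this builder is registered. Hedged example,
# kept commented out because tfds.load downloads the data on first use.
#
#   import tensorflow_datasets as tfds
#   ds = tfds.load("glue/cola", split="train")
#   for example in ds.take(2):
#     # `example` is a dict matching `_info`:
#     # {"sentence": tf.string, "label": tf.int64, "idx": tf.int32}
#     print(example["sentence"].numpy(), example["label"].numpy())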
|
|
# Copyright 2019 The gRPC Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests behavior of the Call classes."""
import asyncio
import logging
import unittest
import datetime
import grpc
from grpc.experimental import aio
from src.proto.grpc.testing import messages_pb2, test_pb2_grpc
from tests_aio.unit._test_base import AioTestBase
from tests_aio.unit._test_server import start_test_server
from tests_aio.unit._constants import UNREACHABLE_TARGET
_SHORT_TIMEOUT_S = datetime.timedelta(seconds=1).total_seconds()
_NUM_STREAM_RESPONSES = 5
_RESPONSE_PAYLOAD_SIZE = 42
_REQUEST_PAYLOAD_SIZE = 7
_LOCAL_CANCEL_DETAILS_EXPECTATION = 'Locally cancelled by application!'
_RESPONSE_INTERVAL_US = int(_SHORT_TIMEOUT_S * 1000 * 1000)
_INFINITE_INTERVAL_US = 2**31 - 1
class _MulticallableTestMixin():
async def setUp(self):
address, self._server = await start_test_server()
self._channel = aio.insecure_channel(address)
self._stub = test_pb2_grpc.TestServiceStub(self._channel)
async def tearDown(self):
await self._channel.close()
await self._server.stop(None)
class TestUnaryUnaryCall(_MulticallableTestMixin, AioTestBase):
async def test_call_to_string(self):
call = self._stub.UnaryCall(messages_pb2.SimpleRequest())
        self.assertIsNotNone(str(call))
        self.assertIsNotNone(repr(call))
        await call
        self.assertIsNotNone(str(call))
        self.assertIsNotNone(repr(call))
async def test_call_ok(self):
call = self._stub.UnaryCall(messages_pb2.SimpleRequest())
self.assertFalse(call.done())
response = await call
self.assertTrue(call.done())
self.assertIsInstance(response, messages_pb2.SimpleResponse)
self.assertEqual(await call.code(), grpc.StatusCode.OK)
        # The response is cached at the call object level; awaiting the
        # call again returns the same response object.
response_retry = await call
self.assertIs(response, response_retry)
async def test_call_rpc_error(self):
async with aio.insecure_channel(UNREACHABLE_TARGET) as channel:
stub = test_pb2_grpc.TestServiceStub(channel)
call = stub.UnaryCall(messages_pb2.SimpleRequest())
with self.assertRaises(aio.AioRpcError) as exception_context:
await call
self.assertEqual(grpc.StatusCode.UNAVAILABLE,
exception_context.exception.code())
self.assertTrue(call.done())
self.assertEqual(grpc.StatusCode.UNAVAILABLE, await call.code())
async def test_call_code_awaitable(self):
call = self._stub.UnaryCall(messages_pb2.SimpleRequest())
self.assertEqual(await call.code(), grpc.StatusCode.OK)
async def test_call_details_awaitable(self):
call = self._stub.UnaryCall(messages_pb2.SimpleRequest())
self.assertEqual('', await call.details())
async def test_call_initial_metadata_awaitable(self):
call = self._stub.UnaryCall(messages_pb2.SimpleRequest())
self.assertEqual(aio.Metadata(), await call.initial_metadata())
async def test_call_trailing_metadata_awaitable(self):
call = self._stub.UnaryCall(messages_pb2.SimpleRequest())
self.assertEqual(aio.Metadata(), await call.trailing_metadata())
async def test_call_initial_metadata_cancelable(self):
coro_started = asyncio.Event()
call = self._stub.UnaryCall(messages_pb2.SimpleRequest())
async def coro():
coro_started.set()
await call.initial_metadata()
task = self.loop.create_task(coro())
await coro_started.wait()
task.cancel()
        # Test that initial metadata can still be fetched even though
        # the previous task was cancelled.
self.assertEqual(aio.Metadata(), await call.initial_metadata())
async def test_call_initial_metadata_multiple_waiters(self):
call = self._stub.UnaryCall(messages_pb2.SimpleRequest())
async def coro():
return await call.initial_metadata()
task1 = self.loop.create_task(coro())
task2 = self.loop.create_task(coro())
await call
expected = [aio.Metadata() for _ in range(2)]
self.assertEqual(expected, await asyncio.gather(*[task1, task2]))
async def test_call_code_cancelable(self):
coro_started = asyncio.Event()
call = self._stub.UnaryCall(messages_pb2.SimpleRequest())
async def coro():
coro_started.set()
await call.code()
task = self.loop.create_task(coro())
await coro_started.wait()
task.cancel()
        # Test that the status code can still be fetched even though
        # the previous task was cancelled.
self.assertEqual(grpc.StatusCode.OK, await call.code())
async def test_call_code_multiple_waiters(self):
call = self._stub.UnaryCall(messages_pb2.SimpleRequest())
async def coro():
return await call.code()
task1 = self.loop.create_task(coro())
task2 = self.loop.create_task(coro())
await call
        self.assertEqual([grpc.StatusCode.OK, grpc.StatusCode.OK],
                         await asyncio.gather(task1, task2))
async def test_cancel_unary_unary(self):
call = self._stub.UnaryCall(messages_pb2.SimpleRequest())
self.assertFalse(call.cancelled())
self.assertTrue(call.cancel())
self.assertFalse(call.cancel())
with self.assertRaises(asyncio.CancelledError):
await call
# The info in the RpcError should match the info in Call object.
self.assertTrue(call.cancelled())
self.assertEqual(await call.code(), grpc.StatusCode.CANCELLED)
self.assertEqual(await call.details(),
'Locally cancelled by application!')
async def test_cancel_unary_unary_in_task(self):
coro_started = asyncio.Event()
call = self._stub.EmptyCall(messages_pb2.SimpleRequest())
async def another_coro():
coro_started.set()
await call
task = self.loop.create_task(another_coro())
await coro_started.wait()
self.assertFalse(task.done())
task.cancel()
self.assertEqual(grpc.StatusCode.CANCELLED, await call.code())
with self.assertRaises(asyncio.CancelledError):
await task
async def test_passing_credentials_fails_over_insecure_channel(self):
call_credentials = grpc.composite_call_credentials(
grpc.access_token_call_credentials("abc"),
grpc.access_token_call_credentials("def"),
)
with self.assertRaisesRegex(
aio.UsageError,
"Call credentials are only valid on secure channels"):
self._stub.UnaryCall(messages_pb2.SimpleRequest(),
credentials=call_credentials)
class TestUnaryStreamCall(_MulticallableTestMixin, AioTestBase):
async def test_call_rpc_error(self):
channel = aio.insecure_channel(UNREACHABLE_TARGET)
request = messages_pb2.StreamingOutputCallRequest()
stub = test_pb2_grpc.TestServiceStub(channel)
call = stub.StreamingOutputCall(request)
with self.assertRaises(aio.AioRpcError) as exception_context:
async for response in call:
pass
self.assertEqual(grpc.StatusCode.UNAVAILABLE,
exception_context.exception.code())
self.assertTrue(call.done())
self.assertEqual(grpc.StatusCode.UNAVAILABLE, await call.code())
await channel.close()
async def test_cancel_unary_stream(self):
# Prepares the request
request = messages_pb2.StreamingOutputCallRequest()
for _ in range(_NUM_STREAM_RESPONSES):
request.response_parameters.append(
messages_pb2.ResponseParameters(
size=_RESPONSE_PAYLOAD_SIZE,
interval_us=_RESPONSE_INTERVAL_US,
))
# Invokes the actual RPC
call = self._stub.StreamingOutputCall(request)
self.assertFalse(call.cancelled())
response = await call.read()
self.assertIs(type(response), messages_pb2.StreamingOutputCallResponse)
self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body))
self.assertTrue(call.cancel())
self.assertEqual(grpc.StatusCode.CANCELLED, await call.code())
        self.assertEqual(_LOCAL_CANCEL_DETAILS_EXPECTATION,
                         await call.details())
self.assertFalse(call.cancel())
with self.assertRaises(asyncio.CancelledError):
await call.read()
self.assertTrue(call.cancelled())
async def test_multiple_cancel_unary_stream(self):
# Prepares the request
request = messages_pb2.StreamingOutputCallRequest()
for _ in range(_NUM_STREAM_RESPONSES):
request.response_parameters.append(
messages_pb2.ResponseParameters(
size=_RESPONSE_PAYLOAD_SIZE,
interval_us=_RESPONSE_INTERVAL_US,
))
# Invokes the actual RPC
call = self._stub.StreamingOutputCall(request)
self.assertFalse(call.cancelled())
response = await call.read()
self.assertIs(type(response), messages_pb2.StreamingOutputCallResponse)
self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body))
self.assertTrue(call.cancel())
self.assertFalse(call.cancel())
self.assertFalse(call.cancel())
self.assertFalse(call.cancel())
with self.assertRaises(asyncio.CancelledError):
await call.read()
async def test_early_cancel_unary_stream(self):
"""Test cancellation before receiving messages."""
# Prepares the request
request = messages_pb2.StreamingOutputCallRequest()
for _ in range(_NUM_STREAM_RESPONSES):
request.response_parameters.append(
messages_pb2.ResponseParameters(
size=_RESPONSE_PAYLOAD_SIZE,
interval_us=_RESPONSE_INTERVAL_US,
))
# Invokes the actual RPC
call = self._stub.StreamingOutputCall(request)
self.assertFalse(call.cancelled())
self.assertTrue(call.cancel())
self.assertFalse(call.cancel())
with self.assertRaises(asyncio.CancelledError):
await call.read()
self.assertTrue(call.cancelled())
self.assertEqual(grpc.StatusCode.CANCELLED, await call.code())
        self.assertEqual(_LOCAL_CANCEL_DETAILS_EXPECTATION,
                         await call.details())
async def test_late_cancel_unary_stream(self):
"""Test cancellation after received all messages."""
# Prepares the request
request = messages_pb2.StreamingOutputCallRequest()
for _ in range(_NUM_STREAM_RESPONSES):
request.response_parameters.append(
messages_pb2.ResponseParameters(size=_RESPONSE_PAYLOAD_SIZE,))
# Invokes the actual RPC
call = self._stub.StreamingOutputCall(request)
for _ in range(_NUM_STREAM_RESPONSES):
response = await call.read()
self.assertIs(type(response),
messages_pb2.StreamingOutputCallResponse)
self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body))
        # After all messages are received, the final state may already be
        # received or still on its way. It's basically a data race, so our
        # only expectation here is not to crash :)
call.cancel()
self.assertIn(await call.code(),
[grpc.StatusCode.OK, grpc.StatusCode.CANCELLED])
async def test_too_many_reads_unary_stream(self):
"""Test calling read after received all messages fails."""
# Prepares the request
request = messages_pb2.StreamingOutputCallRequest()
for _ in range(_NUM_STREAM_RESPONSES):
request.response_parameters.append(
messages_pb2.ResponseParameters(size=_RESPONSE_PAYLOAD_SIZE,))
# Invokes the actual RPC
call = self._stub.StreamingOutputCall(request)
for _ in range(_NUM_STREAM_RESPONSES):
response = await call.read()
self.assertIs(type(response),
messages_pb2.StreamingOutputCallResponse)
self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body))
self.assertIs(await call.read(), aio.EOF)
# After the RPC is finished, further reads will lead to exception.
self.assertEqual(await call.code(), grpc.StatusCode.OK)
self.assertIs(await call.read(), aio.EOF)
async def test_unary_stream_async_generator(self):
"""Sunny day test case for unary_stream."""
# Prepares the request
request = messages_pb2.StreamingOutputCallRequest()
for _ in range(_NUM_STREAM_RESPONSES):
request.response_parameters.append(
messages_pb2.ResponseParameters(size=_RESPONSE_PAYLOAD_SIZE,))
# Invokes the actual RPC
call = self._stub.StreamingOutputCall(request)
self.assertFalse(call.cancelled())
async for response in call:
self.assertIs(type(response),
messages_pb2.StreamingOutputCallResponse)
self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body))
self.assertEqual(await call.code(), grpc.StatusCode.OK)
async def test_cancel_unary_stream_in_task_using_read(self):
coro_started = asyncio.Event()
        # Configures the server method to block forever
request = messages_pb2.StreamingOutputCallRequest()
request.response_parameters.append(
messages_pb2.ResponseParameters(
size=_RESPONSE_PAYLOAD_SIZE,
interval_us=_INFINITE_INTERVAL_US,
))
# Invokes the actual RPC
call = self._stub.StreamingOutputCall(request)
async def another_coro():
coro_started.set()
await call.read()
task = self.loop.create_task(another_coro())
await coro_started.wait()
self.assertFalse(task.done())
task.cancel()
self.assertEqual(grpc.StatusCode.CANCELLED, await call.code())
with self.assertRaises(asyncio.CancelledError):
await task
async def test_cancel_unary_stream_in_task_using_async_for(self):
coro_started = asyncio.Event()
        # Configures the server method to block forever
request = messages_pb2.StreamingOutputCallRequest()
request.response_parameters.append(
messages_pb2.ResponseParameters(
size=_RESPONSE_PAYLOAD_SIZE,
interval_us=_INFINITE_INTERVAL_US,
))
# Invokes the actual RPC
call = self._stub.StreamingOutputCall(request)
async def another_coro():
coro_started.set()
async for _ in call:
pass
task = self.loop.create_task(another_coro())
await coro_started.wait()
self.assertFalse(task.done())
task.cancel()
self.assertEqual(grpc.StatusCode.CANCELLED, await call.code())
with self.assertRaises(asyncio.CancelledError):
await task
async def test_time_remaining(self):
request = messages_pb2.StreamingOutputCallRequest()
# First message comes back immediately
request.response_parameters.append(
messages_pb2.ResponseParameters(size=_RESPONSE_PAYLOAD_SIZE,))
# Second message comes back after a unit of wait time
request.response_parameters.append(
messages_pb2.ResponseParameters(
size=_RESPONSE_PAYLOAD_SIZE,
interval_us=_RESPONSE_INTERVAL_US,
))
call = self._stub.StreamingOutputCall(request,
timeout=_SHORT_TIMEOUT_S * 2)
response = await call.read()
self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body))
# Should be around the same as the timeout
remained_time = call.time_remaining()
self.assertGreater(remained_time, _SHORT_TIMEOUT_S * 3 / 2)
self.assertLess(remained_time, _SHORT_TIMEOUT_S * 5 / 2)
response = await call.read()
self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body))
# Should be around the timeout minus a unit of wait time
remained_time = call.time_remaining()
self.assertGreater(remained_time, _SHORT_TIMEOUT_S / 2)
self.assertLess(remained_time, _SHORT_TIMEOUT_S * 3 / 2)
self.assertEqual(grpc.StatusCode.OK, await call.code())
async def test_empty_responses(self):
# Prepares the request
request = messages_pb2.StreamingOutputCallRequest()
for _ in range(_NUM_STREAM_RESPONSES):
request.response_parameters.append(
messages_pb2.ResponseParameters())
# Invokes the actual RPC
call = self._stub.StreamingOutputCall(request)
for _ in range(_NUM_STREAM_RESPONSES):
response = await call.read()
self.assertIs(type(response),
messages_pb2.StreamingOutputCallResponse)
self.assertEqual(b'', response.SerializeToString())
self.assertEqual(grpc.StatusCode.OK, await call.code())
class TestStreamUnaryCall(_MulticallableTestMixin, AioTestBase):
async def test_cancel_stream_unary(self):
call = self._stub.StreamingInputCall()
# Prepares the request
payload = messages_pb2.Payload(body=b'\0' * _REQUEST_PAYLOAD_SIZE)
request = messages_pb2.StreamingInputCallRequest(payload=payload)
# Sends out requests
for _ in range(_NUM_STREAM_RESPONSES):
await call.write(request)
# Cancels the RPC
self.assertFalse(call.done())
self.assertFalse(call.cancelled())
self.assertTrue(call.cancel())
self.assertTrue(call.cancelled())
await call.done_writing()
with self.assertRaises(asyncio.CancelledError):
await call
async def test_early_cancel_stream_unary(self):
call = self._stub.StreamingInputCall()
# Cancels the RPC
self.assertFalse(call.done())
self.assertFalse(call.cancelled())
self.assertTrue(call.cancel())
self.assertTrue(call.cancelled())
with self.assertRaises(asyncio.InvalidStateError):
await call.write(messages_pb2.StreamingInputCallRequest())
# Should be no-op
await call.done_writing()
with self.assertRaises(asyncio.CancelledError):
await call
async def test_write_after_done_writing(self):
call = self._stub.StreamingInputCall()
# Prepares the request
payload = messages_pb2.Payload(body=b'\0' * _REQUEST_PAYLOAD_SIZE)
request = messages_pb2.StreamingInputCallRequest(payload=payload)
# Sends out requests
for _ in range(_NUM_STREAM_RESPONSES):
await call.write(request)
# Should be no-op
await call.done_writing()
with self.assertRaises(asyncio.InvalidStateError):
await call.write(messages_pb2.StreamingInputCallRequest())
response = await call
self.assertIsInstance(response, messages_pb2.StreamingInputCallResponse)
self.assertEqual(_NUM_STREAM_RESPONSES * _REQUEST_PAYLOAD_SIZE,
response.aggregated_payload_size)
self.assertEqual(await call.code(), grpc.StatusCode.OK)
async def test_error_in_async_generator(self):
# Server will pause between responses
request = messages_pb2.StreamingOutputCallRequest()
request.response_parameters.append(
messages_pb2.ResponseParameters(
size=_RESPONSE_PAYLOAD_SIZE,
interval_us=_RESPONSE_INTERVAL_US,
))
# We expect the request iterator to receive the exception
request_iterator_received_the_exception = asyncio.Event()
async def request_iterator():
with self.assertRaises(asyncio.CancelledError):
for _ in range(_NUM_STREAM_RESPONSES):
yield request
await asyncio.sleep(_SHORT_TIMEOUT_S)
request_iterator_received_the_exception.set()
call = self._stub.StreamingInputCall(request_iterator())
# Cancel the RPC after at least one response
async def cancel_later():
await asyncio.sleep(_SHORT_TIMEOUT_S * 2)
call.cancel()
cancel_later_task = self.loop.create_task(cancel_later())
with self.assertRaises(asyncio.CancelledError):
await call
await request_iterator_received_the_exception.wait()
# No failures in the cancel later task!
await cancel_later_task
async def test_normal_iterable_requests(self):
# Prepares the request
payload = messages_pb2.Payload(body=b'\0' * _REQUEST_PAYLOAD_SIZE)
request = messages_pb2.StreamingInputCallRequest(payload=payload)
requests = [request] * _NUM_STREAM_RESPONSES
# Sends out requests
call = self._stub.StreamingInputCall(requests)
# RPC should succeed
response = await call
self.assertIsInstance(response, messages_pb2.StreamingInputCallResponse)
self.assertEqual(_NUM_STREAM_RESPONSES * _REQUEST_PAYLOAD_SIZE,
response.aggregated_payload_size)
self.assertEqual(await call.code(), grpc.StatusCode.OK)
async def test_call_rpc_error(self):
async with aio.insecure_channel(UNREACHABLE_TARGET) as channel:
stub = test_pb2_grpc.TestServiceStub(channel)
# The error should be raised automatically without any traffic.
call = stub.StreamingInputCall()
with self.assertRaises(aio.AioRpcError) as exception_context:
await call
self.assertEqual(grpc.StatusCode.UNAVAILABLE,
exception_context.exception.code())
self.assertTrue(call.done())
self.assertEqual(grpc.StatusCode.UNAVAILABLE, await call.code())
async def test_timeout(self):
call = self._stub.StreamingInputCall(timeout=_SHORT_TIMEOUT_S)
# The error should be raised automatically without any traffic.
with self.assertRaises(aio.AioRpcError) as exception_context:
await call
rpc_error = exception_context.exception
self.assertEqual(grpc.StatusCode.DEADLINE_EXCEEDED, rpc_error.code())
self.assertTrue(call.done())
self.assertEqual(grpc.StatusCode.DEADLINE_EXCEEDED, await call.code())
# Prepares the request that streams in a ping-pong manner.
_STREAM_OUTPUT_REQUEST_ONE_RESPONSE = messages_pb2.StreamingOutputCallRequest()
_STREAM_OUTPUT_REQUEST_ONE_RESPONSE.response_parameters.append(
messages_pb2.ResponseParameters(size=_RESPONSE_PAYLOAD_SIZE))
_STREAM_OUTPUT_REQUEST_ONE_EMPTY_RESPONSE = (
    messages_pb2.StreamingOutputCallRequest())
_STREAM_OUTPUT_REQUEST_ONE_EMPTY_RESPONSE.response_parameters.append(
messages_pb2.ResponseParameters())
class TestStreamStreamCall(_MulticallableTestMixin, AioTestBase):
async def test_cancel(self):
# Invokes the actual RPC
call = self._stub.FullDuplexCall()
for _ in range(_NUM_STREAM_RESPONSES):
await call.write(_STREAM_OUTPUT_REQUEST_ONE_RESPONSE)
response = await call.read()
self.assertIsInstance(response,
messages_pb2.StreamingOutputCallResponse)
self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body))
# Cancels the RPC
self.assertFalse(call.done())
self.assertFalse(call.cancelled())
self.assertTrue(call.cancel())
self.assertTrue(call.cancelled())
self.assertEqual(grpc.StatusCode.CANCELLED, await call.code())
async def test_cancel_with_pending_read(self):
call = self._stub.FullDuplexCall()
await call.write(_STREAM_OUTPUT_REQUEST_ONE_RESPONSE)
# Cancels the RPC
self.assertFalse(call.done())
self.assertFalse(call.cancelled())
self.assertTrue(call.cancel())
self.assertTrue(call.cancelled())
self.assertEqual(grpc.StatusCode.CANCELLED, await call.code())
async def test_cancel_with_ongoing_read(self):
call = self._stub.FullDuplexCall()
coro_started = asyncio.Event()
async def read_coro():
coro_started.set()
await call.read()
read_task = self.loop.create_task(read_coro())
await coro_started.wait()
self.assertFalse(read_task.done())
# Cancels the RPC
self.assertFalse(call.done())
self.assertFalse(call.cancelled())
self.assertTrue(call.cancel())
self.assertTrue(call.cancelled())
self.assertEqual(grpc.StatusCode.CANCELLED, await call.code())
async def test_early_cancel(self):
call = self._stub.FullDuplexCall()
# Cancels the RPC
self.assertFalse(call.done())
self.assertFalse(call.cancelled())
self.assertTrue(call.cancel())
self.assertTrue(call.cancelled())
self.assertEqual(grpc.StatusCode.CANCELLED, await call.code())
async def test_cancel_after_done_writing(self):
call = self._stub.FullDuplexCall()
await call.done_writing()
# Cancels the RPC
self.assertFalse(call.done())
self.assertFalse(call.cancelled())
self.assertTrue(call.cancel())
self.assertTrue(call.cancelled())
self.assertEqual(grpc.StatusCode.CANCELLED, await call.code())
async def test_late_cancel(self):
call = self._stub.FullDuplexCall()
await call.done_writing()
self.assertEqual(grpc.StatusCode.OK, await call.code())
# Cancels the RPC
self.assertTrue(call.done())
self.assertFalse(call.cancelled())
self.assertFalse(call.cancel())
self.assertFalse(call.cancelled())
# Status is still OK
self.assertEqual(grpc.StatusCode.OK, await call.code())
async def test_async_generator(self):
async def request_generator():
yield _STREAM_OUTPUT_REQUEST_ONE_RESPONSE
yield _STREAM_OUTPUT_REQUEST_ONE_RESPONSE
call = self._stub.FullDuplexCall(request_generator())
async for response in call:
self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body))
self.assertEqual(await call.code(), grpc.StatusCode.OK)
async def test_too_many_reads(self):
async def request_generator():
for _ in range(_NUM_STREAM_RESPONSES):
yield _STREAM_OUTPUT_REQUEST_ONE_RESPONSE
call = self._stub.FullDuplexCall(request_generator())
for _ in range(_NUM_STREAM_RESPONSES):
response = await call.read()
self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body))
self.assertIs(await call.read(), aio.EOF)
self.assertEqual(await call.code(), grpc.StatusCode.OK)
# After the RPC finished, the read should also produce EOF
self.assertIs(await call.read(), aio.EOF)
async def test_read_write_after_done_writing(self):
call = self._stub.FullDuplexCall()
        # Writes two requests, leaving two responses pending
await call.write(_STREAM_OUTPUT_REQUEST_ONE_RESPONSE)
await call.write(_STREAM_OUTPUT_REQUEST_ONE_RESPONSE)
await call.done_writing()
# Further write should fail
with self.assertRaises(asyncio.InvalidStateError):
await call.write(_STREAM_OUTPUT_REQUEST_ONE_RESPONSE)
# But read should be unaffected
response = await call.read()
self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body))
response = await call.read()
self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body))
self.assertEqual(await call.code(), grpc.StatusCode.OK)
async def test_error_in_async_generator(self):
# Server will pause between responses
request = messages_pb2.StreamingOutputCallRequest()
request.response_parameters.append(
messages_pb2.ResponseParameters(
size=_RESPONSE_PAYLOAD_SIZE,
interval_us=_RESPONSE_INTERVAL_US,
))
# We expect the request iterator to receive the exception
request_iterator_received_the_exception = asyncio.Event()
async def request_iterator():
with self.assertRaises(asyncio.CancelledError):
for _ in range(_NUM_STREAM_RESPONSES):
yield request
await asyncio.sleep(_SHORT_TIMEOUT_S)
request_iterator_received_the_exception.set()
call = self._stub.FullDuplexCall(request_iterator())
# Cancel the RPC after at least one response
async def cancel_later():
await asyncio.sleep(_SHORT_TIMEOUT_S * 2)
call.cancel()
cancel_later_task = self.loop.create_task(cancel_later())
with self.assertRaises(asyncio.CancelledError):
async for response in call:
self.assertEqual(_RESPONSE_PAYLOAD_SIZE,
len(response.payload.body))
await request_iterator_received_the_exception.wait()
self.assertEqual(grpc.StatusCode.CANCELLED, await call.code())
# No failures in the cancel later task!
await cancel_later_task
async def test_normal_iterable_requests(self):
requests = [_STREAM_OUTPUT_REQUEST_ONE_RESPONSE] * _NUM_STREAM_RESPONSES
call = self._stub.FullDuplexCall(iter(requests))
async for response in call:
self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body))
self.assertEqual(await call.code(), grpc.StatusCode.OK)
async def test_empty_ping_pong(self):
call = self._stub.FullDuplexCall()
for _ in range(_NUM_STREAM_RESPONSES):
await call.write(_STREAM_OUTPUT_REQUEST_ONE_EMPTY_RESPONSE)
response = await call.read()
self.assertEqual(b'', response.SerializeToString())
await call.done_writing()
self.assertEqual(await call.code(), grpc.StatusCode.OK)
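# Usage sketch (not part of the original test file): the unary-unary calling
# pattern exercised by TestUnaryUnaryCall, outside the test harness. Assumes a
# TestService server is reachable at `target`; the coroutine is illustrative
# and never invoked here.
async def _demo_unary_unary(target):
    async with aio.insecure_channel(target) as channel:
        stub = test_pb2_grpc.TestServiceStub(channel)
        call = stub.UnaryCall(messages_pb2.SimpleRequest())
        response = await call  # Awaiting the call object yields the response.
        assert await call.code() == grpc.StatusCode.OK
        return response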
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
unittest.main(verbosity=2)
|
|
# python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Visual networks for processing pixel inputs."""
from typing import Callable, Optional, Sequence, Union
import sonnet as snt
import tensorflow as tf
class ResNetTorso(snt.Module):
"""ResNet architecture used in IMPALA paper."""
def __init__(
self,
num_channels: Sequence[int] = (16, 32, 32), # default to IMPALA resnet.
num_blocks: Sequence[int] = (2, 2, 2), # default to IMPALA resnet.
num_output_hidden: Sequence[int] = (256,), # default to IMPALA resnet.
conv_shape: Union[int, Sequence[int]] = 3,
conv_stride: Union[int, Sequence[int]] = 1,
pool_size: Union[int, Sequence[int]] = 3,
pool_stride: Union[int, Sequence[int]] = 2,
data_format: str = 'NHWC',
activation: Callable[[tf.Tensor], tf.Tensor] = tf.nn.relu,
output_dtype: tf.DType = tf.float32,
name: str = 'resnet_torso'):
super().__init__(name=name)
self._output_dtype = output_dtype
self._num_layers = len(num_blocks)
# Create sequence of residual blocks.
blocks = []
for i in range(self._num_layers):
blocks.append(
ResidualBlockGroup(
num_blocks[i],
num_channels[i],
conv_shape,
conv_stride,
pool_size,
pool_stride,
data_format=data_format,
activation=activation))
# Create output layer.
out_layer = snt.nets.MLP(num_output_hidden, activation=activation)
# Compose blocks and final layer.
self._resnet = snt.Sequential(
blocks + [activation, snt.Flatten(), out_layer])
def __call__(self, inputs: tf.Tensor) -> tf.Tensor:
"""Evaluates the ResidualPixelCore."""
# Convert to floats.
preprocessed_inputs = _preprocess_inputs(inputs, self._output_dtype)
torso_output = self._resnet(preprocessed_inputs)
return torso_output
class ResidualBlockGroup(snt.Module):
"""Higher level block for ResNet implementation."""
def __init__(self,
num_blocks: int,
num_output_channels: int,
conv_shape: Union[int, Sequence[int]],
conv_stride: Union[int, Sequence[int]],
pool_shape: Union[int, Sequence[int]],
pool_stride: Union[int, Sequence[int]],
data_format: str = 'NHWC',
activation: Callable[[tf.Tensor], tf.Tensor] = tf.nn.relu,
name: Optional[str] = None):
super().__init__(name=name)
self._num_blocks = num_blocks
self._data_format = data_format
self._activation = activation
# The pooling operation expects a 2-rank shape/stride (height and width).
if isinstance(pool_shape, int):
pool_shape = 2 * [pool_shape]
if isinstance(pool_stride, int):
pool_stride = 2 * [pool_stride]
# Create a Conv2D factory since we'll be making quite a few.
def build_conv_layer(name: str):
return snt.Conv2D(
num_output_channels,
conv_shape,
stride=conv_stride,
padding='SAME',
data_format=data_format,
name=name)
# Create a pooling layer.
def pooling_layer(inputs: tf.Tensor) -> tf.Tensor:
return tf.nn.pool(
inputs,
pool_shape,
pooling_type='MAX',
strides=pool_stride,
padding='SAME',
data_format=data_format)
# Create an initial conv layer and pooling to scale the image down.
self._downscale = snt.Sequential(
[build_conv_layer('downscale'), pooling_layer])
# Residual block(s).
self._convs = []
for i in range(self._num_blocks):
name = 'residual_block_%d' % i
self._convs.append(
[build_conv_layer(name + '_0'),
build_conv_layer(name + '_1')])
def __call__(self, inputs: tf.Tensor) -> tf.Tensor:
# Downscale the inputs.
conv_out = self._downscale(inputs)
# Apply (sequence of) residual block(s).
for i in range(self._num_blocks):
block_input = conv_out
conv_out = self._activation(conv_out)
conv_out = self._convs[i][0](conv_out)
conv_out = self._activation(conv_out)
conv_out = self._convs[i][1](conv_out)
conv_out += block_input
return conv_out
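# Worked example (illustrative, not from the original module): with the IMPALA
# defaults above (three ResidualBlockGroups, each max-pooling with stride 2 and
# 'SAME' padding), spatial dimensions shrink by ceil(x / 2) per group. For a
# [B, 96, 96, 3] input:
#   after group 0: [B, 48, 48, 16]
#   after group 1: [B, 24, 24, 32]
#   after group 2: [B, 12, 12, 32]
# so flattening yields 12 * 12 * 32 = 4608 features ahead of the output MLP.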
def _preprocess_inputs(inputs: tf.Tensor, output_dtype: tf.DType) -> tf.Tensor:
"""Returns the `Tensor` corresponding to the preprocessed inputs."""
rank = inputs.shape.rank
if rank < 4:
raise ValueError(
'Input Tensor must have at least 4 dimensions (for '
'batch size, height, width, and channels), but it only has '
'{}'.format(rank))
flattened_inputs = snt.Flatten(preserve_dims=3)(inputs)
processed_inputs = tf.image.convert_image_dtype(
flattened_inputs, dtype=output_dtype)
return processed_inputs
class DrQTorso(snt.Module):
"""DrQ Torso inspired by the second DrQ paper [Yarats et al., 2021].
[Yarats et al., 2021] https://arxiv.org/abs/2107.09645
"""
def __init__(
self,
data_format: str = 'NHWC',
activation: Callable[[tf.Tensor], tf.Tensor] = tf.nn.relu,
output_dtype: tf.DType = tf.float32,
      name: str = 'drq_torso'):
super().__init__(name=name)
self._output_dtype = output_dtype
# Create a Conv2D factory since we'll be making quite a few.
gain = 2**0.5 if activation == tf.nn.relu else 1.
def build_conv_layer(name: str,
output_channels: int = 32,
kernel_shape: Sequence[int] = (3, 3),
stride: int = 1):
return snt.Conv2D(
output_channels=output_channels,
kernel_shape=kernel_shape,
stride=stride,
padding='SAME',
data_format=data_format,
w_init=snt.initializers.Orthogonal(gain=gain, seed=None),
b_init=snt.initializers.Zeros(),
name=name)
self._network = snt.Sequential(
[build_conv_layer('conv_0', stride=2),
activation,
build_conv_layer('conv_1', stride=1),
activation,
build_conv_layer('conv_2', stride=1),
activation,
build_conv_layer('conv_3', stride=1),
activation,
snt.Flatten()])
def __call__(self, inputs: tf.Tensor) -> tf.Tensor:
"""Evaluates the ResidualPixelCore."""
# Normalize to -0.5 to 0.5
preprocessed_inputs = _preprocess_inputs(inputs, self._output_dtype) - 0.5
torso_output = self._network(preprocessed_inputs)
return torso_output
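# Usage sketch (not part of the original module): a quick smoke test of the two
# torsos above on a dummy batch of 84x84 RGB frames. Output shapes assume the
# default constructor arguments; runs only when executed as a script.
if __name__ == '__main__':
  frames = tf.zeros([8, 84, 84, 3], dtype=tf.uint8)
  # ResNetTorso: three stride-2 pools (84 -> 42 -> 21 -> 11), then MLP((256,)).
  print(ResNetTorso()(frames).shape)  # (8, 256)
  # DrQTorso: one stride-2 conv (84 -> 42), then flattening 42 * 42 * 32.
  print(DrQTorso()(frames).shape)  # (8, 56448)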
|
|
"""Common IO api utilities"""
import os
import csv
import codecs
import mmap
from contextlib import contextmanager, closing
from pandas.compat import StringIO, BytesIO, string_types, text_type
from pandas import compat
from pandas.io.formats.printing import pprint_thing
from pandas.core.common import AbstractMethodError
from pandas.core.dtypes.common import is_number, is_file_like
# compat
from pandas.errors import (ParserError, DtypeWarning, # noqa
EmptyDataError, ParserWarning)
# gh-12665: Alias for now and remove later.
CParserError = ParserError
# common NA values
# no longer excluding inf representations
# '1.#INF','-1.#INF', '1.#INF000000',
_NA_VALUES = set([
'-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN', '#N/A N/A', '#N/A',
'N/A', 'n/a', 'NA', '#NA', 'NULL', 'null', 'NaN', '-NaN', 'nan', '-nan', ''
])
if compat.PY3:
from urllib.request import urlopen, pathname2url
_urlopen = urlopen
from urllib.parse import urlparse as parse_url
from urllib.parse import (uses_relative, uses_netloc, uses_params,
urlencode, urljoin)
from urllib.error import URLError
from http.client import HTTPException # noqa
else:
from urllib2 import urlopen as _urlopen
from urllib import urlencode, pathname2url # noqa
from urlparse import urlparse as parse_url
from urlparse import uses_relative, uses_netloc, uses_params, urljoin
from urllib2 import URLError # noqa
from httplib import HTTPException # noqa
from contextlib import contextmanager, closing # noqa
from functools import wraps # noqa
# @wraps(_urlopen)
@contextmanager
def urlopen(*args, **kwargs):
with closing(_urlopen(*args, **kwargs)) as f:
yield f
_VALID_URLS = set(uses_relative + uses_netloc + uses_params)
_VALID_URLS.discard('')
class BaseIterator(object):
"""Subclass this and provide a "__next__()" method to obtain an iterator.
Useful only when the object being iterated is non-reusable (e.g. OK for a
parser, not for an in-memory table, yes for its iterator)."""
def __iter__(self):
return self
def __next__(self):
raise AbstractMethodError(self)
if not compat.PY3:
BaseIterator.next = lambda self: self.__next__()
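# Minimal sketch (not part of pandas) of the subclassing pattern described in
# the BaseIterator docstring: supply __next__ and the base class provides the
# rest of the iterator protocol on both Python 2 and 3.
class _ExampleLineIterator(BaseIterator):
    def __init__(self, lines):
        self._lines = iter(lines)
    def __next__(self):
        return next(self._lines)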
def _is_url(url):
"""Check to see if a URL has a valid protocol.
Parameters
----------
url : str or unicode
Returns
-------
isurl : bool
If `url` has a valid protocol return True otherwise False.
"""
try:
return parse_url(url).scheme in _VALID_URLS
    except Exception:
return False
def _is_s3_url(url):
"""Check for an s3, s3n, or s3a url"""
try:
return parse_url(url).scheme in ['s3', 's3n', 's3a']
    except Exception:
return False
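# Illustrative values (not part of pandas):
#   _is_url('https://example.com/data.csv')  -> True
#   _is_url('data.csv')                      -> False (no recognized scheme)
#   _is_s3_url('s3://bucket/key.csv')        -> True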
def _expand_user(filepath_or_buffer):
"""Return the argument with an initial component of ~ or ~user
replaced by that user's home directory.
Parameters
----------
filepath_or_buffer : object to be converted if possible
Returns
-------
expanded_filepath_or_buffer : an expanded filepath or the
input if not expandable
"""
if isinstance(filepath_or_buffer, string_types):
return os.path.expanduser(filepath_or_buffer)
return filepath_or_buffer
def _validate_header_arg(header):
if isinstance(header, bool):
raise TypeError("Passing a bool to header is invalid. "
"Use header=None for no header or "
"header=int or list-like of ints to specify "
"the row(s) making up the column names")
def _stringify_path(filepath_or_buffer):
"""Attempt to convert a path-like object to a string.
Parameters
----------
filepath_or_buffer : object to be converted
Returns
-------
str_filepath_or_buffer : maybe a string version of the object
Notes
-----
Objects supporting the fspath protocol (python 3.6+) are coerced
    according to their __fspath__ method.
For backwards compatibility with older pythons, pathlib.Path and
py.path objects are specially coerced.
Any other object is passed through unchanged, which includes bytes,
strings, buffers, or anything else that's not even path-like.
"""
try:
import pathlib
_PATHLIB_INSTALLED = True
except ImportError:
_PATHLIB_INSTALLED = False
try:
from py.path import local as LocalPath
_PY_PATH_INSTALLED = True
except ImportError:
_PY_PATH_INSTALLED = False
if hasattr(filepath_or_buffer, '__fspath__'):
return filepath_or_buffer.__fspath__()
if _PATHLIB_INSTALLED and isinstance(filepath_or_buffer, pathlib.Path):
return text_type(filepath_or_buffer)
if _PY_PATH_INSTALLED and isinstance(filepath_or_buffer, LocalPath):
return filepath_or_buffer.strpath
return filepath_or_buffer
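# Illustrative values (not part of pandas): path-like objects are coerced to
# plain strings, everything else passes through unchanged.
#   _stringify_path(pathlib.Path('/tmp/data.csv'))  -> '/tmp/data.csv'
#   _stringify_path(open('/tmp/data.csv'))          -> the file object itself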
def get_filepath_or_buffer(filepath_or_buffer, encoding=None,
compression=None):
"""
If the filepath_or_buffer is a url, translate and return the buffer.
Otherwise passthrough.
Parameters
----------
filepath_or_buffer : a url, filepath (str, py.path.local or pathlib.Path),
or buffer
encoding : the encoding to use to decode py3 bytes, default is 'utf-8'
Returns
-------
a filepath_or_buffer, the encoding, the compression
"""
filepath_or_buffer = _stringify_path(filepath_or_buffer)
if _is_url(filepath_or_buffer):
req = _urlopen(filepath_or_buffer)
content_encoding = req.headers.get('Content-Encoding', None)
if content_encoding == 'gzip':
# Override compression based on Content-Encoding header
compression = 'gzip'
reader = BytesIO(req.read())
return reader, encoding, compression
if _is_s3_url(filepath_or_buffer):
from pandas.io import s3
return s3.get_filepath_or_buffer(filepath_or_buffer,
encoding=encoding,
compression=compression)
if isinstance(filepath_or_buffer, (compat.string_types,
compat.binary_type,
mmap.mmap)):
return _expand_user(filepath_or_buffer), None, compression
if not is_file_like(filepath_or_buffer):
msg = "Invalid file path or buffer object type: {_type}"
raise ValueError(msg.format(_type=type(filepath_or_buffer)))
return filepath_or_buffer, None, compression
def file_path_to_url(path):
"""
converts an absolute native path to a FILE URL.
Parameters
----------
path : a path in native format
Returns
-------
a valid FILE URL
"""
return urljoin('file:', pathname2url(path))
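# Illustrative value (not part of pandas; a POSIX path is shown -- Windows
# paths are quoted differently by pathname2url):
#   file_path_to_url('/tmp/data.csv')  -> 'file:///tmp/data.csv'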
_compression_to_extension = {
'gzip': '.gz',
'bz2': '.bz2',
'zip': '.zip',
'xz': '.xz',
}
def _infer_compression(filepath_or_buffer, compression):
"""
Get the compression method for filepath_or_buffer. If compression='infer',
the inferred compression method is returned. Otherwise, the input
compression method is returned unchanged, unless it's invalid, in which
case an error is raised.
Parameters
----------
    filepath_or_buffer :
a path (str) or buffer
compression : str or None
the compression method including None for no compression and 'infer'
Returns
-------
string or None :
compression method
Raises
------
ValueError on invalid compression specified
"""
# No compression has been explicitly specified
if compression is None:
return None
# Infer compression
if compression == 'infer':
# Convert all path types (e.g. pathlib.Path) to strings
filepath_or_buffer = _stringify_path(filepath_or_buffer)
if not isinstance(filepath_or_buffer, compat.string_types):
# Cannot infer compression of a buffer, assume no compression
return None
# Infer compression from the filename/URL extension
for compression, extension in _compression_to_extension.items():
if filepath_or_buffer.endswith(extension):
return compression
return None
# Compression has been specified. Check that it's valid
if compression in _compression_to_extension:
return compression
msg = 'Unrecognized compression type: {}'.format(compression)
valid = ['infer', None] + sorted(_compression_to_extension)
msg += '\nValid compression types are {}'.format(valid)
raise ValueError(msg)
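# Illustrative values (not part of pandas):
#   _infer_compression('data.csv.gz', 'infer')  -> 'gzip'
#   _infer_compression('data.csv', 'infer')     -> None
#   _infer_compression(BytesIO(b''), 'infer')   -> None (never inferred for buffers)
#   _infer_compression('data.csv', 'zip')       -> 'zip' (explicit values are not
#                                                  checked against the extension)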
def _get_handle(path_or_buf, mode, encoding=None, compression=None,
memory_map=False, is_text=True):
"""
Get file handle for given path/buffer and mode.
Parameters
----------
path_or_buf :
a path (str) or buffer
mode : str
mode to open path_or_buf with
encoding : str or None
compression : str or None
Supported compression protocols are gzip, bz2, zip, and xz
memory_map : boolean, default False
See parsers._parser_params for more information.
is_text : boolean, default True
whether file/buffer is in text format (csv, json, etc.), or in binary
mode (pickle, etc.)
Returns
-------
f : file-like
A file-like object
handles : list of file-like objects
        A list of the file-like objects that were opened in this function.
"""
try:
from s3fs import S3File
need_text_wrapping = (BytesIO, S3File)
except ImportError:
need_text_wrapping = (BytesIO,)
handles = list()
f = path_or_buf
# Convert pathlib.Path/py.path.local or string
path_or_buf = _stringify_path(path_or_buf)
is_path = isinstance(path_or_buf, compat.string_types)
if compression:
if compat.PY2 and not is_path and encoding:
msg = 'compression with encoding is not yet supported in Python 2'
raise ValueError(msg)
# GZ Compression
if compression == 'gzip':
import gzip
if is_path:
f = gzip.open(path_or_buf, mode)
else:
f = gzip.GzipFile(fileobj=path_or_buf)
# BZ Compression
elif compression == 'bz2':
import bz2
if is_path:
f = bz2.BZ2File(path_or_buf, mode)
elif compat.PY2:
# Python 2's bz2 module can't take file objects, so have to
# run through decompress manually
f = StringIO(bz2.decompress(path_or_buf.read()))
path_or_buf.close()
else:
f = bz2.BZ2File(path_or_buf)
# ZIP Compression
elif compression == 'zip':
import zipfile
zip_file = zipfile.ZipFile(path_or_buf)
zip_names = zip_file.namelist()
if len(zip_names) == 1:
f = zip_file.open(zip_names.pop())
elif len(zip_names) == 0:
raise ValueError('Zero files found in ZIP file {}'
.format(path_or_buf))
else:
raise ValueError('Multiple files found in ZIP file.'
' Only one file per ZIP: {}'
.format(zip_names))
# XZ Compression
elif compression == 'xz':
lzma = compat.import_lzma()
f = lzma.LZMAFile(path_or_buf, mode)
# Unrecognized Compression
else:
msg = 'Unrecognized compression type: {}'.format(compression)
raise ValueError(msg)
handles.append(f)
elif is_path:
if compat.PY2:
# Python 2
f = open(path_or_buf, mode)
elif encoding:
# Python 3 and encoding
f = open(path_or_buf, mode, encoding=encoding)
elif is_text:
# Python 3 and no explicit encoding
f = open(path_or_buf, mode, errors='replace')
else:
# Python 3 and binary mode
f = open(path_or_buf, mode)
handles.append(f)
# in Python 3, convert BytesIO or fileobjects passed with an encoding
if compat.PY3 and is_text and\
(compression or isinstance(f, need_text_wrapping)):
from io import TextIOWrapper
f = TextIOWrapper(f, encoding=encoding)
handles.append(f)
if memory_map and hasattr(f, 'fileno'):
try:
g = MMapWrapper(f)
f.close()
f = g
except Exception:
# we catch any errors that may have occurred
# because that is consistent with the lower-level
# functionality of the C engine (pd.read_csv), so
# leave the file handler as is then
pass
return f, handles
class MMapWrapper(BaseIterator):
"""
    Wrapper for Python's mmap class so that it can be read properly
    by Python's csv.reader class.
Parameters
----------
f : file object
File object to be mapped onto memory. Must support the 'fileno'
method or have an equivalent attribute
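    Examples
    --------
    A sketch of the intended use with csv.reader:
    >>> f = open('data.csv', 'r')
    >>> rows = list(csv.reader(MMapWrapper(f)))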
"""
def __init__(self, f):
self.mmap = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
def __getattr__(self, name):
return getattr(self.mmap, name)
def __iter__(self):
return self
def __next__(self):
newline = self.mmap.readline()
# readline returns bytes, not str, in Python 3,
# but Python's CSV reader expects str, so convert
# the output to str before continuing
if compat.PY3:
newline = compat.bytes_to_str(newline)
# mmap doesn't raise if reading past the allocated
# data but instead returns an empty string, so raise
# if that is returned
if newline == '':
raise StopIteration
return newline
if not compat.PY3:
MMapWrapper.next = lambda self: self.__next__()
class UTF8Recoder(BaseIterator):
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
"""
def __init__(self, f, encoding):
self.reader = codecs.getreader(encoding)(f)
def read(self, bytes=-1):
return self.reader.read(bytes).encode("utf-8")
def readline(self):
return self.reader.readline().encode("utf-8")
def next(self):
return next(self.reader).encode("utf-8")
if compat.PY3: # pragma: no cover
def UnicodeReader(f, dialect=csv.excel, encoding="utf-8", **kwds):
# ignore encoding
return csv.reader(f, dialect=dialect, **kwds)
def UnicodeWriter(f, dialect=csv.excel, encoding="utf-8", **kwds):
return csv.writer(f, dialect=dialect, **kwds)
else:
class UnicodeReader(BaseIterator):
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
On Python 3, this is replaced (below) by csv.reader, which handles
unicode.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
f = UTF8Recoder(f, encoding)
self.reader = csv.reader(f, dialect=dialect, **kwds)
def __next__(self):
row = next(self.reader)
return [compat.text_type(s, "utf-8") for s in row]
class UnicodeWriter:
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
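        Examples
        --------
        A sketch (this Python 2 branch only; on Python 3 csv.writer is used
        directly):
        >>> with open('out.csv', 'wb') as out:
        ...     writer = UnicodeWriter(out, encoding='utf-8')
        ...     writer.writerow([u'value', 42])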
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
self.quoting = kwds.get("quoting", None)
def writerow(self, row):
def _check_as_is(x):
return (self.quoting == csv.QUOTE_NONNUMERIC and
is_number(x)) or isinstance(x, str)
row = [x if _check_as_is(x)
else pprint_thing(x).encode("utf-8") for x in row]
self.writer.writerow([s for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
def _check_as_is(x):
return (self.quoting == csv.QUOTE_NONNUMERIC and
is_number(x)) or isinstance(x, str)
for i, row in enumerate(rows):
rows[i] = [x if _check_as_is(x)
else pprint_thing(x).encode("utf-8") for x in row]
self.writer.writerows([[s for s in row] for row in rows])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
import os
import sys
import subprocess
from six import print_
from ccmlib import common, repository
from ccmlib.cluster import Cluster
from ccmlib.cluster_factory import ClusterFactory
from ccmlib.cmds.command import Cmd
from ccmlib.common import ArgumentError
from ccmlib.dse_cluster import DseCluster
from ccmlib.dse_node import DseNode
from ccmlib.node import Node, NodeError
def cluster_cmds():
return [
"create",
"add",
"populate",
"list",
"switch",
"status",
"remove",
"clear",
"liveset",
"start",
"stop",
"flush",
"compact",
"stress",
"updateconf",
"updatedseconf",
"updatelog4j",
"cli",
"setdir",
"bulkload",
"setlog",
"scrub",
"verify",
"invalidatecache",
"checklogerror",
"showlastlog",
"jconsole"
]
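# Illustrative invocations of the commands registered above (a sketch; each
# command's --help output is authoritative):
#   ccm create test -v 2.0.5 -n 3 -s      # create, populate and start a cluster
#   ccm add node4 -i 127.0.0.4 -j 7400 -b # add a bootstrapping node
#   ccm updateconf 'concurrent_reads: 32' # push a cassandra.yaml setting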
def parse_populate_count(v):
if v is None:
return None
tmp = v.split(':')
if len(tmp) == 1:
return int(tmp[0])
else:
return [int(t) for t in tmp]
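# Examples: parse_populate_count("3") -> 3, while parse_populate_count("2:2:2")
# -> [2, 2, 2] for a three-datacenter cluster; None is passed through as None.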
class ClusterCreateCmd(Cmd):
def description(self):
return "Create a new cluster"
def get_parser(self):
usage = "usage: ccm create [options] cluster_name"
parser = self._get_default_parser(usage, self.description())
parser.add_option('--no-switch', action="store_true", dest="no_switch",
help="Don't switch to the newly created cluster", default=False)
parser.add_option('-p', '--partitioner', type="string", dest="partitioner",
help="Set the cluster partitioner class")
parser.add_option('-v', "--version", type="string", dest="version",
help="Download and use provided cassandra or dse version. If version is of the form 'git:<branch name>', then the specified cassandra branch will be downloaded from the git repo and compiled. (takes precedence over --install-dir)", default=None)
parser.add_option('-o', "--opsc", type="string", dest="opscenter",
help="Download and use provided opscenter version to install with DSE. Will have no effect on cassandra installs)", default=None)
parser.add_option("--dse", action="store_true", dest="dse",
help="Use with -v to indicate that the version being loaded is DSE")
parser.add_option("--dse-username", type="string", dest="dse_username",
help="The username to use to download DSE with", default=None)
parser.add_option("--dse-password", type="string", dest="dse_password",
help="The password to use to download DSE with", default=None)
parser.add_option("--install-dir", type="string", dest="install_dir",
help="Path to the cassandra or dse directory to use [default %default]", default="./")
parser.add_option('-n', '--nodes', type="string", dest="nodes",
help="Populate the new cluster with that number of nodes (a single int or a colon-separate list of ints for multi-dc setups)")
parser.add_option('-i', '--ipprefix', type="string", dest="ipprefix",
help="Ipprefix to use to create the ip of a node while populating")
parser.add_option('-I', '--ip-format', type="string", dest="ipformat",
help="Format to use when creating the ip of a node (supports enumerating ipv6-type addresses like fe80::%d%lo0)")
parser.add_option('-s', "--start", action="store_true", dest="start_nodes",
help="Start nodes added through -s", default=False)
parser.add_option('-d', "--debug", action="store_true", dest="debug",
help="If -s is used, show the standard output when starting the nodes", default=False)
parser.add_option('-b', "--binary-protocol", action="store_true", dest="binary_protocol",
help="Enable the binary protocol (starting from C* 1.2.5 the binary protocol is started by default and this option is a no-op)", default=False)
parser.add_option('-D', "--debug-log", action="store_true", dest="debug_log",
help="With -n, sets debug logging on the new nodes", default=False)
parser.add_option('-T', "--trace-log", action="store_true", dest="trace_log",
help="With -n, sets trace logging on the new nodes", default=False)
parser.add_option("--vnodes", action="store_true", dest="vnodes",
help="Use vnodes (256 tokens). Must be paired with -n.", default=False)
parser.add_option('--jvm_arg', action="append", dest="jvm_args",
help="Specify a JVM argument", default=[])
parser.add_option('--profile', action="store_true", dest="profile",
help="Start the nodes with yourkit agent (only valid with -s)", default=False)
parser.add_option('--profile-opts', type="string", action="store", dest="profile_options",
help="Yourkit options when profiling", default=None)
parser.add_option('--ssl', type="string", dest="ssl_path",
help="Path to keystore.jks and cassandra.crt files (and truststore.jks [not required])", default=None)
parser.add_option('--require_client_auth', action="store_true", dest="require_client_auth",
help="Enable client authentication (only vaid with --ssl)", default=False)
parser.add_option('--node-ssl', type="string", dest="node_ssl_path",
help="Path to keystore.jks and truststore.jks for internode encryption", default=None)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, cluster_name=True)
if options.ipprefix and options.ipformat:
parser.print_help()
parser.error("%s and %s may not be used together" % (parser.get_option('-i'), parser.get_option('-I')))
self.nodes = parse_populate_count(options.nodes)
if self.options.vnodes and self.nodes is None:
print_("Can't set --vnodes if not populating cluster in this command.")
parser.print_help()
exit(1)
if not options.version:
try:
common.validate_install_dir(options.install_dir)
except ArgumentError:
parser.print_help()
parser.error("%s is not a valid cassandra directory. You must define a cassandra dir or version." % options.install_dir)
def run(self):
try:
if self.options.dse or (not self.options.version and common.isDse(self.options.install_dir)):
cluster = DseCluster(self.path, self.name, install_dir=self.options.install_dir, version=self.options.version, dse_username=self.options.dse_username, dse_password=self.options.dse_password, opscenter=self.options.opscenter, verbose=True)
else:
cluster = Cluster(self.path, self.name, install_dir=self.options.install_dir, version=self.options.version, verbose=True)
except OSError as e:
cluster_dir = os.path.join(self.path, self.name)
import traceback
print_('Cannot create cluster: %s\n%s' % (str(e), traceback.format_exc()), file=sys.stderr)
exit(1)
if self.options.partitioner:
cluster.set_partitioner(self.options.partitioner)
if cluster.cassandra_version() >= "1.2.5":
self.options.binary_protocol = True
if self.options.binary_protocol:
cluster.set_configuration_options({'start_native_transport': True})
if cluster.cassandra_version() >= "1.2" and self.options.vnodes:
cluster.set_configuration_options({'num_tokens': 256})
if not self.options.no_switch:
common.switch_cluster(self.path, self.name)
print_('Current cluster is now: %s' % self.name)
if not (self.options.ipprefix or self.options.ipformat):
self.options.ipformat = '127.0.0.%d'
if self.options.ssl_path:
cluster.enable_ssl(self.options.ssl_path, self.options.require_client_auth)
if self.options.node_ssl_path:
cluster.enable_internode_ssl(self.options.node_ssl_path)
if self.nodes is not None:
try:
if self.options.debug_log:
cluster.set_log_level("DEBUG")
if self.options.trace_log:
cluster.set_log_level("TRACE")
cluster.populate(self.nodes, self.options.debug, use_vnodes=self.options.vnodes, ipprefix=self.options.ipprefix, ipformat=self.options.ipformat)
if self.options.start_nodes:
profile_options = None
if self.options.profile:
profile_options = {}
if self.options.profile_options:
profile_options['options'] = self.options.profile_options
if cluster.start(verbose=self.options.debug_log, wait_for_binary_proto=self.options.binary_protocol, jvm_args=self.options.jvm_args, profile_options=profile_options) is None:
details = ""
if not self.options.debug_log:
details = " (you can use --debug-log for more information)"
print_("Error starting nodes, see above for details%s" % details, file=sys.stderr)
except common.ArgumentError as e:
print_(str(e), file=sys.stderr)
exit(1)
class ClusterAddCmd(Cmd):
def description(self):
return "Add a new node to the current cluster"
def get_parser(self):
usage = "usage: ccm add [options] node_name"
parser = self._get_default_parser(usage, self.description())
parser.add_option('-b', '--auto-bootstrap', action="store_true", dest="bootstrap",
help="Set auto bootstrap for the node", default=False)
parser.add_option('-s', '--seeds', action="store_true", dest="is_seed",
help="Configure this node as a seed", default=False)
parser.add_option('-i', '--itf', type="string", dest="itfs",
help="Set host and port for thrift, the binary protocol and storage (format: host[:port])")
parser.add_option('-t', '--thrift-itf', type="string", dest="thrift_itf",
help="Set the thrift host and port for the node (format: host[:port])")
parser.add_option('-l', '--storage-itf', type="string", dest="storage_itf",
help="Set the storage (cassandra internal) host and port for the node (format: host[:port])")
parser.add_option('--binary-itf', type="string", dest="binary_itf",
help="Set the binary protocol host and port for the node (format: host[:port]).")
parser.add_option('-j', '--jmx-port', type="string", dest="jmx_port",
help="JMX port for the node", default="7199")
parser.add_option('-r', '--remote-debug-port', type="string", dest="remote_debug_port",
help="Remote Debugging Port for the node", default="2000")
parser.add_option('-n', '--token', type="string", dest="initial_token",
help="Initial token for the node", default=None)
parser.add_option('-d', '--data-center', type="string", dest="data_center",
help="Datacenter name this node is part of", default=None)
parser.add_option('--dse', action="store_true", dest="dse_node",
help="Add node to DSE Cluster", default=False)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True, load_node=False)
if options.itfs is None and (options.thrift_itf is None or options.storage_itf is None or options.binary_itf is None):
            print_('Missing thrift and/or storage and/or binary protocol interface(s)', file=sys.stderr)
parser.print_help()
exit(1)
used_jmx_ports = [node.jmx_port for node in self.cluster.nodelist()]
if options.jmx_port in used_jmx_ports:
print_("This JMX port is already in use. Choose another.", file=sys.stderr)
parser.print_help()
exit(1)
if options.thrift_itf is None:
options.thrift_itf = options.itfs
if options.storage_itf is None:
options.storage_itf = options.itfs
if options.binary_itf is None:
options.binary_itf = options.itfs
self.thrift = common.parse_interface(options.thrift_itf, 9160)
self.storage = common.parse_interface(options.storage_itf, 7000)
self.binary = common.parse_interface(options.binary_itf, 9042)
if self.binary[0] != self.thrift[0]:
print_('Cannot set a binary address different from the thrift one', file=sys.stderr)
exit(1)
self.jmx_port = options.jmx_port
self.remote_debug_port = options.remote_debug_port
self.initial_token = options.initial_token
def run(self):
try:
if self.options.dse_node:
node = DseNode(self.name, self.cluster, self.options.bootstrap, self.thrift, self.storage, self.jmx_port, self.remote_debug_port, self.initial_token, binary_interface=self.binary)
else:
node = Node(self.name, self.cluster, self.options.bootstrap, self.thrift, self.storage, self.jmx_port, self.remote_debug_port, self.initial_token, binary_interface=self.binary)
self.cluster.add(node, self.options.is_seed, self.options.data_center)
except common.ArgumentError as e:
print_(str(e), file=sys.stderr)
exit(1)
class ClusterPopulateCmd(Cmd):
def description(self):
return "Add a group of new nodes with default options"
def get_parser(self):
usage = "usage: ccm populate -n <node count> {-d}"
parser = self._get_default_parser(usage, self.description())
parser.add_option('-n', '--nodes', type="string", dest="nodes",
help="Number of nodes to populate with (a single int or a colon-separate list of ints for multi-dc setups)")
parser.add_option('-d', '--debug', action="store_true", dest="debug",
help="Enable remote debugging options", default=False)
parser.add_option('--vnodes', action="store_true", dest="vnodes",
help="Populate using vnodes", default=False)
parser.add_option('-i', '--ipprefix', type="string", dest="ipprefix",
help="Ipprefix to use to create the ip of a node")
parser.add_option('-I', '--ip-format', type="string", dest="ipformat",
help="Format to use when creating the ip of a node (supports enumerating ipv6-type addresses like fe80::%d%lo0)")
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
if options.ipprefix and options.ipformat:
parser.print_help()
parser.error("%s and %s may not be used together" % (parser.get_option('-i'), parser.get_option('-I')))
self.nodes = parse_populate_count(options.nodes)
if self.nodes is None:
parser.print_help()
parser.error("Not a valid number of nodes. Did you use -n?")
def run(self):
try:
if self.cluster.cassandra_version() >= "1.2" and self.options.vnodes:
self.cluster.set_configuration_options({'num_tokens': 256})
if not (self.options.ipprefix or self.options.ipformat):
self.options.ipformat = '127.0.0.%d'
self.cluster.populate(self.nodes, self.options.debug, use_vnodes=self.options.vnodes, ipprefix=self.options.ipprefix, ipformat=self.options.ipformat)
except common.ArgumentError as e:
print_(str(e), file=sys.stderr)
exit(1)
class ClusterListCmd(Cmd):
def description(self):
return "List existing clusters"
def get_parser(self):
usage = "usage: ccm list [options]"
return self._get_default_parser(usage, self.description())
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args)
def run(self):
try:
current = common.current_cluster_name(self.path)
except Exception as e:
current = ''
for dir in os.listdir(self.path):
if os.path.exists(os.path.join(self.path, dir, 'cluster.conf')):
print_(" %s%s" % ('*' if current == dir else ' ', dir))
class ClusterSwitchCmd(Cmd):
def description(self):
return "Switch of current (active) cluster"
def get_parser(self):
usage = "usage: ccm switch [options] cluster_name"
return self._get_default_parser(usage, self.description())
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, cluster_name=True)
if not os.path.exists(os.path.join(self.path, self.name, 'cluster.conf')):
print_("%s does not appear to be a valid cluster (use ccm list to view valid clusters)" % self.name, file=sys.stderr)
exit(1)
def run(self):
common.switch_cluster(self.path, self.name)
class ClusterStatusCmd(Cmd):
def description(self):
return "Display status on the current cluster"
def get_parser(self):
usage = "usage: ccm status [options]"
parser = self._get_default_parser(usage, self.description())
parser.add_option('-v', '--verbose', action="store_true", dest="verbose",
help="Print full information on all nodes", default=False)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
def run(self):
self.cluster.show(self.options.verbose)
class ClusterRemoveCmd(Cmd):
def description(self):
return "Remove the current or specified cluster (delete all data)"
def get_parser(self):
usage = "usage: ccm remove [options] [cluster_name]"
parser = self._get_default_parser(usage, self.description())
return parser
def validate(self, parser, options, args):
self.other_cluster = None
if len(args) > 0:
# Setup to remove the specified cluster:
Cmd.validate(self, parser, options, args)
self.other_cluster = args[0]
if not os.path.exists(os.path.join(
self.path, self.other_cluster, 'cluster.conf')):
print_("%s does not appear to be a valid cluster"
" (use ccm list to view valid clusters)"
% self.other_cluster, file=sys.stderr)
exit(1)
else:
# Setup to remove the current cluster:
Cmd.validate(self, parser, options, args, load_cluster=True)
def run(self):
if self.other_cluster:
# Remove the specified cluster:
cluster = ClusterFactory.load(self.path, self.other_cluster)
cluster.remove()
# Remove CURRENT flag if the specified cluster is the current cluster:
if self.other_cluster == common.current_cluster_name(self.path):
os.remove(os.path.join(self.path, 'CURRENT'))
else:
# Remove the current cluster:
self.cluster.remove()
os.remove(os.path.join(self.path, 'CURRENT'))
class ClusterClearCmd(Cmd):
def description(self):
return "Clear the current cluster data (and stop all nodes)"
def get_parser(self):
usage = "usage: ccm clear [options]"
parser = self._get_default_parser(usage, self.description())
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
def run(self):
self.cluster.clear()
class ClusterLivesetCmd(Cmd):
def description(self):
return "Print a comma-separated list of addresses of running nodes (handful in scripts)"
def get_parser(self):
usage = "usage: ccm liveset [options]"
parser = self._get_default_parser(usage, self.description())
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
def run(self):
l = [node.network_interfaces['storage'][0] for node in list(self.cluster.nodes.values()) if node.is_live()]
print_(",".join(l))
class ClusterSetdirCmd(Cmd):
def description(self):
return "Set the install directory (cassandra or dse) to use"
def get_parser(self):
usage = "usage: ccm setdir [options]"
parser = self._get_default_parser(usage, self.description())
parser.add_option('-v', "--version", type="string", dest="version",
help="Download and use provided cassandra or dse version. If version is of the form 'git:<branch name>', then the specified cassandra branch will be downloaded from the git repo and compiled. (takes precedence over --install-dir)", default=None)
parser.add_option("--install-dir", type="string", dest="install_dir",
help="Path to the cassandra or dse directory to use [default %default]", default="./")
parser.add_option('-n', '--node', type="string", dest="node",
help="Set directory only for the specified node")
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
def run(self):
try:
target = self.cluster
if self.options.node:
target = self.cluster.nodes.get(self.options.node)
if not target:
print_("Node not found: %s" % self.options.node)
return
target.set_install_dir(install_dir=self.options.install_dir, version=self.options.version, verbose=True)
except common.ArgumentError as e:
print_(str(e), file=sys.stderr)
exit(1)
class ClusterClearrepoCmd(Cmd):
def description(self):
return "Cleanup downloaded cassandra sources"
def get_parser(self):
usage = "usage: ccm clearrepo [options]"
parser = self._get_default_parser(usage, self.description())
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args)
def run(self):
repository.clean_all()
class ClusterStartCmd(Cmd):
def description(self):
return "Start all the non started nodes of the current cluster"
def get_parser(self):
usage = "usage: ccm cluster start [options]"
parser = self._get_default_parser(usage, self.description())
parser.add_option('-v', '--verbose', action="store_true", dest="verbose",
help="Print standard output of cassandra process", default=False)
parser.add_option('--no-wait', action="store_true", dest="no_wait",
help="Do not wait for cassandra node to be ready", default=False)
parser.add_option('--wait-other-notice', action="store_true", dest="wait_other_notice",
help="Wait until all other live nodes of the cluster have marked this node UP", default=False)
parser.add_option('--wait-for-binary-proto', action="store_true", dest="wait_for_binary_proto",
help="Wait for the binary protocol to start", default=False)
parser.add_option('--jvm_arg', action="append", dest="jvm_args",
help="Specify a JVM argument", default=[])
parser.add_option('--profile', action="store_true", dest="profile",
help="Start the nodes with yourkit agent (only valid with -s)", default=False)
parser.add_option('--profile-opts', type="string", action="store", dest="profile_options",
help="Yourkit options when profiling", default=None)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
def run(self):
try:
profile_options = None
if self.options.profile:
profile_options = {}
if self.options.profile_options:
profile_options['options'] = self.options.profile_options
if len(self.cluster.nodes) == 0:
print_("No node in this cluster yet. Use the populate command before starting.")
exit(1)
if self.cluster.start(no_wait=self.options.no_wait,
wait_other_notice=self.options.wait_other_notice,
wait_for_binary_proto=self.options.wait_for_binary_proto,
verbose=self.options.verbose,
jvm_args=self.options.jvm_args,
profile_options=profile_options) is None:
details = ""
if not self.options.verbose:
details = " (you can use --verbose for more information)"
print_("Error starting nodes, see above for details%s" % details, file=sys.stderr)
exit(1)
except NodeError as e:
print_(str(e), file=sys.stderr)
print_("Standard error output is:", file=sys.stderr)
for line in e.process.stderr:
print_(line.rstrip('\n'), file=sys.stderr)
exit(1)
class ClusterStopCmd(Cmd):
def description(self):
return "Stop all the nodes of the cluster"
def get_parser(self):
usage = "usage: ccm cluster stop [options] name"
parser = self._get_default_parser(usage, self.description())
parser.add_option('-v', '--verbose', action="store_true", dest="verbose",
help="Print nodes that were not running", default=False)
parser.add_option('--no-wait', action="store_true", dest="no_wait",
help="Do not wait for the node to be stopped", default=False)
parser.add_option('-g', '--gently', action="store_true", dest="gently",
help="Shut down gently (default)", default=True)
parser.add_option('--not-gently', action="store_false", dest="gently",
help="Shut down immediately (kill -9)", default=True)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
def run(self):
try:
not_running = self.cluster.stop(not self.options.no_wait, gently=self.options.gently)
if self.options.verbose and len(not_running) > 0:
sys.stdout.write("The following nodes were not running: ")
for node in not_running:
sys.stdout.write(node.name + " ")
print_("")
except NodeError as e:
print_(str(e), file=sys.stderr)
exit(1)
class _ClusterNodetoolCmd(Cmd):
def get_parser(self):
parser = self._get_default_parser(self.usage, self.description())
return parser
def description(self):
return self.descr_text
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
def run(self):
self.cluster.nodetool(self.nodetool_cmd)
class ClusterFlushCmd(_ClusterNodetoolCmd):
usage = "usage: ccm cluster flush [options] name"
nodetool_cmd = 'flush'
descr_text = "Flush all (running) nodes of the cluster"
class ClusterCompactCmd(_ClusterNodetoolCmd):
usage = "usage: ccm cluster compact [options] name"
nodetool_cmd = 'compact'
descr_text = "Compact all (running) node of the cluster"
class ClusterDrainCmd(_ClusterNodetoolCmd):
usage = "usage: ccm cluster drain [options] name"
nodetool_cmd = 'drain'
descr_text = "Drain all (running) node of the cluster"
class ClusterStressCmd(Cmd):
def description(self):
return "Run stress using all live nodes"
def get_parser(self):
usage = "usage: ccm stress [options] [stress_options]"
parser = self._get_default_parser(usage, self.description(), ignore_unknown_options=True)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
self.stress_options = args + parser.get_ignored()
def run(self):
try:
self.cluster.stress(self.stress_options)
except Exception as e:
print_(e, file=sys.stderr)
class ClusterUpdateconfCmd(Cmd):
def description(self):
return "Update the cassandra config files for all nodes"
def get_parser(self):
usage = "usage: ccm updateconf [options] [ new_setting | ... ], where new_setting should be a string of the form 'compaction_throughput_mb_per_sec: 32'; nested options can be separated with a period like 'client_encryption_options.enabled: false'"
parser = self._get_default_parser(usage, self.description())
parser.add_option('--no-hh', '--no-hinted-handoff', action="store_false",
dest="hinted_handoff", default=True, help="Disable hinted handoff")
parser.add_option('--batch-cl', '--batch-commit-log', action="store_true",
dest="cl_batch", default=False, help="Set commit log to batch mode")
parser.add_option('--rt', '--rpc-timeout', action="store", type='int',
dest="rpc_timeout", help="Set rpc timeout")
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
try:
self.setting = common.parse_settings(args)
except common.ArgumentError as e:
print_(str(e), file=sys.stderr)
exit(1)
def run(self):
self.setting['hinted_handoff_enabled'] = self.options.hinted_handoff
if self.options.rpc_timeout is not None:
if self.cluster.cassandra_version() < "1.2":
self.setting['rpc_timeout_in_ms'] = self.options.rpc_timeout
else:
self.setting['read_request_timeout_in_ms'] = self.options.rpc_timeout
self.setting['range_request_timeout_in_ms'] = self.options.rpc_timeout
self.setting['write_request_timeout_in_ms'] = self.options.rpc_timeout
self.setting['truncate_request_timeout_in_ms'] = self.options.rpc_timeout
self.setting['request_timeout_in_ms'] = self.options.rpc_timeout
self.cluster.set_configuration_options(values=self.setting, batch_commitlog=self.options.cl_batch)
class ClusterUpdatedseconfCmd(Cmd):
def description(self):
return "Update the dse config files for all nodes"
def get_parser(self):
usage = "usage: ccm updatedseconf [options] [ new_setting | ... ], where new_setting should be a string of the form 'max_solr_concurrency_per_core: 2'; nested options can be separated with a period like 'cql_slow_log_options.enabled: true'"
parser = self._get_default_parser(usage, self.description())
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
try:
self.setting = common.parse_settings(args)
except common.ArgumentError as e:
print_(str(e), file=sys.stderr)
exit(1)
def run(self):
self.cluster.set_dse_configuration_options(values=self.setting)
#
# Class implements the functionality of updating log4j-server.properties
# on ALL nodes by copying the given config into
# ~/.ccm/name-of-cluster/nodeX/conf/log4j-server.properties
#
class ClusterUpdatelog4jCmd(Cmd):
def description(self):
return "Update the Cassandra log4j-server.properties configuration file on all nodes"
def get_parser(self):
usage = "usage: ccm updatelog4j -p <log4j config>"
parser = self._get_default_parser(usage, self.description(), ignore_unknown_options=True)
parser.add_option('-p', '--path', type="string", dest="log4jpath",
help="Path to new Cassandra log4j configuration file")
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
try:
self.log4jpath = options.log4jpath
if self.log4jpath is None:
raise KeyError("[Errno] -p or --path <path of new log4j congiguration file> is not provided")
except common.ArgumentError as e:
print_(str(e), file=sys.stderr)
exit(1)
except KeyError as e:
print_(str(e), file=sys.stderr)
exit(1)
def run(self):
try:
self.cluster.update_log4j(self.log4jpath)
except common.ArgumentError as e:
print_(str(e), file=sys.stderr)
exit(1)
class ClusterCliCmd(Cmd):
def description(self):
return "Launch cassandra cli connected to some live node (if any)"
def get_parser(self):
usage = "usage: ccm cli [options] [cli_options]"
parser = self._get_default_parser(usage, self.description(), ignore_unknown_options=True)
parser.add_option('-x', '--exec', type="string", dest="cmds", default=None,
help="Execute the specified commands and exit")
parser.add_option('-v', '--verbose', action="store_true", dest="verbose",
help="With --exec, show cli output after completion", default=False)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
self.cli_options = parser.get_ignored() + args[1:]
def run(self):
self.cluster.run_cli(self.options.cmds, self.options.verbose, self.cli_options)
class ClusterBulkloadCmd(Cmd):
def description(self):
return "Bulkload files into the cluster by connecting to some live node (if any)"
def get_parser(self):
usage = "usage: ccm bulkload [options] [sstable_dir]"
parser = self._get_default_parser(usage, self.description(), ignore_unknown_options=True)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
self.loader_options = parser.get_ignored() + args
def run(self):
self.cluster.bulkload(self.loader_options)
class ClusterScrubCmd(Cmd):
def description(self):
return "Scrub files"
def get_parser(self):
usage = "usage: ccm scrub [options] <keyspace> <cf>"
parser = self._get_default_parser(usage, self.description(), ignore_unknown_options=True)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
self.scrub_options = parser.get_ignored() + args
def run(self):
self.cluster.scrub(self.scrub_options)
class ClusterVerifyCmd(Cmd):
def description(self):
return "Verify files"
def get_parser(self):
usage = "usage: ccm verify [options] <keyspace> <cf>"
parser = self._get_default_parser(usage, self.description(), ignore_unknown_options=True)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
self.verify_options = parser.get_ignored() + args
def run(self):
self.cluster.verify(self.verify_options)
class ClusterSetlogCmd(Cmd):
def description(self):
return "Set log level (INFO, DEBUG, ...) with/without Java class for all node of the cluster - require a node restart"
def get_parser(self):
usage = "usage: ccm setlog [options] level"
parser = self._get_default_parser(usage, self.description())
parser.add_option('-c', '--class', type="string", dest="class_name", default=None,
help="Optional java class/package. Logging will be set for only this class/package if set")
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
if len(args) == 0:
print_('Missing log level', file=sys.stderr)
parser.print_help()
exit(1)
self.level = args[0]
def run(self):
try:
self.cluster.set_log_level(self.level, self.options.class_name)
except common.ArgumentError as e:
print_(str(e), file=sys.stderr)
exit(1)
class ClusterInvalidatecacheCmd(Cmd):
def description(self):
return "Destroys ccm's local git cache."
def get_parser(self):
usage = "usage: ccm invalidatecache"
parser = self._get_default_parser(usage, self.description())
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args)
def run(self):
try:
common.invalidate_cache()
except Exception as e:
print_(str(e), file=sys.stderr)
print_("Error while deleting cache. Please attempt manually.")
exit(1)
class ClusterChecklogerrorCmd(Cmd):
def description(self):
return "Check for errors in log file of each node."
def get_parser(self):
usage = "usage: ccm checklogerror"
parser = self._get_default_parser(usage, self.description())
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
def run(self):
for node in self.cluster.nodelist():
errors = node.grep_log_for_errors()
for mylist in errors:
for line in mylist:
print_(line)
class ClusterShowlastlogCmd(Cmd):
def description(self):
return "Show the last.log for the most recent build through your $PAGER"
def get_parser(self):
usage = "usage: ccm showlastlog"
return self._get_default_parser(usage, self.description())
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
def run(self):
log = repository.lastlogfilename()
pager = os.environ.get('PAGER', common.platform_pager())
os.execvp(pager, (pager, log))
class ClusterJconsoleCmd(Cmd):
def description(self):
return "Opens jconsole client and connects to all running nodes"
def get_parser(self):
usage = "usage: ccm jconsole"
return self._get_default_parser(usage, self.description())
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
def run(self):
cmds = ["jconsole"] + ["localhost:%s" % node.jmx_port for node in self.cluster.nodes.values()]
try:
subprocess.call(cmds, stderr=sys.stderr)
except OSError as e:
print_("Could not start jconsole. Please make sure jconsole can be found in your $PATH.")
exit(1)
"""Classes that abstract reading from various types of filesystems.
Currently two types of 'filesystem' are supported:
* the local file system, via python's native file() objects
* Amazon's S3 or Google Storage, via the boto library (optional; these readers are available only if boto is installed)
For each filesystem, two types of reader classes are provided:
* parallel readers are intended to serve as the entry point to a Spark workflow. They provide a read() method
that itself calls the spark context's parallelize() method, setting up a workflow with one partition per file. This
method returns a Spark RDD of <string filename, string binary data>.
* file readers are intended to abstract across the supported filesystems, providing a consistent interface to several
common file and filesystem operations. These include listing files in a directory, reading the contents of a file,
and providing a file handle or handle-like object that itself supports read(), seek(), and tell() operations.
The reader classes also all support a common syntax for path specifications, including both "standard" file paths
and "URI-like" syntax with an explicitly specified scheme (for instance, "file://", "gs://" or "s3n://"). This path specification
syntax allows a single wildcard "*" character in the filename, making possible paths like
"s3n:///my-bucket/key-one/foo*.bar", referring to "every object in the S3 bucket my-bucket whose key starts with
'key-one/foo' and ends with '.bar'".
"""
import errno
import fnmatch
import glob
import itertools
import os
import urllib
import urlparse
import logging
from thunder.utils.aws import AWSCredentials, S3ConnectionWithAnon
_haveBoto = False
try:
import boto
logging.getLogger('boto').setLevel(logging.CRITICAL)
_haveBoto = True
except ImportError:
boto = None
class FileNotFoundError(IOError):
"""An exception to be thrown when reader implementations can't find a requested file.
Implementations are responsible for watching for their own appropriate exceptions and rethrowing
FileNotFoundError.
See PEP 3151 for background and inspiration.
"""
pass
def appendExtensionToPathSpec(dataPath, ext=None):
"""Helper function for consistent handling of paths given with separately passed file extensions
Returns
-------
result: string dataPath
dataPath string formed by concatenating passed `dataPath` with "*" and passed `ext`, with some
normalization as appropriate
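    Examples
    --------
    A sketch with POSIX-style paths:
    >>> appendExtensionToPathSpec('/data/images/', 'bin')
    '/data/images/*.bin'
    >>> appendExtensionToPathSpec('/data/images/foo*.bin', 'bin')
    '/data/images/foo*.bin'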
"""
if ext:
if '*' in dataPath:
# we already have a literal wildcard, which we take as a sign that the user knows
# what they're doing and don't want us overriding their path by appending extensions to it
return dataPath
elif os.path.splitext(dataPath)[1]:
# looks like we already have a literal extension specified at the end of dataPath.
# go with that.
return dataPath
else:
# no wildcard in path yet
# check whether we already end in `ext`, which suggests we've been passed a literal filename.
# prepend '.' to ext, as mild protection against the case where we have a directory 'bin' and
# are looking in it for files named '*.bin'.
if not ext.startswith('.'):
ext = '.'+ext
if not dataPath.endswith(ext):
# we have an extension and we'd like to append it.
# we assume that dataPath should be pointing to a directory at this point, but we might
# or might not have a directory separator at the end of it. add it if we don't.
if not dataPath.endswith(os.path.sep):
dataPath += os.path.sep
# return a path with "/*."+`ext` added to it.
return dataPath+'*'+ext
else:
# we are asking to append `ext`, but it looks like dataPath already ends with '.'+`ext`
return dataPath
else:
return dataPath
def selectByStartAndStopIndices(files, startIdx, stopIdx):
"""Helper function for consistent handling of start and stop indices
"""
if startIdx or stopIdx:
if startIdx is None:
startIdx = 0
if stopIdx is None:
stopIdx = len(files)
files = files[startIdx:stopIdx]
return files
def _localRead(filePath, startOffset=None, size=-1):
"""Wrapper around open(filepath, 'rb') that returns the contents of the file as a string.
Will rethrow FileNotFoundError if it receives an IOError with error number indicating that the file isn't found.
"""
try:
with open(filePath, 'rb') as f:
if startOffset:
f.seek(startOffset)
buf = f.read(size)
    except IOError as e:
if e.errno == errno.ENOENT:
raise FileNotFoundError(e)
else:
raise
return buf
class LocalFSParallelReader(object):
"""Parallel reader backed by python's native file() objects.
"""
def __init__(self, sparkContext, **kwargs):
# kwargs allow AWS credentials to be passed into generic Readers w/o exceptions being raised
# in this case kwargs are just ignored
self.sc = sparkContext
self.lastNRecs = None
@staticmethod
def uriToPath(uri):
# thanks stack overflow:
# http://stackoverflow.com/questions/5977576/is-there-a-convenient-way-to-map-a-file-uri-to-os-path
path = urllib.url2pathname(urlparse.urlparse(uri).path)
if uri and (not path):
# passed a nonempty uri, got an empty path back
# this happens when given a file uri that starts with "file://" instead of "file:///"
# error here to prevent unexpected behavior of looking at current working directory
raise ValueError("Could not interpret %s as URI. " +
"Note absolute paths in URIs should start with 'file:///', not 'file://'")
return path
@staticmethod
def _listFilesRecursive(absPath, ext=None):
filenames = set()
for root, dirs, files in os.walk(absPath):
if ext:
files = fnmatch.filter(files, '*.' + ext)
for filename in files:
filenames.add(os.path.join(root, filename))
        return sorted(filenames)
@staticmethod
def _listFilesNonRecursive(absPath, ext=None):
if os.path.isdir(absPath):
if ext:
files = glob.glob(os.path.join(absPath, '*.' + ext))
else:
files = [os.path.join(absPath, fname) for fname in os.listdir(absPath)]
else:
files = glob.glob(absPath)
# filter out directories
files = [fpath for fpath in files if not os.path.isdir(fpath)]
return sorted(files)
def listFiles(self, absPath, ext=None, startIdx=None, stopIdx=None, recursive=False):
"""Get sorted list of file paths matching passed `absPath` path and `ext` filename extension
"""
files = LocalFSParallelReader._listFilesNonRecursive(absPath, ext) if not recursive else \
LocalFSParallelReader._listFilesRecursive(absPath, ext)
if len(files) < 1:
raise FileNotFoundError('cannot find files of type "%s" in %s' % (ext if ext else '*', absPath))
files = selectByStartAndStopIndices(files, startIdx, stopIdx)
return files
def read(self, dataPath, ext=None, startIdx=None, stopIdx=None, recursive=False, npartitions=None):
"""Sets up Spark RDD across files specified by dataPath on local filesystem.
Returns RDD of <integer file index, string buffer> k/v pairs.
"""
if not hasattr(dataPath, '__iter__'):
absPath = self.uriToPath(dataPath)
filePaths = self.listFiles(absPath, ext=ext, startIdx=startIdx, stopIdx=stopIdx, recursive=recursive)
else:
filePaths = [filePath for filePath in dataPath]
lfilepaths = len(filePaths)
self.lastNRecs = lfilepaths
npartitions = min(npartitions, lfilepaths) if npartitions else lfilepaths
return self.sc.parallelize(enumerate(filePaths), npartitions).map(lambda (k, v): (k, _localRead(v)))
class _BotoClient(object):
"""
Superclass for boto-based S3 and Google storage readers.
"""
@staticmethod
def parseQuery(query, delim='/'):
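        """Parse a path query into (storageScheme, bucketName, keyName, prefix, postfix).
        Example (a sketch, following the wildcard syntax described in the
        module docstring):
        >>> _BotoClient.parseQuery("s3n:///my-bucket/key-one/foo*.bar")
        ('s3n', 'my-bucket', 'key-one/', 'foo', '.bar')
        """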
storageScheme = ''
keyName = ''
prefix = ''
postfix = ''
parseResult = urlparse.urlparse(query)
bucketName = parseResult.netloc
keyQuery = parseResult.path.lstrip(delim)
if not parseResult.scheme.lower() in ('', "gs", "s3", "s3n"):
raise ValueError("Query scheme must be one of '', 'gs', 's3', or 's3n'; got: '%s'" % parseResult.scheme)
storageScheme = parseResult.scheme.lower()
# special case handling for strings of form "/bucket/dir":
if (not bucketName.strip()) and keyQuery:
toks = keyQuery.split(delim, 1)
bucketName = toks[0]
if len(toks) == 2:
keyQuery = toks[1]
else:
keyQuery = ''
if not bucketName.strip():
raise ValueError("Could not parse bucket name from query string '%s'" % query)
keyToks = keyQuery.split("*")
nkeyToks = len(keyToks)
if nkeyToks == 0:
pass
elif nkeyToks == 1:
keyName = keyToks[0]
elif nkeyToks == 2:
rdelimIdx = keyToks[0].rfind(delim)
if rdelimIdx >= 0:
keyName = keyToks[0][:(rdelimIdx+1)]
prefix = keyToks[0][(rdelimIdx+1):] if len(keyToks[0]) > (rdelimIdx+1) else ''
else:
prefix = keyToks[0]
postfix = keyToks[1]
else:
raise ValueError("Only one wildcard ('*') allowed in query string, got: '%s'" % query)
return storageScheme, bucketName, keyName, prefix, postfix
@staticmethod
def checkPrefix(bucket, keyPath, delim='/'):
return len(bucket.get_all_keys(prefix=keyPath, delimiter=delim, max_keys=1)) > 0
@staticmethod
def filterPredicate(key, post, inclusive=False):
kname = key.name
keyEndsWithPostfix = kname.endswith(post)
return keyEndsWithPostfix if inclusive else not keyEndsWithPostfix
@staticmethod
def retrieveKeys(bucket, key, prefix='', postfix='', delim='/', includeDirectories=False,
recursive=False):
if key and prefix:
assert key.endswith(delim)
keyPath = key+prefix
# if we are asking for a key that doesn't end in a delimiter, check whether it might
# actually be a directory
if not keyPath.endswith(delim) and keyPath:
# not all directories have actual keys associated with them
# check for matching prefix instead of literal key:
if _BotoClient.checkPrefix(bucket, keyPath+delim, delim=delim):
# found a directory; change path so that it explicitly refers to directory
keyPath += delim
listDelim = delim if not recursive else None
results = bucket.list(prefix=keyPath, delimiter=listDelim)
if postfix:
return itertools.ifilter(lambda k_: _BotoClient.filterPredicate(k_, postfix, inclusive=True), results)
elif not includeDirectories:
return itertools.ifilter(lambda k_: _BotoClient.filterPredicate(k_, delim, inclusive=False), results)
else:
return results
def __init__(self, awsCredentialsOverride=None):
"""Initialization; validates that AWS keys are available as environment variables.
Will let boto library look up credentials itself according to its own rules - e.g. first looking for
AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY, then going through several possible config files and finally
looking for a ~/.aws/credentials .ini-formatted file. See boto docs:
http://boto.readthedocs.org/en/latest/boto_config_tut.html
However, if an AWSCredentials object is provided, its `awsAccessKeyId` and `awsSecretAccessKey` attributes
will be used instead of those found by the standard boto credential lookup process.
"""
if not _haveBoto:
raise ValueError("The boto package does not appear to be available; boto is required for BotoReader")
self.awsCredentialsOverride = awsCredentialsOverride if awsCredentialsOverride else AWSCredentials()
class BotoParallelReader(_BotoClient):
"""
Parallel reader backed by boto AWS client library.
"""
def __init__(self, sparkContext, awsCredentialsOverride=None):
super(BotoParallelReader, self).__init__(awsCredentialsOverride=awsCredentialsOverride)
self.sc = sparkContext
self.lastNRecs = None
def _listFilesImpl(self, dataPath, ext=None, startIdx=None, stopIdx=None, recursive=False):
parse = _BotoClient.parseQuery(dataPath)
storageScheme = parse[0]
bucketName = parse[1]
if storageScheme == 's3' or storageScheme == 's3n':
conn = S3ConnectionWithAnon(*self.awsCredentialsOverride.credentials)
bucket = conn.get_bucket(parse[1])
elif storageScheme == 'gs':
conn = boto.storage_uri(bucketName, 'gs')
bucket = conn.get_bucket()
else:
raise NotImplementedError("No file reader implementation for URL scheme " + storageScheme)
keys = _BotoClient.retrieveKeys(bucket, parse[2], prefix=parse[3], postfix=parse[4], recursive=recursive)
keyNameList = [key.name for key in keys]
if ext:
keyNameList = [keyname for keyname in keyNameList if keyname.endswith(ext)]
keyNameList.sort()
keyNameList = selectByStartAndStopIndices(keyNameList, startIdx, stopIdx)
return storageScheme, bucket.name, keyNameList
def listFiles(self, dataPath, ext=None, startIdx=None, stopIdx=None, recursive=False):
storageScheme, bucketname, keyNames = self._listFilesImpl(dataPath, ext=ext, startIdx=startIdx, stopIdx=stopIdx,
recursive=recursive)
return ["%s:///%s/%s" % (storageScheme, bucketname, keyname) for keyname in keyNames]
def read(self, dataPath, ext=None, startIdx=None, stopIdx=None, recursive=False, npartitions=None):
"""Sets up Spark RDD across S3 or GS objects specified by dataPath.
Returns RDD of <string bucket keyname, string buffer> k/v pairs.
"""
dataPath = appendExtensionToPathSpec(dataPath, ext)
storageScheme, bucketName, keyNameList = self._listFilesImpl(dataPath, startIdx=startIdx, stopIdx=stopIdx, recursive=recursive)
if not keyNameList:
raise FileNotFoundError("No objects found for '%s'" % dataPath)
access, secret = self.awsCredentialsOverride.credentials
def readSplitFromBoto(kvIter):
if storageScheme == 's3' or storageScheme == 's3n':
conn = S3ConnectionWithAnon(access, secret)
bucket = conn.get_bucket(bucketName)
elif storageScheme == 'gs':
conn = boto.storage_uri(bucketName, 'gs')
bucket = conn.get_bucket()
else:
raise NotImplementedError("No file reader implementation for URL scheme " + storageScheme)
for kv in kvIter:
idx, keyName = kv
key = bucket.get_key(keyName)
buf = key.get_contents_as_string()
yield idx, buf
self.lastNRecs = len(keyNameList)
npartitions = min(npartitions, self.lastNRecs) if npartitions else self.lastNRecs
return self.sc.parallelize(enumerate(keyNameList), npartitions).mapPartitions(readSplitFromBoto)
class LocalFSFileReader(object):
"""File reader backed by python's native file() objects.
"""
def __init__(self, **kwargs):
# do nothing; allows AWS access keys to be passed in to a generic Reader instance w/o blowing up
pass
def __listRecursive(self, dataPath):
if os.path.isdir(dataPath):
dirname = dataPath
matchpattern = None
else:
dirname, matchpattern = os.path.split(dataPath)
filenames = set()
for root, dirs, files in os.walk(dirname):
if matchpattern:
files = fnmatch.filter(files, matchpattern)
for filename in files:
filenames.add(os.path.join(root, filename))
filenames = list(filenames)
filenames.sort()
return filenames
def list(self, dataPath, filename=None, startIdx=None, stopIdx=None, recursive=False,
includeDirectories=False):
"""List files specified by dataPath.
        dataPath may include a single wildcard ('*') in the filename specifier.
        Returns a sorted list of absolute path strings.
"""
absPath = LocalFSParallelReader.uriToPath(dataPath)
if (not filename) and recursive:
return self.__listRecursive(absPath)
if filename:
if os.path.isdir(absPath):
absPath = os.path.join(absPath, filename)
else:
absPath = os.path.join(os.path.dirname(absPath), filename)
else:
if os.path.isdir(absPath) and not includeDirectories:
absPath = os.path.join(absPath, "*")
files = glob.glob(absPath)
# filter out directories
if not includeDirectories:
files = [fpath for fpath in files if not os.path.isdir(fpath)]
files.sort()
files = selectByStartAndStopIndices(files, startIdx, stopIdx)
return files
def read(self, dataPath, filename=None, startOffset=None, size=-1):
filenames = self.list(dataPath, filename=filename)
if not filenames:
raise FileNotFoundError("No file found matching: '%s'" % dataPath)
if len(filenames) > 1:
raise ValueError("Found multiple files matching: '%s'" % dataPath)
return _localRead(filenames[0], startOffset=startOffset, size=size)
def open(self, dataPath, filename=None):
filenames = self.list(dataPath, filename=filename)
if not filenames:
raise FileNotFoundError("No file found matching: '%s'" % dataPath)
if len(filenames) > 1:
raise ValueError("Found multiple files matching: '%s'" % dataPath)
return open(filenames[0], 'rb')
class BotoFileReader(_BotoClient):
"""File reader backed by the boto AWS client library.
"""
def __getMatchingKeys(self, dataPath, filename=None, includeDirectories=False, recursive=False):
parse = _BotoClient.parseQuery(dataPath)
storageScheme = parse[0]
bucketName = parse[1]
keyName = parse[2]
if storageScheme == 's3' or storageScheme == 's3n':
conn = S3ConnectionWithAnon(*self.awsCredentialsOverride.credentials)
bucket = conn.get_bucket(bucketName)
elif storageScheme == 'gs':
conn = boto.storage_uri(bucketName, 'gs')
bucket = conn.get_bucket()
else:
raise NotImplementedError("No file reader implementation for URL scheme " + storageScheme)
if filename:
# check whether last section of dataPath refers to a directory
if not keyName.endswith("/"):
if self.checkPrefix(bucket, keyName + "/"):
# keyname is a directory, but we've omitted the trailing "/"
keyName += "/"
else:
# assume keyname refers to an object other than a directory
# look for filename in same directory as keyname
slashIdx = keyName.rfind("/")
if slashIdx >= 0:
keyName = keyName[:(slashIdx+1)]
else:
# no directory separators, so our object is in the top level of the bucket
keyName = ""
keyName += filename
return (storageScheme, _BotoClient.retrieveKeys(bucket, keyName, prefix=parse[3], postfix=parse[4],
includeDirectories=includeDirectories, recursive=recursive))
def list(self, dataPath, filename=None, startIdx=None, stopIdx=None, recursive=False, includeDirectories=False):
"""List objects specified by dataPath.
Returns sorted list of 'gs://' or 's3n://' URIs.
"""
storageScheme, keys = self.__getMatchingKeys(dataPath, filename=filename,
includeDirectories=includeDirectories,
recursive=recursive)
keyNames = [storageScheme + ":///" + key.bucket.name + "/" + key.name for key in keys]
keyNames.sort()
keyNames = selectByStartAndStopIndices(keyNames, startIdx, stopIdx)
return keyNames
def __getSingleMatchingKey(self, dataPath, filename=None):
storageScheme, keys = self.__getMatchingKeys(dataPath, filename=filename)
# keys is probably a lazy-loading ifilter iterable
try:
key = keys.next()
except StopIteration:
raise FileNotFoundError("Could not find object for: '%s'" % dataPath)
# we expect to only have a single key returned
nextKey = None
try:
nextKey = keys.next()
except StopIteration:
pass
if nextKey:
raise ValueError("Found multiple keys for: '%s'" % dataPath)
return storageScheme, key
def read(self, dataPath, filename=None, startOffset=None, size=-1):
storageScheme, key = self.__getSingleMatchingKey(dataPath, filename=filename)
if startOffset or (size > -1):
# specify Range header in boto request
# see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35
# and: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
if not startOffset:
startOffset = 0
if size > -1:
sizeStr = startOffset + size - 1 # range header is inclusive
else:
sizeStr = ""
hdrs = {"Range": "bytes=%d-%s" % (startOffset, sizeStr)}
return key.get_contents_as_string(headers=hdrs)
else:
return key.get_contents_as_string()
def open(self, dataPath, filename=None):
storageScheme, key = self.__getSingleMatchingKey(dataPath, filename=filename)
return BotoReadFileHandle(storageScheme, key)
class BotoReadFileHandle(object):
"""Read-only file handle-like object exposing a subset of file methods.
Returned by BotoFileReader's open() method.
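    Example (a sketch; assumes boto is installed and the object exists):
    >>> fh = BotoFileReader().open("s3n:///my-bucket/path/data.bin")
    >>> header = fh.read(16)
    >>> fh.seek(0)
    >>> fh.close()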
"""
def __init__(self, storageScheme, key):
self._storageScheme = storageScheme
self._key = key
self._closed = False
self._offset = 0
def close(self):
try:
self._key.close(fast=True)
except TypeError:
# workaround for early versions of boto that don't have the 'fast' keyword
self._key.close()
self._closed = True
def read(self, size=-1):
if self._offset or (size > -1):
# return empty string to indicate EOF if we are offset past the end of the file
# else boto will throw an error at us
if self._offset >= self._key.size:
return ""
if size > -1:
sizeStr = str(self._offset + size - 1) # range header is inclusive
else:
sizeStr = ""
hdrs = {"Range": "bytes=%d-%s" % (self._offset, sizeStr)}
else:
hdrs = {}
buf = self._key.get_contents_as_string(headers=hdrs)
self._offset += len(buf)
return buf
def seek(self, offset, whence=0):
if whence == 0:
self._offset = offset
elif whence == 1:
self._offset += offset
elif whence == 2:
self._offset = self._key.size + offset
else:
raise IOError("Invalid 'whence' argument, must be 0, 1, or 2. See file().seek.")
def tell(self):
return self._offset
@property
def closed(self):
return self._closed
@property
def name(self):
return self._storageScheme + ":///" + self._key.bucket.name + "/" + self._key.name
@property
def mode(self):
return "rb"
SCHEMAS_TO_PARALLELREADERS = {
'': LocalFSParallelReader,
'file': LocalFSParallelReader,
'gs': BotoParallelReader,
's3': BotoParallelReader,
's3n': BotoParallelReader,
'hdfs': None,
'http': None,
'https': None,
'ftp': None
}
SCHEMAS_TO_FILEREADERS = {
'': LocalFSFileReader,
'file': LocalFSFileReader,
'gs': BotoFileReader,
's3': BotoFileReader,
's3n': BotoFileReader,
'hdfs': None,
'http': None,
'https': None,
'ftp': None
}
def getByScheme(dataPath, lookup, default):
"""Helper function used by get*ForPath().
"""
if hasattr(dataPath, '__iter__'):
        # an iterable of paths has no scheme to parse; fall back to the
        # default class for this lookup
        clazz = default
else:
parseresult = urlparse.urlparse(dataPath)
clazz = lookup.get(parseresult.scheme, default)
if clazz is None:
raise NotImplementedError("No implementation for scheme " + parseresult.scheme)
return clazz
def getParallelReaderForPath(dataPath):
"""Returns the class of a parallel reader suitable for the scheme used by `dataPath`.
The resulting class object must still be instantiated in order to get a usable instance of the class.
Throws NotImplementedError if the requested scheme is explicitly not supported (e.g. "ftp://").
Returns LocalFSParallelReader if scheme is absent or not recognized.
"""
return getByScheme(dataPath, SCHEMAS_TO_PARALLELREADERS, LocalFSParallelReader)
def getFileReaderForPath(dataPath):
"""Returns the class of a file reader suitable for the scheme used by `dataPath`.
The resulting class object must still be instantiated in order to get a usable instance of the class.
Throws NotImplementedError if the requested scheme is explicitly not supported (e.g. "ftp://").
Returns LocalFSFileReader if scheme is absent or not recognized.
"""
return getByScheme(dataPath, SCHEMAS_TO_FILEREADERS, LocalFSFileReader)
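# Illustrative sketch of the scheme dispatch above (hypothetical paths; the
# returned class must still be instantiated with whatever arguments the
# concrete reader expects):
#
#     getFileReaderForPath("/tmp/records.bin")             # -> LocalFSFileReader
#     getFileReaderForPath("s3n:///bucket/records.bin")    # -> BotoFileReader
#     getParallelReaderForPath("ftp://host/records.bin")   # -> NotImplementedError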
|
|
from array import array
from heapdict import heapdict
from .win_detector import WinDetector
from .color import IllegalAction, COLOR_NONE, COLOR_BLACK, COLOR_WHITE, ORIENTATION, COLOR_SYMBOLS, \
NUM_PLAYERS, color_to_player, next_player, player_to_color, cell_str, \
cell_str_to_cell
def prod(*l):
s = 1
for e in l:
s *= e
return s
class Board(object):
_NEIGHBOR_PATTERNS = (
(-1, 0), # North
(0, -1), # West
(-1, 1), # Northeast
(0, 1), # East
(1, 0), # South
(1, -1) # Southwest
)
EDGES = (-1, -2)
def __init__(self, *dimensions):
self._dimensions = list(dimensions)
if len(self._dimensions) < 1:
self._dimensions.append(10)
if len(self._dimensions) < 2:
self._dimensions.append(self._dimensions[0])
self._cells = array('I', [COLOR_NONE] * len(self))
self._actions = [[] for _ in range(NUM_PLAYERS)]
self._empty_cells = {}
self._my_cells = []
for player in range(NUM_PLAYERS):
self._my_cells.append({})
for a in range(len(self)):
self._empty_cells[a] = True
def __len__(self):
return prod(*self.size())
def is_empty(self, *cell):
return self.cell_index(*cell) in self._empty_cells
def color(self, row, column):
return self._cells[self.cell_index(row, column)]
def my_cells(self, player):
return self._my_cells[player].keys()
def is_color(self, player, row, column, color):
return self._cells[self.cell_index(row, column)] == color
def size(self):
return self._dimensions
def num_rows(self):
return self.size()[0]
def num_columns(self):
return self.size()[1]
def cell_index(self, row, column):
return (column * self.num_rows()) + row
def row(self, cell_index):
return cell_index % self.num_rows()
def column(self, cell_index):
return cell_index // self.num_rows()
def empty_cells(self):
for action in self._empty_cells.keys():
yield self.row(action), self.column(action)
def cells(self):
        for row in range(self.num_rows()):
            for column in range(self.num_columns()):
yield row, column
def with_action_applied(self, action, player):
color = self.play(action, player)
yield color
self.undo(player)
def last_action(self, player):
if len(self._actions[player]) > 0:
return self._actions[player][-1]
def undo(self, player):
'''Undo `player`'s last action.'''
action = self._actions[player].pop()
self._cells[action] = COLOR_NONE
self._empty_cells[action] = True
del self._my_cells[player][action]
return action
def play(self, action, player):
'''Apply `action` on behalf of `player`.
Returns the color of the cell after attempting to set it to the
player's color.
'''
color = player_to_color(player)
opponent = next_player(player)
# Already has a stone.
if self._cells[action] != COLOR_NONE:
raise IllegalAction(
("Attempted to place for {} but collided with {} stone already"
" on cell {}").format(
color, color, (self.row(action), self.column(action))
)
)
else: # Set to player's color
self._cells[action] = color
self._my_cells[player][action] = True
self._actions[player].append(action)
del self._empty_cells[action]
return color
def num_legal_actions(self):
return len(self._empty_cells)
def legal_actions(self):
for action in self._empty_cells.keys():
yield action
def is_valid_cell(self, row, column):
return (
0 <= row and row < self.num_rows() and
0 <= column and column < self.num_columns()
)
def every_neighbor(self, row, column):
"""Generate neighbors of `cell`."""
for (neighbor_row_offset,
neighbor_column_offset) in self._NEIGHBOR_PATTERNS:
neighbor_row = row + neighbor_row_offset
neighbor_column = column + neighbor_column_offset
if self.is_valid_cell(neighbor_row, neighbor_column):
yield (neighbor_row, neighbor_column)
def every_legal_neighbor(self, *cell):
"""Generate neighbors of `cell` that can be played on by `player`."""
for neighbor in self.every_neighbor(*cell):
if self.cell_index(*neighbor) in self._empty_cells:
yield neighbor
def border_cells(self, player, edge):
"""Return a list of cells bordering edge for player."""
cells = []
if edge == self.EDGES[0]:
for i in range(self.size()[next_player(player)]):
if player == COLOR_BLACK:
cells.append((0, i))
else:
cells.append((i, 0))
else:
for i in range(self.size()[next_player(player)]):
if player == COLOR_BLACK:
cells.append((self.size()[player] - 1, i))
else:
cells.append((i, self.size()[player] - 1))
return cells
def connected_neighbors(self, cell, player, seen=None):
"""
Yield all empty cells connected to cell.
Connected cells are adjacent or connected to cell by cells of player's
color.
"""
if seen is None:
seen = set()
seen.add(cell)
if cell in self.EDGES:
cells = self.border_cells(player, cell)
for edge_cell in cells:
if edge_cell not in seen:
seen.add(edge_cell)
if self.cell_index(*edge_cell) in self._empty_cells:
yield edge_cell
elif self.cell_index(*edge_cell) in self._my_cells[player]:
for connected_cell in self.connected_neighbors(
edge_cell, player, seen):
yield connected_cell
return
for neighbor in self.every_neighbor(*cell):
if neighbor not in seen:
seen.add(neighbor)
if self.cell_index(*neighbor) in self._empty_cells:
yield neighbor
elif self.cell_index(*neighbor) in self._my_cells[player]:
for connected_cell in self.connected_neighbors(
neighbor, player, seen):
yield connected_cell
if cell[player] == 0:
yield self.EDGES[0]
if cell[player] == self.size()[player] - 1:
yield self.EDGES[1]
def length_separating_goal_sides(self, player):
return self.size()[player]
def __str__(self):
"""Return an ASCII representation."""
return self._to_s(self.color)
def _to_s(self, get_color_fn):
"""Return an ASCII representation."""
ret = '\n'
coord_size = len(str(self.num_rows()))
offset = 1
ret += ' ' * (offset + 1)
for x in range(self.num_columns()):
ret += chr(ord('A') + x) + ' ' * offset * 2
ret = ret.rstrip() + '\n'
for y in range(self.num_rows()):
ret += str(y + 1) + ' ' * (offset * 2 +
coord_size - len(str(y + 1)))
for x in range(self.num_columns()):
ret += COLOR_SYMBOLS[get_color_fn(y, x)]
ret += ' ' * offset * 2
ret += COLOR_SYMBOLS[COLOR_WHITE]
ret = ret.rstrip() + "\n" + ' ' * offset * (y + 1)
ret += (
(' ' * (offset * 2 + 1)) +
(COLOR_SYMBOLS[COLOR_BLACK] + ' ' * offset * 2) *
self.num_columns()
)
return ret.rstrip()
def dijkstra_distance(self, player, source, destination):
"""
Return the two distance between source and destination for player.
The two distance is 0 if source = destination, 1 if source is adjacent
to destination, and 1 + the second smallest two distance between
source and destination's neighbors otherwise.
"""
cell_set = heapdict()
second = {}
for cell in self.empty_cells():
cell_set[cell] = float("INF")
second[cell] = float("INF")
for edge in self.EDGES:
cell_set[edge] = float("INF")
second[cell] = float("INF")
cell_set[source] = 0
second[source] = 0
while cell_set:
cell, distance = cell_set.popitem()
if cell == destination:
return second[cell]
for neighbor in self.connected_neighbors(cell, player):
if neighbor not in cell_set:
continue
if cell == source:
cell_set[neighbor] = 1
second[neighbor] = 1
else:
alternate = distance + 1
if alternate <= cell_set[neighbor]:
second[neighbor] = cell_set[neighbor]
cell_set[neighbor] = alternate
return second[destination]
class GameState(object):
"""Represents the current state of a game of hex."""
@classmethod
    def clean_board(cls, *dimensions):
        return Board(*dimensions)
@classmethod
    def root(cls, *dimensions):
        """Initialize the game board and give White the first turn."""
        if len(dimensions) < 1:
            num_rows = 6
        else:
            num_rows = dimensions[0]
        num_columns = num_rows if len(dimensions) < 2 else dimensions[1]
        return cls(
            color_to_player(COLOR_WHITE),
            cls.clean_board(num_rows, num_columns),
            WinDetector.root(NUM_PLAYERS)
        )
def reset(self):
"""Reset the board."""
player = color_to_player(COLOR_WHITE)
board = self.clean_board(*self.board.size())
win_detector = WinDetector.root(NUM_PLAYERS)
        self.__init__(player, board, win_detector)
def __init__(self, acting_player, board, win_detector):
self._acting_player = acting_player
self._previous_acting_players = []
self.board = board
self.win_detector = win_detector
self._potentially_winning_moves = None
def __getitem__(self, cell):
return self.board.color(*cell)
def last_action(self):
if len(self._previous_acting_players) > 0:
return self.board.last_action(self._previous_acting_players[-1])
def player_who_acted_last(self):
return self._previous_acting_players[-1] \
if self._previous_acting_players \
else None
def could_terminate_in_one_action(self, player=None):
'''
Returns whether or not `player` can end the game in one
action.
`player`: Player index, {0, 1}. Defaults to `player_to_act`.
'''
        if player is None:
            player = self._acting_player
        return (self.num_actions_taken(player)
                >= (self.board.length_separating_goal_sides(player) - 1))
def potentially_winning_moves(self):
if self._potentially_winning_moves is not None:
for m in self._potentially_winning_moves.keys():
yield m
else:
self._potentially_winning_moves = {}
if self.could_terminate_in_one_action():
original_group = (self.win_detector
.current_groups[self._acting_player]
.copy_raw())
color = player_to_color(self._acting_player)
for my_cell_index in self.board.my_cells(self._acting_player):
                    for cell in self.board.every_legal_neighbor(
                            self.board.row(my_cell_index),
                            self.board.column(my_cell_index)):
a = self.board.cell_index(*cell)
if a not in self._potentially_winning_moves:
self.win_detector.imagined_update(
self.board, self._acting_player, cell, a)
if self.win_detector.check_if_winner(
self._acting_player):
(self.win_detector
.current_groups[self._acting_player]
.copy_from_raw(original_group))
self.win_detector._winner = COLOR_NONE
self._potentially_winning_moves[a] = True
yield a
else:
(self.win_detector
.current_groups[self._acting_player]
.copy_from_raw(original_group))
def is_empty(self, cell):
return self.board.is_empty(*cell)
def num_actions_taken(self, player):
return len(self.win_detector.history_of_player_groups[player])
def with_action_applied(self, action):
'''Not meant to be used recursively.
This is a faster version of #play that does less bookkeeping.
Useful when one wants to examine the state after playing but
does not need to do this recursively or check if the game has ended.
'''
for _ in self.board.with_action_applied(action, self._acting_player):
yield
def previous_action_was_a_collision(self, previous_player=None):
if previous_player is None:
previous_player = self._previous_acting_players[-1]
return previous_player == self._acting_player
def undo(self):
action = None
if self._previous_acting_players:
previous_player = self._previous_acting_players.pop()
if previous_player != self._acting_player:
self.win_detector.undo(previous_player)
self._acting_player = previous_player
action = self.board.undo(previous_player)
self._potentially_winning_moves = None
return action
def play(self, action):
'''Apply the given action.
`action` must be in the set of legal actions
(see `legal_actions`).
Return `self`.
'''
self.place(action, self._acting_player)
return self
def __enter__(self):
'''Allows the following type of code:
```
with state.play(action):
# Do something with `state` after `action`
# has been applied to `state`.
            # `action` will be undone automatically on exiting the block.
        ```
        '''
        return self
def __exit__(self,
exception_type,
exception_val,
exception_traceback):
'''Allows the following type of code:
```
with state.play(action):
# Do something with `state` after `action`
# has been applied to `state`.
            # `action` will be undone automatically on exiting the block.
        ```
        '''
self.undo()
def place(self, action, player):
"""
Place a stone for the given player regardless of whose turn it is.
"""
if self.is_terminal():
raise IllegalAction(
"Game has finished and {} is the winner".format(self.winner())
)
color_after_placing = self.board.play(action, player)
self._previous_acting_players.append(player)
self._update_state(action, player, color_after_placing)
self._potentially_winning_moves = None
def score(self, player):
if self.is_terminal():
return 1 if player == self.winner() else -1
else:
return None
def winner(self):
return self.win_detector.winner()
def is_terminal(self):
return self.win_detector.is_terminal()
def player_to_act(self):
return self._acting_player
def set_player_to_act(self, player):
self._acting_player = player
def _update_state(
self, action, viewing_player, color_of_cell_that_changed):
color = player_to_color(viewing_player)
if color_of_cell_that_changed == color:
self.win_detector.update(
self.board,
viewing_player,
(self.board.row(action), self.board.column(action)),
action
)
self._acting_player = next_player(viewing_player)
def legal_actions(self):
for a in self.board.legal_actions():
yield a
def num_legal_actions(self):
return 0 if self.is_terminal() else self.board.num_legal_actions()
def __str__(self):
"""Print an ascii representation of the game board."""
return str(self.board)
def heuristic(self, player):
"""
        Return a heuristic for player based on the two-distance.
The value is between -1 and 1, with higher values representing better
states for player.
"""
dist1 = self.board.dijkstra_distance(player, -1, -2)
dist2 = self.board.dijkstra_distance(player, -2, -1)
opponent1 = self.board.dijkstra_distance(next_player(player), -1, -2)
opponent2 = self.board.dijkstra_distance(next_player(player), -2, -1)
result = min(opponent1, opponent2) - min(dist1, dist2)
limit = min(self.board.size())
result = max(-limit, min(limit, result))
return 1.0 * result / limit
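def _example_game():
    """Minimal usage sketch (illustrative only): apply one move inside the
    `with` form, which undoes it on exit, then apply it permanently."""
    state = GameState.root(6)  # 6x6 board, White acts first
    action = next(state.legal_actions())
    with state.play(action):
        print(state)  # `action` is applied here...
    # ...and has been undone again by this point
    state.play(action)  # now apply it for good
    return state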
|
|
"""Pylons Decorators
Common decorators intended for use in controllers. Additional
decorators for use with controllers are in the
:mod:`~pylons.decorators.cache`, :mod:`~pylons.decorators.rest` and
:mod:`~pylons.decorators.secure` modules.
"""
import logging
import sys
import warnings
import formencode
import simplejson
from decorator import decorator
from formencode import api, htmlfill, variabledecode
from webob import UnicodeMultiDict
from pylons.decorators.util import get_pylons
from pylons.i18n import _ as pylons_gettext
__all__ = ['jsonify', 'validate']
log = logging.getLogger(__name__)
def jsonify(func, *args, **kwargs):
"""Action decorator that formats output for JSON
Given a function that will return content, this decorator will turn
the result into JSON, with a content-type of 'application/json' and
output it.
"""
pylons = get_pylons(args)
pylons.response.headers['Content-Type'] = 'application/json'
data = func(*args, **kwargs)
if isinstance(data, (list, tuple)):
msg = "JSON responses with Array envelopes are susceptible to " \
"cross-site data leak attacks, see " \
"http://pylonshq.com/warnings/JSONArray"
warnings.warn(msg, Warning, 2)
log.warning(msg)
log.debug("Returning JSON wrapped action output")
return simplejson.dumps(data)
jsonify = decorator(jsonify)
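# Illustrative usage (hypothetical controller; ``BaseController`` would come
# from the application's ``lib.base`` in a real Pylons project):
#
#     class HealthController(BaseController):
#         @jsonify
#         def status(self):
#             return {'state': 'ok', 'pending': 0}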
def validate(schema=None, validators=None, form=None, variable_decode=False,
dict_char='.', list_char='-', post_only=True, state=None,
on_get=False, **htmlfill_kwargs):
"""Validate input either for a FormEncode schema, or individual
validators
Given a form schema or dict of validators, validate will attempt to
validate the schema or validator list.
If validation was successful, the valid result dict will be saved
as ``self.form_result``. Otherwise, the action will be re-run as if
it was a GET, and the output will be filled by FormEncode's
htmlfill to fill in the form field errors.
``schema``
Refers to a FormEncode Schema object to use during validation.
``form``
Method used to display the form, which will be used to get the
HTML representation of the form for error filling.
``variable_decode``
Boolean to indicate whether FormEncode's variable decode
function should be run on the form input before validation.
``dict_char``
Passed through to FormEncode. Toggles the form field naming
scheme used to determine what is used to represent a dict. This
option is only applicable when used with variable_decode=True.
``list_char``
Passed through to FormEncode. Toggles the form field naming
scheme used to determine what is used to represent a list. This
option is only applicable when used with variable_decode=True.
``post_only``
        Boolean that indicates whether only POST variables (and not GET
        query variables) should be used during validation. Defaults to
        True.
.. warning::
``post_only`` applies to *where* the arguments to be
validated come from. It does *not* restrict the form to
only working with post, merely only checking POST vars.
``state``
Passed through to FormEncode for use in validators that utilize
a state object.
``on_get``
Whether to validate on GET requests. By default only POST
requests are validated.
Example::
class SomeController(BaseController):
def create(self, id):
return render('/myform.mako')
            @validate(schema=model.forms.myschema(), form='create')
def update(self, id):
# Do something with self.form_result
pass
"""
if state is None:
state = PylonsFormEncodeState
def wrapper(func, self, *args, **kwargs):
"""Decorator Wrapper function"""
request = self._py_object.request
errors = {}
        # Skip the validation if on_get is False and it's a GET request
if not on_get and request.environ['REQUEST_METHOD'] == 'GET':
return func(self, *args, **kwargs)
# If they want post args only, use just the post args
if post_only:
params = request.POST
else:
params = request.params
is_unicode_params = isinstance(params, UnicodeMultiDict)
params = params.mixed()
if variable_decode:
log.debug("Running variable_decode on params")
decoded = variabledecode.variable_decode(params, dict_char,
list_char)
else:
decoded = params
if schema:
log.debug("Validating against a schema")
try:
self.form_result = schema.to_python(decoded, state)
except formencode.Invalid, e:
errors = e.unpack_errors(variable_decode, dict_char, list_char)
if validators:
log.debug("Validating against provided validators")
if isinstance(validators, dict):
if not hasattr(self, 'form_result'):
self.form_result = {}
for field, validator in validators.iteritems():
try:
self.form_result[field] = \
validator.to_python(decoded.get(field), state)
except formencode.Invalid, error:
errors[field] = error
if errors:
log.debug("Errors found in validation, parsing form with htmlfill "
"for errors")
request.environ['REQUEST_METHOD'] = 'GET'
self._py_object.c.form_errors = errors
# If there's no form supplied, just continue with the current
# function call.
if not form:
return func(self, *args, **kwargs)
request.environ['pylons.routes_dict']['action'] = form
response = self._dispatch_call()
# XXX: Legacy WSGIResponse support
legacy_response = False
if hasattr(response, 'content'):
form_content = ''.join(response.content)
legacy_response = True
else:
form_content = response
response = self._py_object.response
# If the form_content is an exception response, return it
if hasattr(form_content, '_exception'):
return form_content
# Ensure htmlfill can safely combine the form_content, params and
# errors variables (that they're all of the same string type)
if not is_unicode_params:
log.debug("Raw string form params: ensuring the '%s' form and "
"FormEncode errors are converted to raw strings for "
"htmlfill", form)
encoding = determine_response_charset(response)
            # WSGIResponse's content may (though unlikely) be unicode
if isinstance(form_content, unicode):
form_content = form_content.encode(encoding,
response.errors)
# FormEncode>=0.7 errors are unicode (due to being localized
# via ugettext). Convert any of the possible formencode
# unpack_errors formats to contain raw strings
errors = encode_formencode_errors(errors, encoding,
response.errors)
elif not isinstance(form_content, unicode):
log.debug("Unicode form params: ensuring the '%s' form is "
"converted to unicode for htmlfill", form)
encoding = determine_response_charset(response)
form_content = form_content.decode(encoding)
form_content = htmlfill.render(form_content, defaults=params,
errors=errors, **htmlfill_kwargs)
if legacy_response:
# Let the Controller merge the legacy response
response.content = form_content
return response
else:
return form_content
return func(self, *args, **kwargs)
return decorator(wrapper)
def determine_response_charset(response):
"""Determine the charset of the specified Response object,
returning the default system encoding when none is set"""
charset = response.charset
if charset is None:
charset = sys.getdefaultencoding()
log.debug("Determined result charset to be: %s", charset)
return charset
def encode_formencode_errors(errors, encoding, encoding_errors='strict'):
"""Encode any unicode values contained in a FormEncode errors dict
to raw strings of the specified encoding"""
if errors is None or isinstance(errors, str):
        # None, or just in case this is FormEncode<=0.7
pass
elif isinstance(errors, unicode):
errors = errors.encode(encoding, encoding_errors)
elif isinstance(errors, dict):
for key, value in errors.iteritems():
errors[key] = encode_formencode_errors(value, encoding,
encoding_errors)
else:
# Fallback to an iterable (a list)
errors = [encode_formencode_errors(error, encoding, encoding_errors)
for error in errors]
return errors
def pylons_formencode_gettext(value):
"""Translates a string ``value`` using pylons gettext first and if
that fails, formencode gettext.
This allows to "merge" localized error messages from built-in
FormEncode's validators with application-specific validators.
"""
trans = pylons_gettext(value)
if trans == value:
# translation failed, try formencode
trans = api._stdtrans(value)
return trans
class PylonsFormEncodeState(object):
"""A ``state`` for FormEncode validate API that includes smart
``_`` hook.
The FormEncode library used by validate() decorator has some
provision for localizing error messages. In particular, it looks
for attribute ``_`` in the application-specific state object that
gets passed to every ``.to_python()`` call. If it is found, the
``_`` is assumed to be a gettext-like function and is called to
localize error messages.
One complication is that FormEncode ships with localized error
messages for standard validators so the user may want to re-use
them instead of gathering and translating everything from scratch.
To allow this, we pass as ``_`` a function which looks up
translation both in application and formencode message catalogs.
"""
_ = staticmethod(pylons_formencode_gettext)
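# Illustrative usage (hypothetical schema and action): PylonsFormEncodeState is
# already the default ``state`` for validate(), so FormEncode will call its
# ``_`` hook and consult the pylons message catalog before falling back to
# FormEncode's own translations:
#
#     @validate(schema=MySchema(), form='edit', state=PylonsFormEncodeState)
#     def save(self, id):
#         # self.form_result holds the validated values here
#         pass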
|
|
import pdb
import numpy as np
from astropy.wcs import WCS
from shift import shift_twod
from VieroLibrary.dist_idl import dist_idl
from lmfit import Parameters, minimize, fit_report
from smoothmap import smoothmap
from gauss_kern import gauss_kern
def simultaneous_stack_array_oned(p, layers_1d, data1d, err1d = None):
''' Function to Minimize written specifically for lmfit '''
v = p.valuesdict()
len_model = len(data1d)
    nlayers = len(layers_1d) // len_model
model = np.zeros(len_model)
for i in range(nlayers):
model[:] += layers_1d[i*len_model:(i+1)*len_model] * v['layer'+str(i)]
if err1d is None:
return (data1d - model)
return (data1d - model)/err1d
def simultaneous_stack_array(p, layers_2d, data, err = None):
''' Function to Minimize written specifically for lmfit '''
v = p.valuesdict()
csize = np.shape(layers_2d)
model = np.zeros(csize[1])
for i in range(csize[0]):
model += layers_2d[i,:] * v['layer'+str(i)]
if err is None:
return (data - model)
return (data - model)/err
def circle_mask(pixmap,radius_in,pixres):
''' Makes a 2D circular image of zeros and ones'''
radius=radius_in/pixres
xy = np.shape(pixmap)
xx = xy[0]
yy = xy[1]
    beforex = np.log2(xx)
    beforey = np.log2(yy)
    before = max(beforex, beforey)
    l2 = np.ceil(before)
    pad_side = int(2.0 ** l2)
outmap = np.zeros([pad_side, pad_side])
outmap[:xx,:yy] = pixmap
dist_array = shift_twod(dist_idl(pad_side, pad_side), pad_side/2, pad_side/2)
circ = np.zeros([pad_side, pad_side])
ind_one = np.where(dist_array <= radius)
circ[ind_one] = 1.
mask = np.real( np.fft.ifft2( np.fft.fft2(circ) *
np.fft.fft2(outmap))
) * pad_side * pad_side
mask = np.round(mask)
ind_holes = np.where(mask >= 1.0)
mask = mask * 0.
mask[ind_holes] = 1.
maskout = shift_twod(mask, pad_side/2, pad_side/2)
return maskout[:xx,:yy]
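def _example_circle_mask():
    ''' Minimal sketch (illustrative only): mask a single bright pixel in the
    centre of a 32x32 map out to a 3 arcsec radius at 1 arcsec/pixel. '''
    pixmap = np.zeros((32, 32))
    pixmap[16, 16] = 1.0
    mask = circle_mask(pixmap, 3.0, 1.0)
    return mask.sum()  # roughly the number of pixels within the radius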
def stack_in_redshift_slices(
cmap,
hd,
layers_radec,
fwhm,
cnoise=None,
mask=None,
beam_area=None,
err_ss=None,
quiet=None):
w = WCS(hd)
#FIND SIZES OF MAP AND LISTS
cms = np.shape(cmap)
zeromask = np.zeros(cms)
size_cube = np.shape(layers_radec)
nsrcmax = size_cube[0]
nlists = int(size_cube[1])
ind_map_zero = np.where(np.isnan(cmap))
nzero = np.shape(ind_map_zero)[1]
    if cnoise is None or np.sum(cnoise) == 0:
        cnoise = cmap * 0.0 + 1.0
    pix = hd.get("CD2_2", 0) * 3600.
    if pix == 0:
        pix = hd["CDELT2"] * 3600.
#[STEP 0] - Calibrate maps
    if beam_area is not None:
        cmap = cmap * beam_area * 1e6
        cnoise = cnoise * beam_area * 1e6
# STEP 1 - Make Layers Cube
layers=np.zeros([nlists,cms[0],cms[1]])
for s in range(nlists):
ind_src = np.where(layers_radec[:,s,0] != 0)
#print np.shape(ind_src)[1]
if np.shape(ind_src)[1] > 0:
ra = layers_radec[ind_src,s,0]
dec = layers_radec[ind_src,s,1]
# CONVERT FROM RA/DEC to X/Y
# DANGER!! NOTICE THAT I FLIP X AND Y HERE!!
            # GJS: the last argument sets the origin (0- or 1-based). I compared the goodness of match
            # between the code output and the locations of 250um selected sources from the SPIRE 250
            # StarFinder catalog, and 1 looks like the better option.
            ty, tx = w.wcs_world2pix(ra, dec, 1)
#ty,tx = w.wcs_world2pix(ra, dec, 0)# NOTICE I FLIPPED X AND Y AND NO LONGER TRANSPOSE!
# CHECK FOR SOURCES THAT FALL OUTSIDE MAP
#ind_keep = np.where((tx[0] >= 0) & (np.floor(tx[0]) < cms[0]) & (ty[0] >= 0) & (np.floor(ty[0]) < cms[1]))
ind_keep = np.where((tx[0] >= 0) & (np.floor(tx[0]) < cms[0]-1) & (ty[0] >= 0) & (np.floor(ty[0]) < cms[1]-1))
nt0 = np.shape(ind_keep)[1]
#real_x=np.floor(tx[0,ind_keep][0]).astype(int)
#real_y=np.floor(ty[0,ind_keep][0]).astype(int)
            # GJS: Lorenzo and I think these coordinates should be rounded to the closest integer,
            # so the two lines below use np.round instead of np.floor.
real_x=np.round(tx[0,ind_keep][0],0).astype(int)
real_y=np.round(ty[0,ind_keep][0],0).astype(int)
# CHECK FOR SOURCES THAT FALL ON ZEROS MAP
if nzero > 0:
tally = np.zeros(nt0)
for d in range(nt0):
if cmap[real_x[d],real_y[d]] != 0:
tally[d]=1.
ind_nz=np.where(tally == 1)
nt = np.shape(ind_nz)[1]
real_x = real_x[ind_nz]
real_y = real_y[ind_nz]
else: nt = nt0
for ni in range(nt):
layers[s, real_x[ni],real_y[ni]]+=1.0
# STEP 2 - Convolve Layers and put in pixels
radius = 1.1
sig = fwhm / 2.355 / pix
flattened_pixmap = np.sum(layers,axis=0)
total_circles_mask = circle_mask(flattened_pixmap, radius * fwhm, pix)
ind_fit = np.where(total_circles_mask >= 1) # & zeromask != 0)
nhits = np.shape(ind_fit)[1]
cfits_maps = np.zeros([nlists,nhits])
kern = gauss_kern(fwhm, np.floor(fwhm * 10), pix)
for u in range(nlists):
layer = layers[u,:,:]
#layer = np.transpose(layers[u,:,:]) ## DANGER!! Transpose NO LONGER required AFTER FLIPPING x and y!!!!!!!
#tmap = gaussian_filter(layer, sig)
tmap = smoothmap(layer, kern)
tmap -= np.mean(tmap[ind_fit])
cfits_maps[u,:] = tmap[ind_fit]
# STEP 3 - Regress Layers with Map (i.e., stack!)
cmap[ind_fit] -= np.mean(cmap[ind_fit], dtype=np.float32)
fit_params = Parameters()
for iarg in range(nlists):
fit_params.add('layer'+str(iarg),value= 1e-3*np.random.randn())
imap = cmap[ind_fit]
ierr = cnoise[ind_fit]
#cov_ss = minimize(simultaneous_stack_array, fit_params, args=(cfits_maps,), kws={'data':imap,'err':ierr})
##cov_ss = minimize(simultaneous_stack_array, fit_params, args=(cfits_maps,imap,ierr))
cov_ss_1d = minimize(simultaneous_stack_array_oned, fit_params,
args=(np.ndarray.flatten(cfits_maps),), kws={'data1d':np.ndarray.flatten(imap),'err1d':np.ndarray.flatten(ierr)})
#args=(np.ndarray.flatten(cfits_maps), np.ndarray.flatten(imap),np.ndarray.flatten(ierr)))
# STEP 4 - Returns a minimizer object
#pdb.set_trace()
return cov_ss_1d
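def _example_stack_fit():
    ''' Minimal sketch (synthetic data, illustrative only) of the 1D
    simultaneous fit above: two flattened layers with known fluxes,
    recovered by lmfit. '''
    np.random.seed(0)
    len_model = 100
    layers = np.vstack([np.random.rand(len_model), np.random.rand(len_model)])
    truth = [2.0, -1.0]
    data = truth[0] * layers[0] + truth[1] * layers[1] \
        + 0.01 * np.random.randn(len_model)
    fit_params = Parameters()
    for iarg in range(len(layers)):
        fit_params.add('layer' + str(iarg), value=1e-3 * np.random.randn())
    result = minimize(simultaneous_stack_array_oned, fit_params,
        args=(np.ndarray.flatten(layers),), kws={'data1d': data})
    print(fit_report(result))
    return result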
|
|
import os
from os.path import join, abspath
import numpy as np
import itertools
import h5py
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
from bigstats.hist import RHist
from simfMRI.io import read_hdf_inc, get_model_meta, get_model_names
def noise_spectrum(noise, name=None):
"""TODO"""
pass
# import simfMRI; simfMRI.analysis.plot.random_timecourses_all_models("rw_5000_learn.hdf5", 10, 5000, "rw_5000_learn")
def random_timecourses_all_models(hdf, N, nsim, basename):
""" Plot <N> randomly selected BOLD and design matrix timecourses
from <nsim> options for all models in <hdf>.
Each model's plots are saved as a pdf, prefixed with tc_<basename>. """
# Make a list of the models
# to plot and plot them
models = get_model_names(hdf)
for mod in models:
print("Plotting {0}.".format(mod))
random_timecourses(hdf, mod, N, nsim, "tc_"+basename+"_"+mod)
def random_timecourses(hdf, model, N, nsim, name=None):
""" Plot <N> randomly selected BOLD and design matrix timecourses
from <nsim> options for <model> from <hdf>.
If <name> is not None, the results are saved as a pdf. """
# Open a handle the hdf file
f = h5py.File(hdf,'r')
# Create (or use) a handle to a multi-page pdf
    if name is not None:
if isinstance(name, PdfPages):
pdf = name ## Use
else:
pdf = PdfPages('{0}.pdf'.format(name)) ## Create
    # Randomly select sims between 0 and nsim
    # and make separate plots for each,
    # adding them to the same pdf.
selected = np.random.randint(0, nsim, N)
for sel in selected:
# Get metadata
meta = get_model_meta(hdf, model)
# Create a figure window
fig = plt.figure()
ax = fig.add_subplot(111)
# Get the data
dm = f[os.path.join("/", str(sel), model, "dm")].value
bold = f[os.path.join("/", str(sel), model, "bold")].value
        # Plot it
ax.plot(dm)
ax.plot(bold, color="grey")
# Pretty up the plot
boldleg = ["".join( ["BOLD: ", ] + meta["bold"].tolist()), ]
plt.legend(meta["dm"] + boldleg)
plt.title('Timecourse {0} from {1}.'.format(sel, model))
## TODO add labels?
        if name is not None:
# Add to pdf...
plt.savefig(pdf, format="pdf")
    if name is not None:
pdf.close()
def hist_t_all_models(path, hdf, basename):
""" Given a <path> and the <hdf> name, plot and save all the models in
the <hdf>, prefixing each with <basename>.
"""
# Create a handle to create a multi-page pdf
# for the mod plots
pdf = PdfPages(os.path.join(path, '{0}.pdf'.format(basename)))
# Make a list of the models
# to plot and plot them
hdfpath = os.path.join(path, hdf)
models = get_model_names(hdfpath)
for mod in models:
print("Plotting {0}.".format(mod))
hist_t(hdfpath, mod, pdf)
pdf.close()
def hist_t(hdf,model,name=None):
"""
Plot histograms of the t values in <hdf> for each condition in
<model>.
If <name> is not None the plot is saved as <name>.pdf.
"""
meta = get_model_meta(hdf, model)
hist_list = []
for dm_col in meta['dm']:
        # Make an RHist instance for each design matrix column.
hist = RHist(name=dm_col,decimals=1)
hist_list.append(hist)
# read_hdf_inc returns a generator so....
tdata = read_hdf_inc(hdf,'/'+ model + '/t')
for ts in tdata:
# get the tvals for each instance of model
# and add them to the hist_list,
[hist_list[ii].add(ts[ii]) for ii in range(len(ts)-1)]
## The last t in ts is the constant, which we
## do not want to plot.
# Create a fig, loop over the hist_list
# plotting each on fig.axes = 0.
fig = plt.figure()
fig.add_subplot(111)
colors = itertools.cycle(
['DarkGray', 'DarkBlue', 'DarkGreen', 'MediumSeaGreen'])
## Using html colors...
    [h.plot(fig=fig, color=next(colors), norm=True) for h in hist_list]
# Prettify the plot
ax = fig.axes[0]
ax.set_xlabel('t-values')
ax.set_ylabel('P(t)')
    # Add vertical lines representing significance thresholds
ax.axvline(x=1.7822,label='p < 0.05',color='red',linewidth=4)
ax.axvline(x=2.6810,label='p < 0.01',color='red',linewidth=3)
ax.axvline(x=3.0545,label='p < 0.005',color='red',linewidth=2)
ax.axvline(x=4.3178,label='p < 0.0005',color='red',linewidth=1)
## tval lines assume N=12 subjects
plt.xlim(-10,15)
plt.legend()
plt.title('{0} -- BOLD: {1}'.format(model,meta['bold']))
    if name is not None:
plt.savefig(name, format="pdf")
# FROM MASTER:
# """ A set of plotting routines for simfMRI.exp results objects"""
# import matplotlib.pyplot as plt
# import numpy as np
# import simfMRI
# frac overlap, howto calc?
# quant how?
# mean beta w/ CI?
# boxplot instead?
# Asses noise effect how?
# code to do plots of hrf/raw data
# what does the corr w acc tell us
# anything that generalizes?
# make sure unit is as it should be. The complete lack of corr with value is odd, esp for t90
# do with td (3 state)
# do with rc
# def hist_t_delta(results,glm_name,hist_title='Data'):
# glm_data = simfMRI.misc.repack_glm(results,glm_name)
# tvalues = np.array(glm_data['t'])
# nan_mask = np.isnan(tvalues)
# tvalues[nan_mask] = 0.0
# # nans break hists
# plt.rcParams['font.size'] = 16
# fig = plt.figure()
# ax = fig.add_subplot(111)
# # fig init
# # X is 0:base,1:unit,2:values,3:rpes,4:acc,5:rand,6:dummy
# dataloc = [4,2,3,5]
# labels = ['Accuracy','Value','RPE','Random']
# colors = ['orange','green','blue','black']
# hist_params = dict(bins=100,histtype='stepfilled',alpha=0.4,normed=True)
# for lc,lb,co in zip(dataloc,labels,colors):
# print(lc,lb,co)
# ax.hist(tvalues[:,lc],label=lb,color=co,**hist_params)
# ax.axvline(x=2.015,label='p < 0.05',color='red',linewidth=3)
# ax.axvline(x=3.36493,label='p < 0.01',color='red',linewidth=2)
# ax.axvline(x=4.03214,label='p < 0.005',color='red',linewidth=1)
# ax.set_xlabel('t-values')
# ax.set_ylabel('Normalized counts')
# plt.xlim(-10,15)
# plt.ylim(0,.6)
# plt.legend(bbox_to_anchor=(1.10, 1.10))
# plt.title(hist_title)
# plt.show()
# def hist_beta_delta(results,glm_name):
# """
# Plot a pretty histogram, masking values over or under 10.
# """
# import numpy as np
# glm_data = simfMRI.misc.repack_glm(results,glm_name)
# betas = np.ma.asarray(glm_data['beta'])
# betas = np.ma.masked_outside(betas,-10,10)
# betas = betas.filled(0.0)
# plt.rcParams['font.size'] = 16
# fig = plt.figure()
# ax = fig.add_subplot(111)
# # fig init
# # X is 0:base,1:unit,2:values,3:rpes,4:acc,5:rand,6:dummy
# dataloc = [1,4,2,3,5]
# labels = ['Unit','Accuracy','Value','RPE','Random']
# colors = ['black','orange','green','blue','purple']
# hist_params = dict(bins=100,histtype='stepfilled',alpha=0.4,normed=True)
# for lc,lb,co in zip(dataloc,labels,colors):
# print(lc,lb,co)
# ax.hist(betas[:,lc],label=lb,color=co,**hist_params)
# ax.set_xlabel('Beta values')
# ax.set_ylabel('Relative counts')
# plt.xlim(-10,10)
# plt.ylim(0,3)
# plt.legend(bbox_to_anchor=(1.10, 1.10))
# plt.show()
# def mean_beta_by_glm(
# results_list=[],glm_name='glm_acc',cols={'acc':4},n_cols=7,x_vals=[30,60,90],xlab=''):
# """
# Plots (and returns the values of) M and SD for select beta
# values in glm_name for given sequence of results objects.
# If over, the data for each col is overlayed in the
# same plot. Up to 4 cols are allowed
# """
# import numpy as np
# from collections import defaultdict
# if len(cols) > 5:
# raise ValueError, 'There were more than 5 cols.'
# ## Want the M and SD data grouped by col.keys()
# ## where each glm's data is a element in a list
# all_M = np.zeros((len(results_list),n_cols))
# all_SD = np.zeros_like(all_M)
# for ii,result in enumerate(results_list):
# glm_data = simfMRI.misc.repack_glm(result,glm_name)
# betas = np.ma.asarray(glm_data['beta'])
# betas = np.ma.masked_outside(betas,-10,10)
# all_M[ii,] = betas.mean(0)
# all_SD[ii,] = betas.std(0)
# # get ALL column means and std devs
# plt.rcParams['font.size'] = 16
# fig = plt.figure()
# ax = fig.add_subplot(111)
# # fig init
# for name,col in cols.items():
# ax.errorbar(x=x_vals, y=all_M[:,col], yerr=all_SD[:,col],
# label=name,fmt='o')
# x_range = x_vals[1] - x_vals[0]
# ax.set_xticks(range(0,max(x_vals)+x_range*2,30))
# plt.ylabel('Avg Beta')
# plt.xlabel(xlab)
# plt.legend()
# plt.show()
# def mean_t_by_glm(
# results_list=[],glm_name='glm_acc',cols={'acc':4},n_cols=7,x_vals=[30,60,90],xlab=''):
# """
# Plots (and returns the values of) M and SD for select beta
# values in glm_name for given sequence of results objects.
# If over, the data for each col is overlayed in the
# same plot. Up to 4 cols are allowed
# """
# import numpy as np
# from collections import defaultdict
# if len(cols) > 5:
# raise ValueError, 'There were more than 5 cols.'
# ## Want the M and SD data grouped by col.keys()
# ## where each glm's data is a element in a list
# all_M = np.zeros((len(results_list),n_cols))
# all_SD = np.zeros_like(all_M)
# for ii,result in enumerate(results_list):
# glm_data = simfMRI.misc.repack_glm(result,glm_name)
# tvalues = np.array(glm_data['t'])
# nan_mask = np.isnan(tvalues)
# tvalues[nan_mask] = 0.0
# all_M[ii,] = tvalues.mean(0)
# all_SD[ii,] = tvalues.std(0)
# # get ALL column means and std devs
# plt.rcParams['font.size'] = 16
# fig = plt.figure()
# ax = fig.add_subplot(111)
# # fig init
# for name,col in cols.items():
# ax.errorbar(x=x_vals, y=all_M[:,col], yerr=all_SD[:,col],
# label=name,fmt='o')
# x_range = x_vals[1] - x_vals[0]
# ax.set_xticks(range(0,max(x_vals)+x_range*2,x_range))
# plt.ylabel('Avg t-value')
# plt.xlabel(xlab)
# plt.ylim(-2,10)
# plt.legend(bbox_to_anchor=(1.10, 1.10))
# plt.show()
# return all_M,all_SD
|
|
from django.contrib.auth.models import User
from django.urls import reverse
from django.test import Client, TestCase
from ...model_generator import get_alphabetic_user_profiles, create_profiled_user
from ddcz.tavern import (
LIST_ALL,
LIST_FAVORITE,
create_tavern_table,
get_tavern_table_list,
bookmark_table,
)
class TavernListingTestCase(TestCase):
def assertTableInListing(self, table, listing):
self.assertIn(table.pk, [table.pk for table in listing])
def assertTableNotInListing(self, table, listing):
self.assertNotIn(table.pk, [table.pk for table in listing])
class TestListingDoesNotHaveRunawayQueries(TavernListingTestCase):
# This can be incremented, but the point is not to let pointless runaway queries happen
# as it did before, see <https://github.com/dracidoupe/graveyard/issues/302>
EXPECTED_LIST_QUERIES = 4
def setUp(self):
super().setUp()
self.owner, self.banned = get_alphabetic_user_profiles(
number_of_users=2, saved=True
)
self.owner_user = create_profiled_user(
username="owner", password="bobr-evropsky"
)
self.banned_user = create_profiled_user(
username="banned", password="bobrice-evropska"
)
for i in range(0, 15):
table = create_tavern_table(
owner=self.owner,
public=True,
name=f"Public {i}",
description="Public Table",
)
table.update_access_privileges(access_banned=[self.banned.pk])
self.client = Client()
def test_owner_not_runaway(self):
self.client.force_login(user=self.owner_user)
with self.assertNumQueries(self.EXPECTED_LIST_QUERIES):
self.client.get(f"{reverse('ddcz:tavern-list')}?vypis=vsechny")
def test_banned_not_runaway(self):
self.client.force_login(user=self.banned_user)
with self.assertNumQueries(self.EXPECTED_LIST_QUERIES):
self.client.get(f"{reverse('ddcz:tavern-list')}?vypis=vsechny")
class TestPublicTableListings(TavernListingTestCase):
def setUp(self):
super().setUp()
(
self.owner,
self.banned,
self.unaffected,
) = self.profiles = get_alphabetic_user_profiles(number_of_users=3, saved=True)
self.public_table = create_tavern_table(
owner=self.owner,
public=True,
name="Public",
description="Public Tavern Table",
)
self.public_table.update_access_privileges(access_banned=[self.banned.pk])
self.bookmarked_public_table = create_tavern_table(
owner=self.owner,
public=True,
name="Public Bookmarked",
description="Bookmarked Public Tavern Table",
)
self.bookmarked_public_table.update_access_privileges(
access_banned=[self.banned.pk]
)
for user in self.profiles:
bookmark_table(user, self.bookmarked_public_table)
def test_shown_to_random_user(self):
self.assertTableInListing(
self.public_table, get_tavern_table_list(self.unaffected, LIST_ALL)
)
def test_shown_to_owner(self):
self.assertTableInListing(
self.public_table, get_tavern_table_list(self.owner, LIST_ALL)
)
def test_shown_to_banned(self):
self.assertTableInListing(
self.public_table, get_tavern_table_list(self.banned, LIST_ALL)
)
def test_not_linked_to_banned(self):
self.assertFalse(self.public_table.show_listing_link(self.banned))
def test_both_tables_shown(self):
        self.assertEqual(2, len(get_tavern_table_list(self.unaffected, LIST_ALL)))
def test_bookmark_shown_to_random_user(self):
self.assertTableInListing(
self.bookmarked_public_table,
get_tavern_table_list(self.unaffected, LIST_FAVORITE),
)
def test_bookmark_shown_to_owner(self):
self.assertTableInListing(
self.bookmarked_public_table,
get_tavern_table_list(self.owner, LIST_FAVORITE),
)
def test_bookmark_shown_to_banned(self):
self.assertTableInListing(
self.bookmarked_public_table,
get_tavern_table_list(self.banned, LIST_FAVORITE),
)
def test_bookmark_not_linked_to_banned(self):
self.assertFalse(self.bookmarked_public_table.show_listing_link(self.banned))
def test_only_favorite_shown(self):
        self.assertEqual(1, len(get_tavern_table_list(self.unaffected, LIST_FAVORITE)))
class TestOwnerAssistOverrule(TavernListingTestCase):
def setUp(self):
super().setUp()
self.owner, self.assist, self.banned = get_alphabetic_user_profiles(
number_of_users=3, saved=True
)
self.public_table = create_tavern_table(
owner=self.owner,
public=True,
name="Public",
description="Public Tavern Table",
)
# Owner bans themselves
self.public_table.update_access_privileges(
access_banned=[self.owner.pk, self.assist.pk, self.banned.pk],
assistant_admins=[self.assist.pk],
)
def test_shown_admin(self):
self.assertTableInListing(
self.public_table, get_tavern_table_list(self.owner, LIST_ALL)
)
def test_shown_assist(self):
self.assertTableInListing(
            self.public_table, get_tavern_table_list(self.assist, LIST_ALL)
)
def test_shown_banned(self):
self.assertTableInListing(
self.public_table, get_tavern_table_list(self.banned, LIST_ALL)
)
class TestPrivateTableListings(TavernListingTestCase):
def setUp(self):
super().setUp()
(
self.owner,
self.banned,
self.unaffected,
self.allowed,
self.assist,
) = self.profiles = get_alphabetic_user_profiles(number_of_users=5, saved=True)
self.private_table = create_tavern_table(
owner=self.owner,
public=False,
name="Private",
description="Private Tavern Table",
)
self.private_table.update_access_privileges(
access_banned=[self.banned.pk],
access_allowed=[self.allowed.pk],
assistant_admins=[self.assist.pk],
)
self.bookmarked_private_table = create_tavern_table(
owner=self.owner,
public=False,
name="Private Bookmarked",
description="Bookmarked Private Tavern Table",
)
self.bookmarked_private_table.update_access_privileges(
access_banned=[self.banned.pk],
access_allowed=[self.allowed.pk],
assistant_admins=[self.assist.pk],
)
for user in self.profiles:
bookmark_table(user, self.bookmarked_private_table)
def test_shown_to_random_user(self):
self.assertTableInListing(
self.private_table, get_tavern_table_list(self.unaffected, LIST_ALL)
)
def test_shown_to_owner(self):
self.assertTableInListing(
self.private_table, get_tavern_table_list(self.owner, LIST_ALL)
)
def test_shown_to_assist(self):
self.assertTableInListing(
self.private_table, get_tavern_table_list(self.assist, LIST_ALL)
)
    def test_shown_to_banned(self):
self.assertTableInListing(
self.private_table, get_tavern_table_list(self.banned, LIST_ALL)
)
def test_private_tables_shown(self):
        self.assertEqual(2, len(get_tavern_table_list(self.unaffected, LIST_ALL)))
def test_both_tables_shown_to_allowed(self):
        self.assertEqual(2, len(get_tavern_table_list(self.allowed, LIST_ALL)))
def test_bookmark_shown_to_random_user(self):
self.assertTableInListing(
self.bookmarked_private_table,
get_tavern_table_list(self.unaffected, LIST_FAVORITE),
)
def test_bookmark_shown_to_owner(self):
self.assertTableInListing(
self.bookmarked_private_table,
get_tavern_table_list(self.owner, LIST_FAVORITE),
)
def test_bookmark_shown_to_banned(self):
self.assertTableInListing(
self.bookmarked_private_table,
get_tavern_table_list(self.banned, LIST_FAVORITE),
)
|
|
#!/usr/bin/env python
from plasTeX.Renderers import Renderer as BaseRenderer
import textwrap, re, string
class ManPageRenderer(BaseRenderer):
""" Renderer for UNIX man pages """
outputType = unicode
fileExtension = '.man'
aliases = {
'superscript': 'active::^',
'subscript': 'active::_',
'dollar': '$',
'percent': '%',
'opencurly': '{',
'closecurly': '}',
'underscore': '_',
'ampersand': '&',
'hashmark': '#',
'space': ' ',
'tilde': 'active::~',
'at': '@',
'backslash': '\\',
}
def __init__(self, *args, **kwargs):
BaseRenderer.__init__(self, *args, **kwargs)
# Load dictionary with methods
for key in vars(type(self)):
if key.startswith('do__'):
self[self.aliases[key[4:]]] = getattr(self, key)
elif key.startswith('do_'):
self[key[3:]] = getattr(self, key)
self['default-layout'] = self['document-layout'] = self.default
self.footnotes = []
self.blocks = []
def default(self, node):
""" Rendering method for all non-text nodes """
# Handle characters like \&, \$, \%, etc.
if len(node.nodeName) == 1 and node.nodeName not in string.letters:
return self.textDefault(node.nodeName)
# Render child nodes
return unicode(node)
def textDefault(self, node):
return unicode(node)
def processFileContent(self, document, s):
s = BaseRenderer.processFileContent(self, document, s)
# Clean up newlines
s = re.sub(r'\s*\n(\s*\n)+', r'\n\n', s)
s = re.sub(r'(\s*\n)+(\.B[ld])', r'\n\2', s)
s = re.sub(r'(\.E[ld])\s*(\.B[ld])', r'\1\n\n\2', s)
s = re.sub(r'\.Ed\s*\.Bd', r'.Ed\n.Bd', s)
s = s.lstrip()
return s
# Alignment
def do_flushleft(self, node):
return u'\n.Bd -ragged\n%s\n.Ed\n' % node
do_raggedbottom = do_raggedright = do_leftline = do_flushleft
def center(self, text):
return u'\n.Bd -centered\n%s\n.Ed\n' % text
def do_center(self, node):
return self.center(unicode(node))
do_centering = do_centerline = do_center
def do_flushright(self, node):
return u'\n.Bd -offset right\n%s\n.Ed\n' % node
do_raggedleft = do_llap = do_flushright
# Arrays
def do_array(self, node, render=unicode):
output = ['.TS']
# Process colspecs
if node.colspec:
alignments = [x.style['text-align'] for x in node.colspec]
else:
alignments = ['l']*100
for row in node:
colspec = []
for i, cell in enumerate(row):
colspec.append(cell.style.get('text-align', alignments[i])[0])
output.append(' '.join(colspec))
output[-1] += '.'
# Render table
for row in node:
content = []
for cell in row:
content.append(render(cell).strip())
output.append('\t'.join(content))
output.append('.TE')
output.append('')
        return re.sub(r'\s*\.TE\s*', r'\n.TE\n', u'\n'.join(output))
do_tabular = do_tabularx = do_longtable = do_array
def do_cline(self, node):
return ''
def do_multicolumn(self, node):
return unicode(node)
# Bibliography
def do_thebibliography(self, node):
output = ['','.Sh Bibliography','']
output.append('.Bl -tag -width indent')
for item in node:
output.append('.It %s' % unicode(item.bibcite).strip())
output.append(unicode(item).strip())
output.append('.El')
output.append('')
return u'\n'.join(output)
def do_bibliographystyle(self, node):
return u''
def do_bibliography(self, node):
return self.default(node)
def do_cite(self, node):
output = []
for item in node.citation():
output.append(unicode(item))
return u''.join(output)
def do_bibliographyref(self, node):
return self.default(node)
# Boxes
do_mbax = do_makebox = do_fbox = do_framebox = do_parbox = default
do_minipage = do_raisebox = do_rule = default
# Breaking
def do_linebreak(self, node):
return u'\n\n'
do_newline = do_pagebreak = do_newpage = do_clearpage = do_cleardoublepage = do_linebreak
# Crossref
def do_ref(self, node):
return unicode(node.idref['label'].ref)
def do_pageref(self, node):
return u'*'
def do_label(self, node):
return u''
# Floats
def do_figure(self, node):
return unicode(node)
do_table = do_marginpar = do_figure
def do_caption(self, node):
return u'\n%s %s: %s\n' % (node.title, node.ref, unicode(node).strip())
# Font Selection
do_sffamily = do_textsf = default
do_upshape = do_textup = default
do_scshape = do_textsc = default
do_sc = default
do_tiny = do_scriptsize = do_footnotesize = do_small = default
    do_normalsize = do_large = do_Large = do_LARGE = do_huge = do_Huge = default
def do_textbf(self, node):
return u'\\fB%s\\fP' % node
do_bfseries = do_bf = do_textbf
def do_textit(self, node):
return u'\\fI%s\\fP' % node
do_itshape = do_it = do_slshape = do_textsl = do_sl = do_cal = do_textit
def do_texttt(self, node):
return u'\\fC%s\\fP' % node
do_ttfamily = do_tt = do_texttt
def do_textmd(self, node):
return u'\\fR%s\\fP' % node
do_mdseries = do_rmfamily = do_textrm = do_textnormal = do_rm = do_textmd
def do_symbol(self, node):
return u'*'
# Footnotes
def do_footnote(self, node):
mark = u'[%s]' % (len(self.footnotes)+1)
self.footnotes.append(unicode(node))
return mark
def do_footnotetext(self, node):
        self.do_footnote(node)
return ''
def do_footnotemark(self, node):
return u'[%s]' % (len(self.footnotes)+1)
# Index
def do_theindex(self, node):
return u''
do_printindex = do_index = do_theindex
# Lists
def do_itemize(self, node):
output =['','.Bl -bullet -offset 3n -compact']
for item in node:
output.append('.It')
output.append(unicode(item).strip())
output.append('.El')
output.append('')
return u'\n'.join(output)
def do_enumerate(self, node):
output = ['','.Bl -enum -offset 3n -compact']
for item in node:
output.append('.It')
output.append(unicode(item).strip())
output.append('.El')
output.append('')
return u'\n'.join(output)
def do_description(self, node):
output = ['','.Bl -tag -width 3n']
for item in node:
output.append('.It %s' % unicode(item.attributes.get('term','')).strip())
output.append(unicode(item).strip())
output.append('.El')
output.append('')
return u'\n'.join(output)
do_list = do_trivlist = do_description
# Math
def do_math(self, node):
return re.sub(r'\s*(_|\^)\s*', r'\1', node.source.replace('\\','\\\\'))
do_ensuremath = do_math
def do_equation(self, node):
s = u' %s' % re.compile(r'^\s*\S+\s*(.*?)\s*\S+\s*$', re.S).sub(r'\1', node.source.replace('\\','\\\\'))
return re.sub(r'\s*(_|\^)\s*', r'\1', s)
do_displaymath = do_equation
def do_eqnarray(self, node):
def render(node):
s = re.compile(r'^\$\\\\displaystyle\s*(.*?)\s*\$\s*$', re.S).sub(r'\1', node.source.replace('\\','\\\\'))
return re.sub(r'\s*(_|\^)\s*', r'\1', s)
return self.do_array(node, render=render)
do_align = do_gather = do_falign = do_multiline = do_eqnarray
do_multline = do_alignat = do_split = do_eqnarray
# Misc
do_bgroup = default
def do_def(self, node):
return u''
do_tableofcontents = do_input = do_protect = do_let = do_def
do_newcommand = do_hfill = do_hline = do_openout = do_renewcommand = do_def
do_write = do_hspace = do_appendix = do_global = do_noindent = do_def
do_include = do_markboth = do_setcounter = do_refstepcounter = do_def
do_medskip = do_smallskip = do_parindent = do_indent = do_setlength = do_def
do_settowidth = do_addtolength = do_nopagebreak = do_newwrite = do_def
do_newcounter = do_typeout = do_sloppypar = do_hfil = do_thispagestyle = do_def
def do_egroup(self, node):
return u''
# Pictures
def do_picture(self, node):
return u''
# Primitives
def do_par(self, node):
return u'\n%s\n' % unicode(node).strip()
def do__superscript(self, node):
return self.default(node)
def do__subscript(self, node):
return self.default(node)
# Quotations
def do_quote(self, node):
return self.center(node)
do_quotation = do_verse = do_quote
# Sectioning
def do_document(self, node):
content = unicode(node).rstrip()
footnotes = ''
if self.footnotes:
output = ['','.Bl -tag -offset indent']
for i, item in enumerate(self.footnotes):
output.append('.It [%s]' % (i+1))
output.append(item)
output.append('.El')
output.append('')
footnotes = '\n'.join(output)
return u'%s%s' % (content, footnotes)
def do_maketitle(self, node):
output = []
metadata = node.ownerDocument.userdata
if 'date' in metadata:
output.append('.Dd %s' % metadata['date'])
if 'title' in metadata:
output.append('.Dt %s' % unicode(metadata['title']).upper())
output.append('')
return u'\n'.join(output)
def do_section(self, node):
return u'.Sh %s\n%s' % (node.title, node)
do_part = do_chapter = do_section
def do_subsection(self, node):
return u'.Ss %s\n%s' % (node.title, node)
do_subsubsection = do_paragraph = do_subparagraph = do_subsubparagraph = do_subsection
def do_title(self, node):
return u''
do_author = do_date = do_thanks = do_title
def do_abstract(self, node):
return self.center(unicode(node).strip())
# Sentences
def do__dollar(self, node):
return u'$'
def do__percent(self, node):
return u'%'
def do__opencurly(self, node):
return u'{'
def do__closecurly(self, node):
return u'}'
def do__underscore(self, node):
return u'_'
def do__ampersand(self, node):
return u'&'
def do__hashmark(self, node):
return u'#'
def do__space(self, node):
return u' '
def do_LaTeX(self, node):
return u'LaTeX'
def do_TeX(self, node):
return u'TeX'
def do_emph(self, node):
return self.default(node)
do_em = do_emph
def do__tilde(self, node):
return u' '
def do_enspace(self, node):
return u' '
do_quad = do_qquad = do_enspace
def do_enskip(self, node):
return u''
do_thinspace = do_enskip
def do_underbar(self, node):
return self.default(node)
# Space
def do_hspace(self, node):
return u' '
def do_vspace(self, node):
return u''
do_bigskip = do_medskip = do_smallskip = do_vspace
# Tabbing - not implemented yet
# Verbatim
def do_verbatim(self, node):
return u'\n.Bd -literal%s.Ed\n' % node
do_alltt = do_verbatim
def do_mbox(self, node):
return self.default(node)
def do__at(self, node):
return u''
def do__backslash(self, node):
return u'\\'
Renderer = ManPageRenderer
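# Illustrative usage (hypothetical input file; plasTeX's TeX class parses the
# LaTeX source into a document object this renderer can walk):
#
#     from plasTeX.TeX import TeX
#     document = TeX(file='manual.tex').parse()
#     Renderer().render(document)  # emits '.man' output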
|
|
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from libqtile.layout.base import Layout
class _BspNode():
def __init__(self, parent=None):
self.parent = parent
self.children = []
self.split_horizontal = None
self.split_ratio = 50
self.client = None
self.x = self.y = 0
self.w = 16
self.h = 9
def __iter__(self):
yield self
for child in self.children:
for c in child:
yield c
def clients(self):
if self.client:
yield self.client
else:
for child in self.children:
for c in child.clients():
yield c
def _shortest(self, l):
if len(self.children) == 0:
return self, l
else:
c0, l0 = self.children[0]._shortest(l + 1)
c1, l1 = self.children[1]._shortest(l + 1)
return (c1, l1) if l1 < l0 else (c0, l0)
def get_shortest(self):
return self._shortest(0)[0]
def insert(self, client, idx, ratio):
if self.client is None:
self.client = client
return self
self.children = [_BspNode(self), _BspNode(self)]
self.children[1 - idx].client = self.client
self.children[idx].client = client
self.client = None
self.split_horizontal = True if self.w > self.h * ratio else False
return self.children[idx]
def remove(self, child):
keep = self.children[1 if child is self.children[0] else 0]
self.children = keep.children
for c in self.children:
c.parent = self
self.split_horizontal = keep.split_horizontal
self.split_ratio = keep.split_ratio
self.client = keep.client
return self
def distribute(self):
if len(self.children) == 0:
return 1, 1
h0, v0 = self.children[0].distribute()
h1, v1 = self.children[1].distribute()
if self.split_horizontal:
h = h0 + h1
v = max(v0, v1)
self.split_ratio = 100 * h0 / h
else:
h = max(h0, h1)
v = v0 + v1
self.split_ratio = 100 * v0 / v
return h, v
def calc_geom(self, x, y, w, h):
self.x = x
self.y = y
self.w = w
self.h = h
if len(self.children) > 1:
if self.split_horizontal:
w0 = int(self.split_ratio * w * 0.01 + 0.5)
self.children[0].calc_geom(x, y, w0, h)
self.children[1].calc_geom(x + w0, y, w - w0, h)
else:
h0 = int(self.split_ratio * h * 0.01 + 0.5)
self.children[0].calc_geom(x, y, w, h0)
self.children[1].calc_geom(x, y + h0, w, h - h0)
class Bsp(Layout):
"""This layout is inspired by bspwm, but it does not try to copy its
features.
The first client occupies the entire screen space. When a new client
is created, the selected space is partitioned in 2 and the new client
occupies one of those subspaces, leaving the old client with the other.
The partition can be either horizontal or vertical according to the
dimensions of the current space: if its width/height ratio is above
a pre-configured value, the subspaces are created side-by-side,
otherwise, they are created on top of each other. The partition
direction can be freely toggled. All subspaces can be resized and
clients can be shuffled around.
All clients are organized at the leaves of a full binary tree.
An example key configuration is::
Key([mod], "j", lazy.layout.down()),
Key([mod], "k", lazy.layout.up()),
Key([mod], "h", lazy.layout.left()),
Key([mod], "l", lazy.layout.right()),
Key([mod, "shift"], "j", lazy.layout.shuffle_down()),
Key([mod, "shift"], "k", lazy.layout.shuffle_up()),
Key([mod, "shift"], "h", lazy.layout.shuffle_left()),
Key([mod, "shift"], "l", lazy.layout.shuffle_right()),
Key([mod, "mod1"], "j", lazy.layout.flip_down()),
Key([mod, "mod1"], "k", lazy.layout.flip_up()),
Key([mod, "mod1"], "h", lazy.layout.flip_left()),
Key([mod, "mod1"], "l", lazy.layout.flip_right()),
Key([mod, "control"], "j", lazy.layout.grow_down()),
Key([mod, "control"], "k", lazy.layout.grow_up()),
Key([mod, "control"], "h", lazy.layout.grow_left()),
Key([mod, "control"], "l", lazy.layout.grow_right()),
Key([mod, "shift"], "n", lazy.layout.normalize()),
Key([mod], "Return", lazy.layout.toggle_split()),
"""
defaults = [
("name", "bsp", "Name of this layout."),
("border_focus", "#881111", "Border colour for the focused window."),
("border_normal", "#220000", "Border colour for un-focused windows."),
("border_width", 2, "Border width."),
("margin", 0, "Margin of the layout."),
("ratio", 1.6,
"Width/height ratio that defines the partition direction."),
("grow_amount", 10, "Amount by which to grow a window/column."),
("lower_right", True, "New client occupies lower or right subspace."),
("fair", True, "New clients are inserted in the shortest branch."),
]
def __init__(self, **config):
Layout.__init__(self, **config)
self.add_defaults(Bsp.defaults)
self.root = _BspNode()
self.current = self.root
def clone(self, group):
c = Layout.clone(self, group)
c.root = _BspNode()
c.current = c.root
return c
def info(self):
return dict(
name=self.name,
clients=[c.name for c in self.root.clients()])
def get_node(self, client):
for node in self.root:
if client is node.client:
return node
def focus(self, client):
self.current = self.get_node(client)
def add(self, client):
node = self.root.get_shortest() if self.fair else self.current
self.current = node.insert(client, int(self.lower_right), self.ratio)
def remove(self, client):
node = self.get_node(client)
if node:
if node.parent:
node = node.parent.remove(node)
newclient = next(node.clients(), None)
if newclient is None:
self.current = self.root
else:
self.current = self.get_node(newclient)
return newclient
node.client = None
self.current = self.root
def configure(self, client, screen):
self.root.calc_geom(screen.x, screen.y, screen.width,
screen.height)
node = self.get_node(client)
color = self.group.qtile.color_pixel(
self.border_focus if client.has_focus else self.border_normal)
border = 0 if node is self.root else self.border_width
client.place(
node.x,
node.y,
node.w - 2 * border,
node.h - 2 * border,
border,
color,
margin=self.margin)
client.unhide()
def cmd_toggle_split(self):
if self.current.parent:
self.current.parent.split_horizontal = not self.current.parent.split_horizontal
self.group.layout_all()
def focus_first(self):
return next(self.root.clients(), None)
def focus_last(self):
clients = list(self.root.clients())
return clients[-1] if len(clients) else None
def focus_next(self, client):
clients = list(self.root.clients())
if client in clients:
idx = clients.index(client)
if idx + 1 < len(clients):
return clients[idx + 1]
def focus_previous(self, client):
clients = list(self.root.clients())
if client in clients:
idx = clients.index(client)
if idx > 0:
return clients[idx - 1]
def cmd_next(self):
client = self.focus_next(self.current)
if client:
self.group.focus(client, True)
def cmd_previous(self):
client = self.focus_previous(self.current)
if client:
self.group.focus(client, True)
def find_left(self):
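        # Climb the tree until this node is the right child of a horizontal
        # split, then descend through the left sibling, keeping to its right
        # edge and to the leaf whose vertical span contains the centre of the
        # current window.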
child = self.current
parent = child.parent
while parent:
if parent.split_horizontal and child is parent.children[1]:
neighbor = parent.children[0]
center = self.current.y + self.current.h * 0.5
while neighbor.client is None:
if neighbor.split_horizontal or neighbor.children[1].y < center:
neighbor = neighbor.children[1]
else:
neighbor = neighbor.children[0]
return neighbor
child = parent
parent = child.parent
def find_right(self):
child = self.current
parent = child.parent
while parent:
if parent.split_horizontal and child is parent.children[0]:
neighbor = parent.children[1]
center = self.current.y + self.current.h * 0.5
while neighbor.client is None:
if neighbor.split_horizontal or neighbor.children[1].y > center:
neighbor = neighbor.children[0]
else:
neighbor = neighbor.children[1]
return neighbor
child = parent
parent = child.parent
def find_up(self):
child = self.current
parent = child.parent
while parent:
if not parent.split_horizontal and child is parent.children[1]:
neighbor = parent.children[0]
center = self.current.x + self.current.w * 0.5
while neighbor.client is None:
if not neighbor.split_horizontal or neighbor.children[1].x < center:
neighbor = neighbor.children[1]
else:
neighbor = neighbor.children[0]
return neighbor
child = parent
parent = child.parent
def find_down(self):
child = self.current
parent = child.parent
while parent:
if not parent.split_horizontal and child is parent.children[0]:
neighbor = parent.children[1]
center = self.current.x + self.current.w * 0.5
while neighbor.client is None:
if not neighbor.split_horizontal or neighbor.children[1].x > center:
neighbor = neighbor.children[0]
else:
neighbor = neighbor.children[1]
return neighbor
child = parent
parent = child.parent
def cmd_left(self):
node = self.find_left()
if node:
self.group.focus(node.client, True)
def cmd_right(self):
node = self.find_right()
if node:
self.group.focus(node.client, True)
def cmd_up(self):
node = self.find_up()
if node:
self.group.focus(node.client, True)
def cmd_down(self):
node = self.find_down()
if node:
self.group.focus(node.client, True)
def cmd_shuffle_left(self):
node = self.find_left()
if node:
node.client, self.current.client = self.current.client, node.client
self.current = node
self.group.layout_all()
elif self.current is not self.root:
node = self.current
self.remove(node.client)
newroot = _BspNode()
newroot.split_horizontal = True
newroot.children = [node, self.root]
self.root.parent = newroot
node.parent = newroot
self.root = newroot
self.current = node
self.group.layout_all()
def cmd_shuffle_right(self):
node = self.find_right()
if node:
node.client, self.current.client = self.current.client, node.client
self.current = node
self.group.layout_all()
elif self.current is not self.root:
node = self.current
self.remove(node.client)
newroot = _BspNode()
newroot.split_horizontal = True
newroot.children = [self.root, node]
self.root.parent = newroot
node.parent = newroot
self.root = newroot
self.current = node
self.group.layout_all()
def cmd_shuffle_up(self):
node = self.find_up()
if node:
node.client, self.current.client = self.current.client, node.client
self.current = node
self.group.layout_all()
elif self.current is not self.root:
node = self.current
self.remove(node.client)
newroot = _BspNode()
newroot.split_horizontal = False
newroot.children = [node, self.root]
self.root.parent = newroot
node.parent = newroot
self.root = newroot
self.current = node
self.group.layout_all()
def cmd_shuffle_down(self):
node = self.find_down()
if node:
node.client, self.current.client = self.current.client, node.client
self.current = node
self.group.layout_all()
elif self.current is not self.root:
node = self.current
self.remove(node.client)
newroot = _BspNode()
newroot.split_horizontal = False
newroot.children = [self.root, node]
self.root.parent = newroot
node.parent = newroot
self.root = newroot
self.current = node
self.group.layout_all()
def cmd_grow_left(self):
child = self.current
parent = child.parent
while parent:
if parent.split_horizontal and child is parent.children[1]:
parent.split_ratio = max(5,
parent.split_ratio - self.grow_amount)
self.group.layout_all()
break
child = parent
parent = child.parent
def cmd_grow_right(self):
child = self.current
parent = child.parent
while parent:
if parent.split_horizontal and child is parent.children[0]:
parent.split_ratio = min(95,
parent.split_ratio + self.grow_amount)
self.group.layout_all()
break
child = parent
parent = child.parent
def cmd_grow_up(self):
child = self.current
parent = child.parent
while parent:
if not parent.split_horizontal and child is parent.children[1]:
parent.split_ratio = max(5,
parent.split_ratio - self.grow_amount)
self.group.layout_all()
break
child = parent
parent = child.parent
def cmd_grow_down(self):
child = self.current
parent = child.parent
while parent:
if not parent.split_horizontal and child is parent.children[0]:
parent.split_ratio = min(95,
parent.split_ratio + self.grow_amount)
self.group.layout_all()
break
child = parent
parent = child.parent
def cmd_flip_left(self):
child = self.current
parent = child.parent
while parent:
if parent.split_horizontal and child is parent.children[1]:
parent.children = parent.children[::-1]
self.group.layout_all()
break
child = parent
parent = child.parent
def cmd_flip_right(self):
child = self.current
parent = child.parent
while parent:
if parent.split_horizontal and child is parent.children[0]:
parent.children = parent.children[::-1]
self.group.layout_all()
break
child = parent
parent = child.parent
def cmd_flip_up(self):
child = self.current
parent = child.parent
while parent:
if not parent.split_horizontal and child is parent.children[1]:
parent.children = parent.children[::-1]
self.group.layout_all()
break
child = parent
parent = child.parent
def cmd_flip_down(self):
child = self.current
parent = child.parent
while parent:
if not parent.split_horizontal and child is parent.children[0]:
parent.children = parent.children[::-1]
self.group.layout_all()
break
child = parent
parent = child.parent
def cmd_normalize(self):
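        # First invocation resets every split ratio to 50%; when all ratios
        # are already at 50%, redistribute space evenly among the leaves
        # instead.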
distribute = True
for node in self.root:
if node.split_ratio != 50:
node.split_ratio = 50
distribute = False
if distribute:
self.root.distribute()
self.group.layout_all()
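# Illustrative sketch (not part of qtile): exercising the _BspNode tree above
# to show how insertions split the space. Any object can stand in for a
# client; plain strings are used here.
def _bsp_node_demo():
    root = _BspNode()
    node_a = root.insert('a', 1, 1.6)  # first client simply fills the root
    node_a.insert('b', 1, 1.6)  # 16:9 default geometry -> horizontal split
    root.calc_geom(0, 0, 160, 90)  # lay the tree out on a 160x90 space
    # yields [('a', 0, 0, 80, 90), ('b', 80, 0, 80, 90)]
    return [(n.client, n.x, n.y, n.w, n.h) for n in root if n.client]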
|
|
# This Python module is part of the PyRate software package.
#
# Copyright 2022 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
This Python module contains tests for the timeseries.py PyRate module.
"""
import os
import shutil
from copy import deepcopy
import pytest
from datetime import date, timedelta
from numpy import nan, asarray, where, array
import numpy as np
from numpy.testing import assert_array_almost_equal
import pyrate.constants as C
import pyrate.core.orbital
import pyrate.core.prepifg_helper
import pyrate.core.ref_phs_est
import pyrate.core.refpixel
import tests.common as common
from pyrate.core import mst, covariance
from pyrate import correct, prepifg, conv2tif
from pyrate.configuration import Configuration
from pyrate.core.timeseries import time_series, linear_rate_pixel, linear_rate_array, TimeSeriesError
def default_params():
return {C.TIME_SERIES_METHOD: 1,
C.TIME_SERIES_PTHRESH: 0,
C.TIME_SERIES_SM_ORDER: 2,
C.TIME_SERIES_SM_FACTOR: -0.25,
C.PARALLEL: 0,
C.PROCESSES: 1,
C.NAN_CONVERSION: 1,
C.NO_DATA_VALUE: 0}
class SinglePixelIfg(object):
"""
A single pixel ifg (interferogram) solely for unit testing
"""
def __init__(self, first, second, phase, nan_fraction):
self.phase_data = asarray([[phase]])
self.first = first
self.second = second
self.nrows = 1
self.ncols = 1
self.nan_fraction = asarray([nan_fraction])
def convert_to_nans(self, val=0):
"""
Converts given values in phase data to NaNs
val - value to convert, default is 0
"""
self.phase_data = where(self.phase_data == val, nan, self.phase_data)
self.nan_converted = True
class TestTimeSeries:
"""Verifies error checking capabilities of the time_series function"""
@classmethod
def setup_class(cls):
cls.ifgs = common.small_data_setup()
cls.params = default_params()
cls.mstmat = mst.mst_boolean_array(cls.ifgs)
r_dist = covariance.RDist(cls.ifgs[0])()
cls.maxvar = [covariance.cvd(i.data_path, cls.params, r_dist)[0]
for i in cls.ifgs]
cls.vcmt = covariance.get_vcmt(cls.ifgs, cls.maxvar)
def test_time_series_unit(self):
"""
        Checks that the code reproduces the hand-calculated example
"""
ifirst = asarray([1, 1, 2, 2, 3, 3, 4, 5])
isecond = asarray([2, 4, 3, 4, 5, 6, 6, 6])
timeseries = asarray([0.0, 0.1, 0.6, 0.8, 1.1, 1.3])
phase = asarray([0.5, 4, 2.5, 3.5, 2.5, 3.5, 2.5, 1])
nan_fraction = asarray([0.5, 0.4, 0.2, 0.3, 0.1, 0.3, 0.2, 0.1])
now = date.today()
dates = [now + timedelta(days=(t*365.25)) for t in timeseries]
dates.sort()
first = [dates[m_num - 1] for m_num in ifirst]
second = [dates[s_num - 1] for s_num in isecond]
self.ifgs = [SinglePixelIfg(m, s, p, n) for m, s, p, n in
zip(first, second, phase, nan_fraction)]
tsincr, tscum, tsvel = time_series(
self.ifgs, params=self.params, vcmt=self.vcmt, mst=None)
expected = asarray([[[0.50, 3.0, 4.0, 5.5, 6.5]]])
assert_array_almost_equal(tscum, expected, decimal=2)
class TestLegacyTimeSeriesEquality:
@classmethod
def setup_class(cls):
params = Configuration(common.TEST_CONF_ROIPAC).__dict__
params[C.TEMP_MLOOKED_DIR] = os.path.join(params[C.OUT_DIR],
C.TEMP_MLOOKED_DIR)
conv2tif.main(params)
prepifg.main(params)
params[C.REF_EST_METHOD] = 2
xlks, _, crop = pyrate.core.prepifg_helper.transform_params(params)
dest_paths, headers = common.repair_params_for_correct_tests(params[C.INTERFEROGRAM_DIR], params)
correct._copy_mlooked(params)
copied_dest_paths = [os.path.join(params[C.TEMP_MLOOKED_DIR], os.path.basename(d)) for d in dest_paths]
del dest_paths
# start run_pyrate copy
ifgs = common.pre_prepare_ifgs(copied_dest_paths, params)
mst_grid = common.mst_calculation(copied_dest_paths, params)
refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(params)
params[C.REFX] = refx
params[C.REFY] = refy
params[C.ORBFIT_OFFSET] = True
# Estimate and remove orbit errors
pyrate.core.orbital.remove_orbital_error(ifgs, params)
ifgs = common.prepare_ifgs_without_phase(copied_dest_paths, params)
for ifg in ifgs:
ifg.close()
correct._update_params_with_tiles(params)
_, ifgs = pyrate.core.ref_phs_est.ref_phase_est_wrapper(params)
ifgs[0].open()
r_dist = covariance.RDist(ifgs[0])()
ifgs[0].close()
maxvar = [covariance.cvd(i, params, r_dist)[0] for i in copied_dest_paths]
for ifg in ifgs:
ifg.open()
vcmt = covariance.get_vcmt(ifgs, maxvar)
for ifg in ifgs:
ifg.close()
ifg.open()
ifg.nodata_value = 0.0
params[C.TIME_SERIES_METHOD] = 1
params[C.PARALLEL] = 0
# Calculate time series
cls.tsincr_0, cls.tscum_0, _ = common.calculate_time_series(ifgs, params, vcmt, mst=mst_grid)
params[C.PARALLEL] = 1
cls.tsincr_1, cls.tscum_1, cls.tsvel_1 = common.calculate_time_series(ifgs, params, vcmt, mst=mst_grid)
# load the legacy data
ts_dir = os.path.join(common.SML_TEST_DIR, 'time_series')
tsincr_path = os.path.join(ts_dir, 'ts_incr_interp0_method1.csv')
ts_incr = np.genfromtxt(tsincr_path)
tscum_path = os.path.join(ts_dir, 'ts_cum_interp0_method1.csv')
ts_cum = np.genfromtxt(tscum_path)
cls.ts_incr = np.reshape(ts_incr, newshape=cls.tsincr_0.shape, order='F')
cls.ts_cum = np.reshape(ts_cum, newshape=cls.tscum_0.shape, order='F')
cls.params = params
@classmethod
def teardown_class(cls):
shutil.rmtree(cls.params[C.OUT_DIR])
def test_time_series_equality_parallel_by_rows(self):
"""
check time series parallel by rows jobs
"""
self.assertEqual(self.tsincr_1.shape, self.tscum_1.shape)
self.assertEqual(self.tsvel_1.shape, self.tsincr_1.shape)
np.testing.assert_array_almost_equal(
self.ts_incr, self.tsincr_1, decimal=3)
np.testing.assert_array_almost_equal(
self.ts_cum, self.tscum_1, decimal=3)
def test_time_series_equality_serial_by_the_pixel(self):
"""
check time series
"""
self.assertEqual(self.tsincr_0.shape, self.tscum_0.shape)
np.testing.assert_array_almost_equal(
self.ts_incr, self.tsincr_0, decimal=3)
np.testing.assert_array_almost_equal(
self.ts_cum, self.tscum_0, decimal=3)
@staticmethod
def assertEqual(val1, val2):
assert val1 == val2
class TestLegacyTimeSeriesEqualityMethod2Interp0:
@classmethod
def setup_class(cls):
params = Configuration(common.TEST_CONF_ROIPAC).__dict__
params[C.TEMP_MLOOKED_DIR] = os.path.join(params[C.OUT_DIR],
C.TEMP_MLOOKED_DIR)
conv2tif.main(params)
prepifg.main(params)
params[C.REF_EST_METHOD] = 2
xlks, _, crop = pyrate.core.prepifg_helper.transform_params(params)
dest_paths, headers = common.repair_params_for_correct_tests(params[C.INTERFEROGRAM_DIR], params)
correct._copy_mlooked(params)
copied_dest_paths = [os.path.join(params[C.TEMP_MLOOKED_DIR], os.path.basename(d)) for d in dest_paths]
del dest_paths
# start run_pyrate copy
ifgs = common.pre_prepare_ifgs(copied_dest_paths, params)
mst_grid = common.mst_calculation(copied_dest_paths, params)
refx, refy = pyrate.core.refpixel.ref_pixel_calc_wrapper(params)
params[C.REFX] = refx
params[C.REFY] = refy
params[C.ORBFIT_OFFSET] = True
# Estimate and remove orbit errors
pyrate.core.orbital.remove_orbital_error(ifgs, params)
ifgs = common.prepare_ifgs_without_phase(copied_dest_paths, params)
for ifg in ifgs:
ifg.close()
correct._update_params_with_tiles(params)
_, ifgs = pyrate.core.ref_phs_est.ref_phase_est_wrapper(params)
ifgs[0].open()
r_dist = covariance.RDist(ifgs[0])()
ifgs[0].close()
# Calculate interferogram noise
maxvar = [covariance.cvd(i, params, r_dist)[0] for i in copied_dest_paths]
for ifg in ifgs:
ifg.open()
vcmt = covariance.get_vcmt(ifgs, maxvar)
for ifg in ifgs:
ifg.close()
ifg.open()
ifg.nodata_value = 0.0
params[C.TIME_SERIES_METHOD] = 2
params[C.PARALLEL] = 1
# Calculate time series
cls.tsincr, cls.tscum, _ = common.calculate_time_series(ifgs, params, vcmt, mst=mst_grid)
params[C.PARALLEL] = 0
        # Calculate time series serially, pixel by pixel
cls.tsincr_0, cls.tscum_0, _ = common.calculate_time_series(ifgs, params, vcmt, mst=mst_grid)
# copy legacy data
SML_TIME_SERIES_DIR = os.path.join(common.SML_TEST_DIR, 'time_series')
tsincr_path = os.path.join(SML_TIME_SERIES_DIR, 'ts_incr_interp0_method2.csv')
ts_incr = np.genfromtxt(tsincr_path)
tscum_path = os.path.join(SML_TIME_SERIES_DIR, 'ts_cum_interp0_method2.csv')
ts_cum = np.genfromtxt(tscum_path)
cls.ts_incr = np.reshape(ts_incr, newshape=cls.tsincr_0.shape, order='F')
cls.ts_cum = np.reshape(ts_cum, newshape=cls.tscum_0.shape, order='F')
cls.params = params
@classmethod
def teardown_class(cls):
shutil.rmtree(cls.params[C.OUT_DIR])
def test_time_series_equality_parallel_by_rows(self):
assert self.tsincr.shape == self.tscum.shape
np.testing.assert_array_almost_equal(self.ts_incr, self.tsincr, decimal=1)
np.testing.assert_array_almost_equal(self.ts_cum, self.tscum, decimal=1)
def test_time_series_equality_serial_by_the_pixel(self):
assert self.tsincr_0.shape == self.tscum_0.shape
np.testing.assert_array_almost_equal(self.ts_incr, self.tsincr_0, decimal=3)
np.testing.assert_array_almost_equal(self.ts_cum, self.tscum_0, decimal=3)
class TestLinearRatePixel:
"""
Tests the linear regression algorithm for determining the best
fitting velocity from a cumulative time series
"""
def test_linear_rate_pixel_clean(self):
y = array([0, 2, 4, 6, 8, 10])
t = array([0, 1, 2, 3, 4, 5])
exp = (2.0, 0.0, 1.0, 0.0, 6)
res = linear_rate_pixel(y, t)
assert res == exp
def test_linear_rate_pixel_neg_rate(self):
y = array([0, -2, -4, -6, -8, -10])
t = array([0, 1, 2, 3, 4, 5])
exp = (-2.0, 0.0, 1.0, 0.0, 6)
res = linear_rate_pixel(y, t)
assert res == exp
def test_linear_rate_pixel_outlier(self):
y = array([0, 2, 4, 6, 8, 20])
t = array([0, 1, 2, 3, 4, 5])
exp = (3.428571, -1.904761, 0.812030, 0.824786, 6)
res = linear_rate_pixel(y, t)
assert res == pytest.approx(exp, rel=1e-6)
def test_linear_rate_pixel_noise(self):
y = array([0, 2, 4, 6, 8, 10])
r = y + np.random.rand(6) # add different uniform noise each time
t = array([0, 1, 2, 3, 4, 5])
exprate = 2.0
explsqd = 1.0
experr = 0.0
        # fit the noisy series (r), not the clean one; use absolute tolerances
        # since the expected error is zero and the noise is small
        rate, _, lsqd, err, _ = linear_rate_pixel(r, t)
        assert exprate == pytest.approx(rate, abs=3e-1)
        assert explsqd == pytest.approx(lsqd, abs=1e-1)
        assert experr == pytest.approx(err, abs=2e-1)
def test_linear_rate_pixel_exception(self):
# input vectors should be equal length
y = array([2, 4, 6, 8, 10])
t = array([0, 1, 2, 3, 4, 5])
with pytest.raises(TimeSeriesError):
            linear_rate_pixel(y, t)
def test_linear_rate_pixel_nans(self):
# at least two obs are required for line fitting
y = array([0, nan, nan, nan, nan, nan])
t = array([0, 1, 2, 3, 4, 5])
exp = (nan, nan, nan, nan, nan)
res = linear_rate_pixel(y, t)
assert res == exp
class TestLinearRateArray:
"""
Tests the array loop wrapper for the linear regression algorithm using real data
"""
@classmethod
@pytest.fixture(autouse=True)
def setup_class(cls, roipac_params):
cls.params = roipac_params
cls.ifgs = common.small_data_setup()
# read in input (tscuml) and expected output arrays
tscuml_path = os.path.join(common.SML_TEST_LINRATE, "tscuml_0.npy")
cls.tscuml0 = np.load(tscuml_path)
# add zero epoch to tscuml 3D array
cls.tscuml = np.insert(cls.tscuml0, 0, 0, axis=2)
linrate_path = os.path.join(common.SML_TEST_LINRATE, "linear_rate.npy")
cls.linrate = np.load(linrate_path)
error_path = os.path.join(common.SML_TEST_LINRATE, "linear_error.npy")
cls.error = np.load(error_path)
icpt_path = os.path.join(common.SML_TEST_LINRATE, "linear_intercept.npy")
cls.icpt = np.load(icpt_path)
samp_path = os.path.join(common.SML_TEST_LINRATE, "linear_samples.npy")
cls.samp = np.load(samp_path)
rsq_path = os.path.join(common.SML_TEST_LINRATE, "linear_rsquared.npy")
cls.rsq = np.load(rsq_path)
def test_linear_rate_array(self):
"""
Input and expected output are on disk. This test only tests the linear_rate_array
and linear_rate_pixel functions using real data.
"""
l, i, r, e, s = linear_rate_array(self.tscuml, self.ifgs, self.params)
        # compare the computed arrays against the expected arrays from disk
assert_array_almost_equal(self.linrate, l, 1e-20)
assert_array_almost_equal(self.icpt, i, 1e-20)
assert_array_almost_equal(self.rsq, r, 1e-20)
assert_array_almost_equal(self.error, e, 1e-20)
assert_array_almost_equal(self.samp, s, 1e-20)
def test_linear_rate_array_two_sigma(self):
"""
Check that the "nsigma" switch in the config dictionary
actually results in a change in the error map.
"""
# make a deep copy of the params dict to avoid changing
# state for other tests if this one fails
params = deepcopy(self.params)
params[C.VELERROR_NSIG] = 2
_, _, _, e, _ = linear_rate_array(self.tscuml, self.ifgs, params)
assert_array_almost_equal(self.error*2, e, 1e-20)
def test_linear_rate_array_exception(self):
# depth of tscuml should equal nepochs
with pytest.raises(TimeSeriesError):
            linear_rate_array(self.tscuml0, self.ifgs, self.params)
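# Illustrative sketch (an assumption, not PyRate code): the expected tuples in
# TestLinearRatePixel are consistent with an ordinary least-squares line fit
# whose fourth element matches the standard error of the fitted rate. The
# first three values of the clean case can be reproduced with plain numpy:
def _ols_rate_sketch(y, t):
    rate, intercept = np.polyfit(t, y, 1)
    fitted = rate * t + intercept
    ss_res = np.sum((y - fitted) ** 2)
    ss_tot = np.sum((y - np.mean(y)) ** 2)
    rsquared = 1.0 - ss_res / ss_tot
    # _ols_rate_sketch(array([0, 2, 4, 6, 8, 10]), array([0, 1, 2, 3, 4, 5]))
    # gives approximately (2.0, 0.0, 1.0), matching exp in the tests above
    return rate, intercept, rsquared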
|
|
#!/usr/bin/env python
import argparse
import os
import shutil
import subprocess
import sys
class ConfigInstaller(object):
def __init__(self):
self.options = []
self.home = os.environ['HOME']
self.command = ''
def parse_arguments(self):
formatter = argparse.RawDescriptionHelpFormatter
uses = '''Examples:
*Install availability monitoring and centralized logging on localhost:
opstools-server-installation --am-hosts localhost --log-hosts localhost
*Install performance monitoring on a server IP:
opstools-server-installation --pm-hosts IP
*Install performance and availability monitoring on a server:
opstools-server-installation --pm-hosts IP --am-hosts IP
*Just create the inventory, overwriting it if it exists:
opstools-server-installation -f --no-x '''
parser = argparse.ArgumentParser(prog='opstools-server-installation',
formatter_class=formatter,
description=(
'Script to set up the'
' operational servers.'
),
epilog=uses)
parser.add_argument('-i', '--inventory',
dest='opstoolinvpath',
help=(
'Opstool inventory path. '
'default= ~/.opstools.inventory'
),
default=self.home + '/.opstools.inventory')
parser.add_argument('--pm-hosts',
dest='pm_hosts',
action='append',
help=(
'Performance monitoring server IP. '
'default = []'
),
default=[])
parser.add_argument('--am-hosts',
dest='am_hosts',
action='append',
help=(
                                'Availability monitoring server IP. '
'default = []'
),
default=[])
parser.add_argument('--log-hosts',
dest='logging_hosts',
action='append',
help=(
                                'Centralized logging server IP. '
                                'default = []'
),
default=[])
parser.add_argument('-qs',
dest='ooo_ssh_ansible',
help=(
'Quickstart config path. '
'default=~/.quickstart/ssh.config.ansible'
),
default='{}/.quickstart/ssh.config.ansible'.format(
self.home))
parser.add_argument('--playbook',
dest='playbook',
type=str,
help='Playbook to run',
choices=('playbook.yml',
'playbook-post-install.yml'),
default='playbook.yml')
parser.add_argument('-f',
dest='force',
help='Overwrite the config file',
action='store_true',
default=False)
parser.add_argument('-e',
dest='parameters',
type=str,
help='Extra parameters file.yml')
parser.add_argument('--no-x',
dest='no_exec',
default=False,
action='store_true',
                            help="Don't run ansible-playbook")
parser.add_argument('--data-path',
dest='data_path',
default='/usr/share/opstools-ansible',
help=(
'Path where the playbooks, roles, inventory '
' and so on are located. default location is '
'/usr/share/opstools-ansible'
))
self.options = parser.parse_args()
self.parser = parser
def _parse_hosts(self):
hosts = {'all': [],
'split': {
'pm_hosts': [],
'am_hosts': [],
'logging_hosts': []
}}
for host_type in ('pm_hosts', 'am_hosts', 'logging_hosts'):
for host in getattr(self.options, host_type):
hosts['split'][host_type].append(host)
hosts['all'].append(host)
return hosts
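    # Example (illustrative): with `--pm-hosts 10.0.0.5 --log-hosts localhost`
    # _parse_hosts() returns
    #     {'all': ['10.0.0.5', 'localhost'],
    #      'split': {'pm_hosts': ['10.0.0.5'],
    #                'am_hosts': [],
    #                'logging_hosts': ['localhost']}}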
def _create_hosts_file(self):
_path = os.path.join(self.options.opstoolinvpath, 'hosts')
hosts = self._parse_hosts()
if ((not os.path.exists(_path) or self.options.force) and
len(hosts['all']) > 0):
with open(_path, 'w') as hosts_file:
for host_type in hosts['split'].keys():
hosts_file.write('[{}]\n'.format(host_type))
for host in hosts['split'][host_type]:
hosts_file.write('{}\n'.format(host))
hosts_file.write('\n')
hosts_file.write("[targets]\n")
for host in set(hosts['all']):
if host in ('localhost', 'localhost6', '127.0.0.1', '::1'):
hosts_file.write('{} ansible_connection=local'
'\n'.format(host))
else:
hosts_file.write('{} ansible_connection=ssh '
'ansible_user=root\n'.format(
host
)
)
hosts_file.write('undercloud_host ansible_user=stack '
'ansible_host=undercloud '
'ansible_ssh_extra_args=\'-F {}\'\n'.format(
self.options.ooo_ssh_ansible
)
)
else:
print('\x1b[34m INFO: the inventory file was not'
' created/updated\x1b[0m')
def create_configuration(self):
try:
# destination file
_dest_f = os.path.join(self.options.opstoolinvpath,
'structure')
# origin file
_source_f = os.path.join(self.options.data_path,
'inventory',
'structure')
if not os.path.isdir(self.options.opstoolinvpath):
os.makedirs(self.options.opstoolinvpath)
if not os.path.exists(_dest_f) or self.options.force:
shutil.copy(_source_f, _dest_f)
self._create_hosts_file()
except Exception:
print('Error: There was a problem creating the inventory')
self.parser.print_help()
sys.exit(-1)
def create_command(self):
self.command = (
'ansible-playbook -i {inventory} {path}/{playbook}'.format(
inventory=self.options.opstoolinvpath,
path=self.options.data_path,
playbook=self.options.playbook
)
)
if self.options.parameters:
if os.path.exists(self.options.parameters):
self.command = '{command} -e {parameters}'.format(
command=self.command,
parameters=self.options.parameters)
else:
                print('Error: The file {parameters} does not exist'
.format(parameters=self.options.parameters))
self.parser.print_usage()
sys.exit(-1)
def main():
config = ConfigInstaller()
config.parse_arguments()
config.create_configuration()
config.create_command()
if not config.options.no_exec:
subprocess.call(config.command, shell=True)
else:
print('To continue:\n\t{}'.format(config.command))
if __name__ == "__main__":
main()
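# Example (illustrative): a run such as
#   opstools-server-installation --pm-hosts 10.0.0.5 --log-hosts localhost
# writes ~/.opstools.inventory/hosts with a section per host type plus a
# [targets] section (localhost entries get ansible_connection=local, other
# hosts ansible_connection=ssh ansible_user=root), and then executes:
#   ansible-playbook -i ~/.opstools.inventory /usr/share/opstools-ansible/playbook.yml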
|
|
"""
Tests for fixing the values of some parameters and estimating others
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pytest
from statsmodels import datasets
from statsmodels.tsa.statespace import (
initialization, mlemodel, sarimax, structural, dynamic_factor, varmax)
from numpy.testing import assert_, assert_raises, assert_equal, assert_allclose
macrodata = datasets.macrodata.load_pandas().data
def test_fix_params():
# Just create a dummy model to test the basic `fix_params` mechanics
mod = mlemodel.MLEModel([], 1)
mod._param_names = ['a', 'b', 'c']
with mod.fix_params({'b': 1.}):
assert_(mod._has_fixed_params)
assert_equal(mod._fixed_params, {'b': 1.})
assert_equal(mod._fixed_params_index, [1])
assert_equal(mod._free_params_index, [0, 2])
assert_(not mod._has_fixed_params)
assert_equal(mod._fixed_params, {})
assert_equal(mod._fixed_params_index, None)
assert_equal(mod._free_params_index, None)
def test_results_append():
endog = macrodata['infl']
endog1 = endog.iloc[:100]
endog2 = endog.iloc[100:]
mod_full = sarimax.SARIMAX(endog)
with mod_full.fix_params({'ar.L1': 0.5}):
res_full = mod_full.smooth([1.], includes_fixed=False)
# Start pretty close to optimum to speed up test
start_params = [10.3]
res_full_fit = mod_full.fit(start_params, disp=False)
mod = sarimax.SARIMAX(endog1)
with mod.fix_params({'ar.L1': 0.5}):
res1 = mod.smooth([1.], includes_fixed=False)
# Append should work outside the context manager, since the results object
# was created with fixed parameters
res2 = res1.append(endog2)
# Test to make sure refit works with fixed parameters
res2_fit = res1.append(endog2, refit=True, fit_kwargs={
'disp': False, 'start_params': res_full_fit.params})
# Check non-refit
assert_allclose(res2.params, res_full.params)
assert_equal(res2._fixed_params, res_full._fixed_params)
assert_allclose(res2.llf_obs, res_full.llf_obs)
# Check refit results
assert_allclose(res2_fit.params, res_full_fit.params)
assert_equal(res2_fit._fixed_params, res_full_fit._fixed_params)
assert_allclose(res2_fit.llf_obs, res_full_fit.llf_obs)
def test_results_extend():
endog = macrodata['infl']
endog1 = endog.iloc[:100]
endog2 = endog.iloc[100:]
mod_full = sarimax.SARIMAX(endog)
with mod_full.fix_params({'ar.L1': 0.5}):
res_full = mod_full.smooth([1.], includes_fixed=False)
mod = sarimax.SARIMAX(endog1)
with mod.fix_params({'ar.L1': 0.5}):
res1 = mod.smooth([1.], includes_fixed=False)
    # Extend should work outside the context manager, since the results object
    # was created with fixed parameters
    res2 = res1.extend(endog2)
    # Check results; extend returns results for the new observations only, so
    # compare the log-likelihoods against the matching slice of the full run
    assert_allclose(res2.params, res_full.params)
    assert_equal(res2._fixed_params, res_full._fixed_params)
    assert_allclose(res2.llf_obs, res_full.llf_obs[100:])
def test_results_apply():
endog = macrodata['infl']
mod = sarimax.SARIMAX(endog)
with mod.fix_params({'ar.L1': 0.5}):
res = mod.smooth([1.], includes_fixed=False)
# Start pretty close to optimum to speed up test
start_params = [10.3]
res_fit = mod.fit(start_params, disp=False)
res2 = res.apply(endog)
# Test to make sure refit works with fixed parameters
res2_fit = res.apply(endog, refit=True, fit_kwargs={
'disp': False, 'start_params': res_fit.params})
# Check non-refit
assert_allclose(res2.params, res.params)
assert_equal(res2._fixed_params, res._fixed_params)
assert_allclose(res2.llf_obs, res.llf_obs)
# Check refit results
assert_allclose(res2_fit.params, res_fit.params)
assert_equal(res2_fit._fixed_params, res_fit._fixed_params)
assert_allclose(res2_fit.llf_obs, res_fit.llf_obs)
def test_sarimax_invalid():
# Test for invalid uses of parameter fixing
endog = macrodata['infl']
mod1 = sarimax.SARIMAX(endog, order=(2, 0, 0))
# Try to fix invalid parameter
assert_raises(ValueError, mod1.fit_constrained, {'AR.L1': 0.5})
# Cannot fix individual parameters that are part of a multivariate
# transformation
with pytest.raises(ValueError):
with mod1.fix_params({'ar.L1': 0.5}):
pass
assert_raises(ValueError, mod1.fit_constrained, {'ar.L1': 0.5})
# But can fix the entire set of parameters that are part of a multivariate
# transformation
with mod1.fix_params({'ar.L1': 0.5, 'ar.L2': 0.2}):
assert_(mod1._has_fixed_params)
assert_equal(mod1._fixed_params, {'ar.L1': 0.5, 'ar.L2': 0.2})
assert_equal(mod1._fixed_params_index, [0, 1])
assert_equal(mod1._free_params_index, [2])
res = mod1.fit_constrained({'ar.L1': 0.5, 'ar.L2': 0.2},
start_params=[7.0], disp=False)
assert_(res._has_fixed_params)
assert_equal(res._fixed_params, {'ar.L1': 0.5, 'ar.L2': 0.2})
assert_equal(res._fixed_params_index, [0, 1])
assert_equal(res._free_params_index, [2])
def test_structural_invalid():
# Test for invalid uses of parameter fixing
endog = macrodata['infl']
mod1 = structural.UnobservedComponents(endog, 'rwalk', ar=2)
# Try to fix invalid parameter
assert_raises(ValueError, mod1.fit_constrained, {'AR.L1': 0.5})
# Cannot fix parameters that are part of a multivariate transformation
with pytest.raises(ValueError):
with mod1.fix_params({'ar.L1': 0.5}):
pass
assert_raises(ValueError, mod1.fit_constrained, {'ar.L1': 0.5})
def test_dynamic_factor_invalid():
# Test for invalid uses of parameter fixing
endog = np.log(macrodata[['cpi', 'realgdp', 'realinv']]).diff().iloc[1:]
endog = (endog - endog.mean()) / endog.std()
# Basic model
mod1 = dynamic_factor.DynamicFactor(
endog, k_factors=1, factor_order=1, error_cov_type='diagonal')
# Check loading
constraints = {'loading.f1.cpi': 0.5}
with mod1.fix_params(constraints):
assert_(mod1._has_fixed_params)
assert_equal(mod1._fixed_params, constraints)
assert_equal(mod1._fixed_params_index, [0])
assert_equal(mod1._free_params_index, [1, 2, 3, 4, 5, 6])
res1 = mod1.fit_constrained(constraints, disp=False)
assert_(res1._has_fixed_params)
assert_equal(res1._fixed_params, constraints)
assert_equal(res1._fixed_params_index, [0])
assert_equal(res1._free_params_index, [1, 2, 3, 4, 5, 6])
# With k_factors=1 and factor_order=1, we can fix the factor AR coefficient
# even with `enforce_stationarity=True`.
# Fix factor AR coefficient
with mod1.fix_params({'L1.f1.f1': 0.5}):
assert_(mod1._has_fixed_params)
assert_equal(mod1._fixed_params, {'L1.f1.f1': 0.5})
assert_equal(mod1._fixed_params_index, [6])
assert_equal(mod1._free_params_index, [0, 1, 2, 3, 4, 5])
# With k_factors > 1 or factor_order > 1, we can only fix the entire set of
# factor AR coefficients when `enforce_stationarity=True`.
mod2 = dynamic_factor.DynamicFactor(
endog, k_factors=1, factor_order=2, error_cov_type='diagonal')
with pytest.raises(ValueError):
with mod2.fix_params({'L1.f1.f1': 0.5}):
pass
constraints = {'L1.f1.f1': 0.3, 'L2.f1.f1': 0.1}
with mod2.fix_params(constraints):
assert_(mod2._has_fixed_params)
assert_equal(mod2._fixed_params, constraints)
assert_equal(mod2._fixed_params_index, [6, 7])
assert_equal(mod2._free_params_index, [0, 1, 2, 3, 4, 5])
res2 = mod2.fit_constrained(constraints, disp=False)
assert_(res2._has_fixed_params)
assert_equal(res2._fixed_params, constraints)
assert_equal(res2._fixed_params_index, [6, 7])
assert_equal(res2._free_params_index, [0, 1, 2, 3, 4, 5])
# (same as previous, now k_factors=2)
mod3 = dynamic_factor.DynamicFactor(
endog, k_factors=2, factor_order=1, error_cov_type='diagonal')
with pytest.raises(ValueError):
with mod3.fix_params({'L1.f1.f1': 0.3}):
pass
constraints = dict([('L1.f1.f1', 0.3), ('L1.f2.f1', 0.1),
('L1.f1.f2', -0.05), ('L1.f2.f2', 0.1)])
with mod3.fix_params(constraints):
assert_(mod3._has_fixed_params)
assert_equal(mod3._fixed_params, constraints)
assert_equal(mod3._fixed_params_index, [9, 10, 11, 12])
assert_equal(mod3._free_params_index, [0, 1, 2, 3, 4, 5, 6, 7, 8])
res3 = mod3.fit_constrained(constraints, disp=False)
assert_(res3._has_fixed_params)
assert_equal(res3._fixed_params, constraints)
assert_equal(res3._fixed_params_index, [9, 10, 11, 12])
assert_equal(res3._free_params_index, [0, 1, 2, 3, 4, 5, 6, 7, 8])
# Now, with enforce_stationarity=False, we can fix any of the factor AR
# coefficients
mod4 = dynamic_factor.DynamicFactor(
endog, k_factors=1, factor_order=2, error_cov_type='diagonal',
enforce_stationarity=False)
with mod4.fix_params({'L1.f1.f1': 0.6}):
assert_(mod4._has_fixed_params)
assert_equal(mod4._fixed_params, {'L1.f1.f1': 0.6})
assert_equal(mod4._fixed_params_index, [6])
assert_equal(mod4._free_params_index, [0, 1, 2, 3, 4, 5, 7])
mod5 = dynamic_factor.DynamicFactor(
endog, k_factors=2, factor_order=1, error_cov_type='diagonal',
enforce_stationarity=False)
with mod5.fix_params({'L1.f1.f1': 0.6}):
assert_(mod5._has_fixed_params)
assert_equal(mod5._fixed_params, {'L1.f1.f1': 0.6})
assert_equal(mod5._fixed_params_index, [9])
assert_equal(mod5._free_params_index,
[0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12])
# Check error variance
# (mod1 has error_cov_type='diagonal', so we can fix any that we like)
constraints = {'sigma2.cpi': 0.9, 'sigma2.realinv': 3}
with mod1.fix_params(constraints):
assert_(mod1._has_fixed_params)
assert_equal(mod1._fixed_params, constraints)
assert_equal(mod1._fixed_params_index, [3, 5])
assert_equal(mod1._free_params_index, [0, 1, 2, 4, 6])
res1 = mod1.fit_constrained(constraints, disp=False)
assert_(res1._has_fixed_params)
assert_equal(res1._fixed_params, constraints)
assert_equal(res1._fixed_params_index, [3, 5])
assert_equal(res1._free_params_index, [0, 1, 2, 4, 6])
# Check unstructured error variance
# (also reduce k_endog and fix some other parameters to make MLE go faster)
mod6 = dynamic_factor.DynamicFactor(
endog[['cpi', 'realgdp']], k_factors=1, factor_order=1,
error_cov_type='unstructured')
constraints = {
'loading.f1.cpi': 1., 'loading.f1.realgdp': 1., 'cov.chol[1,1]': 0.5,
'cov.chol[2,1]': 0.1}
with mod6.fix_params(constraints):
assert_(mod6._has_fixed_params)
assert_equal(mod6._fixed_params, constraints)
assert_equal(mod6._fixed_params_index, [0, 1, 2, 3])
assert_equal(mod6._free_params_index, [4, 5])
res6 = mod6.fit_constrained(constraints, disp=False)
assert_(res6._has_fixed_params)
assert_equal(res6._fixed_params, constraints)
assert_equal(res6._fixed_params_index, [0, 1, 2, 3])
assert_equal(res6._free_params_index, [4, 5])
def test_varmax_invalid():
# Test for invalid uses of parameter fixing
endog = np.log(macrodata[['cpi', 'realgdp']]).diff().iloc[1:]
exog = np.log(macrodata[['realinv']]).diff().iloc[1:]
# Basic model, VAR(1) + exog + measurement error
mod1 = varmax.VARMAX(endog, order=(1, 0), exog=exog,
measurement_error=True)
# Check intercept, exog
constraints = {'intercept.cpi': 0.5, 'intercept.realgdp': 1.1,
'beta.realinv.cpi': 0.2, 'beta.realinv.realgdp': 0.1,
'sqrt.var.cpi': 1.2, 'sqrt.cov.cpi.realgdp': -0.1,
'sqrt.var.realgdp': 2.3, 'measurement_variance.cpi': 0.4,
'measurement_variance.realgdp': 0.4}
with mod1.fix_params(constraints):
assert_(mod1._has_fixed_params)
assert_equal(mod1._fixed_params, constraints)
assert_equal(mod1._fixed_params_index, [0, 1, 6, 7, 8, 9, 10, 11, 12])
assert_equal(mod1._free_params_index, [2, 3, 4, 5])
res1 = mod1.fit_constrained(constraints, disp=False)
assert_(res1._has_fixed_params)
assert_equal(res1._fixed_params, constraints)
assert_equal(res1._fixed_params_index, [0, 1, 6, 7, 8, 9, 10, 11, 12])
assert_equal(res1._free_params_index, [2, 3, 4, 5])
    # With k_endog=1 we can fix the AR coefficient
    # even with `enforce_stationarity=True`.
    # Fix AR coefficient
mod2 = varmax.VARMAX(endog[['cpi']], order=(1, 0), exog=exog,
measurement_error=True)
constraints = {'L1.cpi.cpi': 0.5}
with mod2.fix_params(constraints):
assert_(mod2._has_fixed_params)
assert_equal(mod2._fixed_params, constraints)
assert_equal(mod2._fixed_params_index, [1])
assert_equal(mod2._free_params_index, [0, 2, 3, 4])
# With k_ar > 1, we can only fix the entire set of AR coefficients when
# `enforce_stationarity=True`.
mod3 = varmax.VARMAX(endog[['cpi']], order=(2, 0))
with pytest.raises(ValueError):
with mod3.fix_params({'L1.cpi.cpi': 0.5}):
pass
constraints = {'L1.cpi.cpi': 0.3, 'L2.cpi.cpi': 0.1}
with mod3.fix_params(constraints):
assert_(mod3._has_fixed_params)
assert_equal(mod3._fixed_params, constraints)
assert_equal(mod3._fixed_params_index, [1, 2])
assert_equal(mod3._free_params_index, [0, 3])
res3 = mod3.fit_constrained(constraints, start_params=[0, 1.], disp=False)
assert_(res3._has_fixed_params)
assert_equal(res3._fixed_params, constraints)
assert_equal(res3._fixed_params_index, [1, 2])
assert_equal(res3._free_params_index, [0, 3])
# With k_endog > 1, we can only fix the entire set of AR coefficients when
# `enforce_stationarity=True`.
mod4 = varmax.VARMAX(endog, order=(1, 0))
with pytest.raises(ValueError):
with mod4.fix_params({'L1.cpi.cpi': 0.3}):
pass
constraints = dict([('L1.cpi.cpi', 0.3), ('L1.realgdp.cpi', 0.1),
('L1.cpi.realgdp', -0.05),
('L1.realgdp.realgdp', 0.1)])
with mod4.fix_params(constraints):
assert_(mod4._has_fixed_params)
assert_equal(mod4._fixed_params, constraints)
assert_equal(mod4._fixed_params_index, [2, 3, 4, 5])
assert_equal(mod4._free_params_index, [0, 1, 6, 7, 8])
res4 = mod4.fit_constrained(constraints, disp=False)
assert_(res4._has_fixed_params)
assert_equal(res4._fixed_params, constraints)
assert_equal(res4._fixed_params_index, [2, 3, 4, 5])
assert_equal(res4._free_params_index, [0, 1, 6, 7, 8])
# Now, with enforce_stationarity=False, we can fix any of the AR
# coefficients
    mod5 = varmax.VARMAX(endog[['cpi']], order=(2, 0),
enforce_stationarity=False)
with mod5.fix_params({'L1.cpi.cpi': 0.6}):
assert_(mod5._has_fixed_params)
assert_equal(mod5._fixed_params, {'L1.cpi.cpi': 0.6})
assert_equal(mod5._fixed_params_index, [1])
        assert_equal(mod5._free_params_index, [0, 2, 3])
    # (same as previous, now k_endog=2)
    mod6 = varmax.VARMAX(endog, order=(1, 0),
enforce_stationarity=False)
with mod6.fix_params({'L1.cpi.cpi': 0.6}):
assert_(mod6._has_fixed_params)
assert_equal(mod6._fixed_params, {'L1.cpi.cpi': 0.6})
assert_equal(mod6._fixed_params_index, [2])
assert_equal(mod6._free_params_index, [0, 1, 3, 4, 5, 6, 7, 8])
def check_results(res1, res2, check_lutkepohl=False, check_params=True):
# Check other results
assert_allclose(res2.nobs, res1.nobs)
assert_allclose(res2.nobs_diffuse, res1.nobs_diffuse)
assert_allclose(res2.nobs_effective, res1.nobs_effective)
assert_allclose(res2.k_diffuse_states, res1.k_diffuse_states)
assert_allclose(res2.df_model, res1.df_model)
assert_allclose(res2.df_resid, res1.df_resid)
assert_allclose(res2.llf, res1.llf)
assert_allclose(res2.aic, res1.aic)
assert_allclose(res2.bic, res1.bic)
assert_allclose(res2.hqic, res1.hqic)
if check_lutkepohl:
assert_allclose(res2.info_criteria('aic', 'lutkepohl'),
res1.info_criteria('aic', 'lutkepohl'))
assert_allclose(res2.info_criteria('bic', 'lutkepohl'),
res1.info_criteria('bic', 'lutkepohl'))
assert_allclose(res2.info_criteria('hqic', 'lutkepohl'),
res1.info_criteria('hqic', 'lutkepohl'))
assert_allclose(res2.llf_obs, res1.llf_obs)
    assert_allclose(res2.fittedvalues, res1.fittedvalues)
if check_params:
# Check parameter-related values
mask_free = res2._free_params_index
mask_fixed = res2._fixed_params_index
assert_allclose(res2.pvalues[mask_free], res1.pvalues)
assert_allclose(res2.pvalues[mask_fixed], np.nan)
assert_allclose(res2.bse[mask_free], res1.bse)
assert_allclose(res2.bse[mask_fixed], np.nan)
assert_allclose(res2.zvalues[mask_free], res1.zvalues)
assert_allclose(res2.zvalues[mask_fixed], np.nan)
# Check parameter covariance matrix
mask_free = np.ix_(res2._free_params_index, res2._free_params_index)
mask_fixed = np.ix_(res2._fixed_params_index, res2._fixed_params_index)
assert_allclose(res2.cov_params_default.values[mask_free],
res1.cov_params_default)
assert_allclose(res2.cov_params_default.values[mask_fixed], np.nan)
assert_allclose(res2.cov_params_approx.values[mask_free],
res1.cov_params_approx)
assert_allclose(res2.cov_params_approx.values[mask_fixed], np.nan)
assert_allclose(res2.cov_params_oim.values[mask_free],
res1.cov_params_oim)
assert_allclose(res2.cov_params_oim.values[mask_fixed], np.nan)
assert_allclose(res2.cov_params_opg.values[mask_free],
res1.cov_params_opg)
assert_allclose(res2.cov_params_opg.values[mask_fixed], np.nan)
assert_allclose(res2.cov_params_robust.values[mask_free],
res1.cov_params_robust)
assert_allclose(res2.cov_params_robust.values[mask_fixed], np.nan)
assert_allclose(res2.cov_params_robust_oim.values[mask_free],
res1.cov_params_robust_oim)
assert_allclose(res2.cov_params_robust_oim.values[mask_fixed], np.nan)
assert_allclose(res2.cov_params_robust_approx.values[mask_free],
res1.cov_params_robust_approx)
assert_allclose(res2.cov_params_robust_approx.values[mask_fixed],
np.nan)
# Check residual hypothesis tests
assert_allclose(res2.test_normality('jarquebera'),
res1.test_normality('jarquebera'))
assert_allclose(res2.test_heteroskedasticity('breakvar'),
res1.test_heteroskedasticity('breakvar'))
with pytest.warns(FutureWarning):
actual = res2.test_serial_correlation('ljungbox')
desired = res1.test_serial_correlation('ljungbox')
assert_allclose(actual, desired)
def test_sarimax_nonconsecutive():
# SARIMAX allows using non-consecutive lag orders, which implicitly fix
# AR coefficients to zeros, so we can test explicitly fixed AR coefficients
# against this
endog = macrodata['infl']
# y_t = \phi_1 y_{t-1} + \phi_4 y_{t-4} + \varepsilon_t
# Note: because the transformation will not respect the parameter
# constraints, we will need to set enforce_stationarity=False; set it to
# False here too so that they both are the same model
mod1 = sarimax.SARIMAX(endog, order=([1, 0, 0, 1], 0, 0),
enforce_stationarity=False)
mod2 = sarimax.SARIMAX(endog, order=(4, 0, 0), enforce_stationarity=False)
# Start pretty close to optimum to speed up test
start_params = [0.6, 0.2, 6.4]
res1 = mod1.fit(start_params, disp=False)
res2 = mod2.fit_constrained({'ar.L2': 0, 'ar.L3': 0}, res1.params,
includes_fixed=False, disp=False)
# Check that the right parameters were fixed
assert_equal(res1.fixed_params, [])
assert_equal(res2.fixed_params, ['ar.L2', 'ar.L3'])
# Check that MLE finds the same parameters in either case
desired = np.r_[res1.params[0], 0, 0, res1.params[1:]]
assert_allclose(res2.params, desired)
# Now smooth at the actual parameters (to allow high precision testing
# below, even if there are small differences between MLE fitted parameters)
with mod2.fix_params({'ar.L2': 0, 'ar.L3': 0}):
res2 = mod2.smooth(res1.params)
check_results(res1, res2, check_lutkepohl=True)
# Check that results methods work within the context
with mod2.fix_params({'ar.L2': 0, 'ar.L3': 0}):
res3 = mod2.filter(res2.params, includes_fixed=True)
check_results(res1, res3, check_lutkepohl=True)
def test_structural():
# Many of the forms of UnobservedComponents are just special cases of the
# most general model with parameter restrictions
endog = macrodata['infl']
# Local linear trend is a pretty general model, so we can test fixing
# parameters against other more specific models
# Local level is local linear trend with sigma2.trend = 0
mod1 = structural.UnobservedComponents(endog, 'llevel')
mod2 = structural.UnobservedComponents(endog, 'lltrend')
# Note: have to reinitialize, so the estimate of the trend term stays at
# zero.
init = initialization.Initialization(mod2.k_states)
init[0] = 'approximate_diffuse'
init.set(1, 'known', constant=[0])
mod2.ssm.initialization = init
mod2.ssm.loglikelihood_burn = 1
constraints = {'sigma2.trend': 0}
# Start pretty close to optimum to speed up test
start_params = [3.37, 0.74]
res1 = mod1.fit(start_params, disp=False)
res2 = mod2.fit_constrained(constraints, start_params=res1.params,
includes_fixed=False, disp=False)
# Check that the right parameters were fixed
assert_equal(res1.fixed_params, [])
assert_equal(res2.fixed_params, ['sigma2.trend'])
# Check that MLE finds the same parameters in either case
desired = np.r_[res1.params, 0]
assert_allclose(res2.params, desired)
# Now smooth at the actual parameters (to allow high precision testing
# below, even if there are small differences between MLE fitted parameters)
with mod2.fix_params(constraints):
res2 = mod2.smooth(res1.params)
check_results(res1, res2)
def test_dynamic_factor_diag_error_cov():
# Can test fixing the off-diagonal error covariance parameters to zeros
# with `error_cov_type='unstructured'` against the case
# `error_cov_type='diagonal'`.
endog = np.log(macrodata[['cpi', 'realgdp']]).diff().iloc[1:]
endog = (endog - endog.mean()) / endog.std()
# Basic model
mod1 = dynamic_factor.DynamicFactor(
endog, k_factors=1, factor_order=1, error_cov_type='diagonal')
mod2 = dynamic_factor.DynamicFactor(
endog, k_factors=1, factor_order=1, error_cov_type='unstructured')
constraints = {'cov.chol[2,1]': 0}
# Start pretty close to optimum to speed up test
start_params = [-4.5e-06, -1.0e-05, 9.9e-01, 9.9e-01, -1.4e-01]
res1 = mod1.fit(start_params=start_params, disp=False)
res2 = mod2.fit_constrained(constraints, start_params=res1.params,
includes_fixed=False, disp=False)
# Check that the right parameters were fixed
assert_equal(res1.fixed_params, [])
assert_equal(res2.fixed_params, ['cov.chol[2,1]'])
# Check that MLE finds the same parameters in either case
# (need to account for the fact that diagonal params are variances but
# unstructured params are standard deviations)
params = np.r_[res1.params[:2], res1.params[2:4]**0.5, res1.params[4]]
desired = np.r_[params[:3], 0, params[3:]]
assert_allclose(res2.params, desired, atol=1e-5)
# Now smooth at the actual parameters (to allow high precision testing
# below, even if there are small differences between MLE fitted parameters)
with mod2.fix_params(constraints):
res2 = mod2.smooth(params)
# Can't check some parameters-related values because of the different
# parameterization (i.e. cov_params, bse, pvalues, etc. won't match).
check_results(res1, res2, check_params=False)
def test_score_shape():
# Test that the `score()` output for fixed params has the same shape as the
# input vector
endog = macrodata['infl']
mod = sarimax.SARIMAX(endog, order=(1, 0, 0))
with mod.fix_params({'ar.L1': 0.5}):
score = mod.score([1.0])
assert_equal(score.shape, (1,))
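# Illustrative sketch of the two usage patterns exercised above, using only
# names already imported in this module: the fix_params context manager for a
# one-off computation, and fit_constrained for estimating the free parameters.
def _fix_params_sketch():
    endog = macrodata['infl']
    mod = sarimax.SARIMAX(endog, order=(1, 0, 0))
    # Pattern 1: fix 'ar.L1' while smoothing at a given value of sigma2
    with mod.fix_params({'ar.L1': 0.5}):
        res_smooth = mod.smooth([1.], includes_fixed=False)
    # Pattern 2: fix 'ar.L1' and estimate the remaining parameter by MLE
    res_fit = mod.fit_constrained({'ar.L1': 0.5}, disp=False)
    return res_smooth.params, res_fit.params  # both include the fixed value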
|
|
from math import ceil
import numpy as np
from scipy.fftpack import fft, ifft, fftfreq
from ..utils import logger, verbose
@verbose
def stft(x, wsize, tstep=None, verbose=None):
"""STFT Short-Term Fourier Transform using a sine window.
The transformation is designed to be a tight frame that can be
perfectly inverted. It only returns the positive frequencies.
Parameters
----------
x : 2d array of size n_signals x T
containing multi-channels signal
wsize : int
length of the STFT window in samples (must be a multiple of 4)
tstep : int
step between successive windows in samples (must be a multiple of 2,
a divider of wsize and smaller than wsize/2) (default: wsize/2)
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
X : 3d array of shape [n_signals, wsize / 2 + 1, n_step]
STFT coefficients for positive frequencies with
n_step = ceil(T / tstep)
Usage
-----
X = stft(x, wsize)
X = stft(x, wsize, tstep)
See also
--------
istft
stftfreq
"""
if not np.isrealobj(x):
raise ValueError("x is not a real valued array")
if x.ndim == 1:
x = x[None, :]
n_signals, T = x.shape
wsize = int(wsize)
### Errors and warnings ###
if wsize % 4:
raise ValueError('The window length must be a multiple of 4.')
if tstep is None:
tstep = wsize / 2
tstep = int(tstep)
if (wsize % tstep) or (tstep % 2):
raise ValueError('The step size must be a multiple of 2 and a '
'divider of the window length.')
if tstep > wsize / 2:
raise ValueError('The step size must be smaller than half the '
'window length.')
n_step = int(ceil(T / float(tstep)))
n_freq = wsize // 2 + 1
logger.info("Number of frequencies: %d" % n_freq)
logger.info("Number of time steps: %d" % n_step)
    X = np.zeros((n_signals, n_freq, n_step), dtype=np.complex128)
if n_signals == 0:
return X
# Defining sine window
win = np.sin(np.arange(.5, wsize + .5) / wsize * np.pi)
win2 = win ** 2
swin = np.zeros((n_step - 1) * tstep + wsize)
for t in range(n_step):
swin[t * tstep:t * tstep + wsize] += win2
swin = np.sqrt(wsize * swin)
# Zero-padding and Pre-processing for edges
xp = np.zeros((n_signals, wsize + (n_step - 1) * tstep),
dtype=x.dtype)
xp[:, (wsize - tstep) // 2: (wsize - tstep) // 2 + T] = x
x = xp
for t in range(n_step):
# Framing
wwin = win / swin[t * tstep: t * tstep + wsize]
frame = x[:, t * tstep: t * tstep + wsize] * wwin[None, :]
# FFT
fframe = fft(frame)
X[:, :, t] = fframe[:, :n_freq]
return X
def istft(X, tstep=None, Tx=None):
"""ISTFT Inverse Short-Term Fourier Transform using a sine window
Parameters
----------
X : 3d array of shape [n_signals, wsize / 2 + 1, n_step]
The STFT coefficients for positive frequencies
tstep : int
step between successive windows in samples (must be a multiple of 2,
a divider of wsize and smaller than wsize/2) (default: wsize/2)
Tx : int
Length of returned signal. If None Tx = n_step * tstep
Returns
-------
x : 1d array of length Tx
vector containing the inverse STFT signal
Usage
-----
x = istft(X)
x = istft(X, tstep)
See also
--------
stft
"""
### Errors and warnings ###
n_signals, n_win, n_step = X.shape
    if n_win % 2 == 0:
        raise ValueError('The number of rows of the STFT matrix must be odd.')
wsize = 2 * (n_win - 1)
if tstep is None:
        tstep = wsize // 2
if wsize % tstep:
raise ValueError('The step size must be a divider of two times the '
'number of rows of the STFT matrix minus two.')
    if tstep % 2:
        raise ValueError('The step size must be a multiple of 2.')
if tstep > wsize / 2:
raise ValueError('The step size must be smaller than the number of '
'rows of the STFT matrix minus one.')
if Tx is None:
Tx = n_step * tstep
T = n_step * tstep
    x = np.zeros((n_signals, T + wsize - tstep), dtype=np.float64)
if n_signals == 0:
return x[:, :Tx]
### Computing inverse STFT signal ###
# Defining sine window
win = np.sin(np.arange(.5, wsize + .5) / wsize * np.pi)
# win = win / norm(win);
# Pre-processing for edges
    swin = np.zeros(T + wsize - tstep, dtype=np.float64)
for t in range(n_step):
swin[t * tstep:t * tstep + wsize] += win ** 2
swin = np.sqrt(swin / wsize)
fframe = np.empty((n_signals, n_win + wsize // 2 - 1), dtype=X.dtype)
for t in range(n_step):
# IFFT
fframe[:, :n_win] = X[:, :, t]
fframe[:, n_win:] = np.conj(X[:, wsize // 2 - 1: 0: -1, t])
frame = ifft(fframe)
wwin = win / swin[t * tstep:t * tstep + wsize]
# Overlap-add
x[:, t * tstep: t * tstep + wsize] += np.real(np.conj(frame) * wwin)
# Truncation
x = x[:, (wsize - tstep) // 2: (wsize - tstep) // 2 + T + 1][:, :Tx].copy()
return x
def stftfreq(wsize, sfreq=None):
"""Frequencies of stft transformation
Parameters
----------
wsize : int
Size of stft window
sfreq : float
Sampling frequency. If None the frequencies are given between 0 and pi
otherwise it's given in Hz.
Returns
-------
freqs : array
The positive frequencies returned by stft
See also
--------
stft
istft
"""
n_freq = wsize // 2 + 1
freqs = fftfreq(wsize)
freqs = np.abs(freqs[:n_freq])
if sfreq is not None:
freqs *= float(sfreq)
return freqs
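# Illustrative example (a sketch, not from the original module):
# stftfreq(8, 1000.) returns array([   0.,  125.,  250.,  375.,  500.]),
# i.e. the wsize // 2 + 1 non-negative FFT bin frequencies in Hz.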
def stft_norm2(X):
"""Compute L2 norm of STFT transform
    It takes into account that stft only returns the positive frequencies.
    As we use a tight frame, this quantity is conserved by the STFT.
Parameters
----------
X : 3D complex array
The STFT transforms
Returns
-------
norms2 : array
        The squared L2 norm of every row of X.
"""
X2 = np.abs(X) ** 2
# compute all L2 coefs and remove freq zero once.
norms2 = (2. * X2.sum(axis=2).sum(axis=1) - np.sum(X2[:, 0, :], axis=1))
return norms2
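# ---------------------------------------------------------------------------
# Minimal usage sketch, assuming the stft() defined above has the signature
# stft(x, wsize, tstep) and relying on the numpy/scipy imports already made
# at the top of this module.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    x = rng.randn(2, 1000)                 # two signals, 1000 samples each
    wsize, tstep = 128, 64                 # tstep is even and divides wsize
    X = stft(x, wsize, tstep)              # -> (2, wsize // 2 + 1, n_step)
    x_rec = istft(X, tstep, Tx=x.shape[1])
    # The sine window forms a tight frame, so the transform is (up to
    # floating point error) perfectly invertible.
    assert np.allclose(x, x_rec)
    # Compare the signal energy with the norm computed from the STFT
    # coefficients; for a tight frame the two should agree closely.
    print('signal energy: %s' % (x ** 2).sum(axis=1))
    print('stft_norm2   : %s' % stft_norm2(X))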
|
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing images.
"""
from django.conf import settings
from django.forms import ValidationError # noqa
from django.forms.widgets import HiddenInput # noqa
from django.template import defaultfilters
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
from openstack_dashboard import policy
IMAGE_BACKEND_SETTINGS = getattr(settings, 'OPENSTACK_IMAGE_BACKEND', {})
IMAGE_FORMAT_CHOICES = IMAGE_BACKEND_SETTINGS.get('image_formats', [])
def create_image_metadata(data):
"""Use the given dict of image form data to generate the metadata used for
creating the image in glance.
"""
    # Glance does not really do anything with container_format at the
    # moment. It requires that it be set to the same value as disk_format
    # for the three Amazon image types; otherwise it just treats them as
    # 'bare'. As such, we simply set it here instead of asking the user
    # for information we can already determine.
disk_format = data['disk_format']
if disk_format in ('ami', 'aki', 'ari',):
container_format = disk_format
elif disk_format == 'docker':
# To support docker containers we allow the user to specify
# 'docker' as the format. In that case we really want to use
# 'raw' as the disk format and 'docker' as the container format.
disk_format = 'raw'
container_format = 'docker'
else:
container_format = 'bare'
# The Create form uses 'is_public' but the Update form uses 'public'. Just
# being tolerant here so we don't break anything else.
meta = {'is_public': data.get('is_public', data.get('public', False)),
'protected': data['protected'],
'disk_format': disk_format,
'container_format': container_format,
'min_disk': (data['minimum_disk'] or 0),
'min_ram': (data['minimum_ram'] or 0),
'name': data['name'],
'properties': {}}
if data['description']:
meta['properties']['description'] = data['description']
if data.get('kernel'):
meta['properties']['kernel_id'] = data['kernel']
if data.get('ramdisk'):
meta['properties']['ramdisk_id'] = data['ramdisk']
if data.get('architecture'):
meta['properties']['architecture'] = data['architecture']
return meta
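# Illustrative example of the mapping above, using a plain dict that mirrors
# the form fields defined below: given
#     {'disk_format': 'docker', 'protected': False, 'name': 'busybox',
#      'minimum_disk': 1, 'minimum_ram': 0, 'description': '',
#      'is_public': True}
# create_image_metadata() returns
#     {'is_public': True, 'protected': False, 'disk_format': 'raw',
#      'container_format': 'docker', 'min_disk': 1, 'min_ram': 0,
#      'name': 'busybox', 'properties': {}}
# i.e. the 'docker' pseudo-format is split into disk_format 'raw' plus
# container_format 'docker'.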
class CreateImageForm(forms.SelfHandlingForm):
name = forms.CharField(max_length=255, label=_("Name"))
description = forms.CharField(max_length=255, label=_("Description"),
required=False)
source_type = forms.ChoiceField(
label=_('Image Source'),
required=False,
choices=[('url', _('Image Location')),
('file', _('Image File'))],
widget=forms.Select(attrs={
'class': 'switchable',
'data-slug': 'source'}))
image_url = forms.URLField(label=_("Image Location"),
help_text=_("An external (HTTP) URL to load "
"the image from."),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'source',
'data-source-url': _('Image Location'),
'ng-model': 'copyFrom',
'ng-change':
'ctrl.selectImageFormat(copyFrom)'}),
required=False)
image_file = forms.FileField(label=_("Image File"),
help_text=_("A local image to upload."),
widget=forms.FileInput(attrs={
'class': 'switched',
'data-switch-on': 'source',
'data-source-file': _('Image File'),
'ng-model': 'imageFile',
'ng-change':
'ctrl.selectImageFormat(imageFile.name)',
'image-file-on-change': None}),
required=False)
kernel = forms.ChoiceField(
label=_('Kernel'),
required=False,
widget=forms.SelectWidget(
transform=lambda x: "%s (%s)" % (
x.name, defaultfilters.filesizeformat(x.size))))
ramdisk = forms.ChoiceField(
label=_('Ramdisk'),
required=False,
widget=forms.SelectWidget(
transform=lambda x: "%s (%s)" % (
x.name, defaultfilters.filesizeformat(x.size))))
disk_format = forms.ChoiceField(label=_('Format'),
choices=[],
widget=forms.Select(attrs={
'class': 'switchable',
'ng-model': 'ctrl.diskFormat'}))
architecture = forms.CharField(max_length=255, label=_("Architecture"),
required=False)
minimum_disk = forms.IntegerField(
label=_("Minimum Disk (GB)"),
min_value=0,
help_text=_('The minimum disk size required to boot the image. '
'If unspecified, this value defaults to 0 (no minimum).'),
required=False)
minimum_ram = forms.IntegerField(
label=_("Minimum RAM (MB)"),
min_value=0,
help_text=_('The minimum memory size required to boot the image. '
'If unspecified, this value defaults to 0 (no minimum).'),
required=False)
is_copying = forms.BooleanField(
label=_("Copy Data"), initial=True, required=False,
help_text=_('Specify this option to copy image data to the image '
'service. If unspecified, image data will be used in its '
'current location.'),
widget=forms.CheckboxInput(attrs={
'class': 'switched',
'data-source-url': _('Image Location'),
'data-switch-on': 'source'}))
is_public = forms.BooleanField(label=_("Public"), required=False)
protected = forms.BooleanField(label=_("Protected"), required=False)
def __init__(self, request, *args, **kwargs):
super(CreateImageForm, self).__init__(request, *args, **kwargs)
if (not settings.HORIZON_IMAGES_ALLOW_UPLOAD or
not policy.check((("image", "upload_image"),), request)):
self._hide_file_source_type()
if not policy.check((("image", "set_image_location"),), request):
self._hide_url_source_type()
if not policy.check((("image", "publicize_image"),), request):
self._hide_is_public()
self.fields['disk_format'].choices = IMAGE_FORMAT_CHOICES
try:
kernel_images = api.glance.image_list_detailed(
request, filters={'disk_format': 'aki'})[0]
except Exception:
kernel_images = []
msg = _('Unable to retrieve image list.')
messages.error(request, msg)
if kernel_images:
choices = [('', _("Choose an image"))]
for image in kernel_images:
choices.append((image.id, image))
self.fields['kernel'].choices = choices
else:
del self.fields['kernel']
try:
ramdisk_images = api.glance.image_list_detailed(
request, filters={'disk_format': 'ari'})[0]
except Exception:
ramdisk_images = []
msg = _('Unable to retrieve image list.')
messages.error(request, msg)
if ramdisk_images:
choices = [('', _("Choose an image"))]
for image in ramdisk_images:
choices.append((image.id, image))
self.fields['ramdisk'].choices = choices
else:
del self.fields['ramdisk']
def _hide_file_source_type(self):
self.fields['image_file'].widget = HiddenInput()
source_type = self.fields['source_type']
source_type.choices = [choice for choice in source_type.choices
if choice[0] != 'file']
if len(source_type.choices) == 1:
source_type.widget = HiddenInput()
def _hide_url_source_type(self):
self.fields['image_url'].widget = HiddenInput()
source_type = self.fields['source_type']
source_type.choices = [choice for choice in source_type.choices
if choice[0] != 'url']
if len(source_type.choices) == 1:
source_type.widget = HiddenInput()
def _hide_is_public(self):
self.fields['is_public'].widget = HiddenInput()
self.fields['is_public'].initial = False
def clean(self):
data = super(CreateImageForm, self).clean()
# The image_file key can be missing based on particular upload
# conditions. Code defensively for it here...
image_file = data.get('image_file', None)
image_url = data.get('image_url', None)
if not image_url and not image_file:
raise ValidationError(
_("A image or external image location must be specified."))
elif image_url and image_file:
raise ValidationError(
_("Can not specify both image and external image location."))
else:
return data
def handle(self, request, data):
meta = create_image_metadata(data)
# Add image source file or URL to metadata
if (settings.HORIZON_IMAGES_ALLOW_UPLOAD and
policy.check((("image", "upload_image"),), request) and
data.get('image_file', None)):
meta['data'] = self.files['image_file']
elif data['is_copying']:
meta['copy_from'] = data['image_url']
else:
meta['location'] = data['image_url']
try:
image = api.glance.image_create(request, **meta)
messages.success(request,
_('Your image %s has been queued for creation.') %
meta['name'])
return image
except Exception as e:
msg = _('Unable to create new image')
# TODO(nikunj2512): Fix this once it is fixed in glance client
if hasattr(e, 'code') and e.code == 400:
if "Invalid disk format" in e.details:
msg = _('Unable to create new image: Invalid disk format '
'%s for image.') % meta['disk_format']
elif "Image name too long" in e.details:
msg = _('Unable to create new image: Image name too long.')
exceptions.handle(request, msg)
return False
class UpdateImageForm(forms.SelfHandlingForm):
image_id = forms.CharField(widget=forms.HiddenInput())
name = forms.CharField(max_length=255, label=_("Name"))
description = forms.CharField(max_length=255, label=_("Description"),
required=False)
kernel = forms.CharField(
max_length=36,
label=_("Kernel ID"),
required=False,
widget=forms.TextInput(attrs={'readonly': 'readonly'}),
)
ramdisk = forms.CharField(
max_length=36,
label=_("Ramdisk ID"),
required=False,
widget=forms.TextInput(attrs={'readonly': 'readonly'}),
)
architecture = forms.CharField(
label=_("Architecture"),
required=False,
widget=forms.TextInput(attrs={'readonly': 'readonly'}),
)
disk_format = forms.ChoiceField(
label=_("Format"),
)
minimum_disk = forms.IntegerField(label=_("Minimum Disk (GB)"),
min_value=0,
help_text=_('The minimum disk size'
' required to boot the'
' image. If unspecified,'
' this value defaults to'
' 0 (no minimum).'),
required=False)
minimum_ram = forms.IntegerField(label=_("Minimum RAM (MB)"),
min_value=0,
help_text=_('The minimum memory size'
' required to boot the'
' image. If unspecified,'
' this value defaults to'
' 0 (no minimum).'),
required=False)
public = forms.BooleanField(label=_("Public"), required=False)
protected = forms.BooleanField(label=_("Protected"), required=False)
def __init__(self, request, *args, **kwargs):
super(UpdateImageForm, self).__init__(request, *args, **kwargs)
self.fields['disk_format'].choices = [(value, name) for value,
name in IMAGE_FORMAT_CHOICES
if value]
if not policy.check((("image", "publicize_image"),), request):
self.fields['public'].widget = forms.CheckboxInput(
attrs={'readonly': 'readonly'})
def handle(self, request, data):
image_id = data['image_id']
error_updating = _('Unable to update image "%s".')
meta = create_image_metadata(data)
# Ensure we do not delete properties that have already been
# set on an image.
meta['purge_props'] = False
try:
image = api.glance.image_update(request, image_id, **meta)
messages.success(request, _('Image was successfully updated.'))
return image
except Exception:
exceptions.handle(request, error_updating % image_id)
|
|
# Copyright 2011 OpenStack Foundation
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import itertools
import logging
import random
import time
import six
from oslo.config import cfg
from oslo.messaging._drivers import amqp as rpc_amqp
from oslo.messaging._drivers import amqpdriver
from oslo.messaging._drivers import common as rpc_common
from oslo.messaging import exceptions
from oslo.messaging.openstack.common.gettextutils import _
from oslo.messaging.openstack.common import jsonutils
from oslo.utils import importutils
from oslo.utils import netutils
qpid_codec = importutils.try_import("qpid.codec010")
qpid_messaging = importutils.try_import("qpid.messaging")
qpid_exceptions = importutils.try_import("qpid.messaging.exceptions")
LOG = logging.getLogger(__name__)
qpid_opts = [
cfg.StrOpt('qpid_hostname',
default='localhost',
help='Qpid broker hostname.'),
cfg.IntOpt('qpid_port',
default=5672,
help='Qpid broker port.'),
cfg.ListOpt('qpid_hosts',
default=['$qpid_hostname:$qpid_port'],
help='Qpid HA cluster host:port pairs.'),
cfg.StrOpt('qpid_username',
default='',
help='Username for Qpid connection.'),
cfg.StrOpt('qpid_password',
default='',
help='Password for Qpid connection.',
secret=True),
cfg.StrOpt('qpid_sasl_mechanisms',
default='',
help='Space separated list of SASL mechanisms to use for '
'auth.'),
cfg.IntOpt('qpid_heartbeat',
default=60,
help='Seconds between connection keepalive heartbeats.'),
cfg.StrOpt('qpid_protocol',
default='tcp',
help="Transport to use, either 'tcp' or 'ssl'."),
cfg.BoolOpt('qpid_tcp_nodelay',
default=True,
help='Whether to disable the Nagle algorithm.'),
cfg.IntOpt('qpid_receiver_capacity',
default=1,
help='The number of prefetched messages held by receiver.'),
# NOTE(russellb) If any additional versions are added (beyond 1 and 2),
# this file could probably use some additional refactoring so that the
# differences between each version are split into different classes.
cfg.IntOpt('qpid_topology_version',
default=1,
help="The qpid topology version to use. Version 1 is what "
"was originally used by impl_qpid. Version 2 includes "
"some backwards-incompatible changes that allow broker "
"federation to work. Users should update to version 2 "
"when they are able to take everything down, as it "
"requires a clean break."),
]
JSON_CONTENT_TYPE = 'application/json; charset=utf8'
def raise_invalid_topology_version(conf):
msg = (_("Invalid value for qpid_topology_version: %d") %
conf.qpid_topology_version)
LOG.error(msg)
raise Exception(msg)
class QpidMessage(dict):
def __init__(self, session, raw_message):
super(QpidMessage, self).__init__(
rpc_common.deserialize_msg(raw_message.content))
self._raw_message = raw_message
self._session = session
def acknowledge(self):
self._session.acknowledge(self._raw_message)
def requeue(self):
pass
class ConsumerBase(object):
"""Consumer base class."""
def __init__(self, conf, session, callback, node_name, node_opts,
link_name, link_opts):
"""Declare a queue on an amqp session.
'session' is the amqp session to use
'callback' is the callback to call when messages are received
'node_name' is the first part of the Qpid address string, before ';'
'node_opts' will be applied to the "x-declare" section of "node"
in the address string.
'link_name' goes into the "name" field of the "link" in the address
string
'link_opts' will be applied to the "x-declare" section of "link"
in the address string.
"""
self.callback = callback
self.receiver = None
self.rcv_capacity = conf.qpid_receiver_capacity
self.session = None
if conf.qpid_topology_version == 1:
addr_opts = {
"create": "always",
"node": {
"type": "topic",
"x-declare": {
"durable": True,
"auto-delete": True,
},
},
"link": {
"durable": True,
"x-declare": {
"durable": False,
"auto-delete": True,
"exclusive": False,
},
},
}
addr_opts["node"]["x-declare"].update(node_opts)
elif conf.qpid_topology_version == 2:
addr_opts = {
"link": {
"x-declare": {
"auto-delete": True,
"exclusive": False,
},
},
}
else:
raise_invalid_topology_version(conf)
addr_opts["link"]["x-declare"].update(link_opts)
if link_name:
addr_opts["link"]["name"] = link_name
self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
self.connect(session)
def connect(self, session):
"""Declare the receiver on connect."""
self._declare_receiver(session)
def reconnect(self, session):
"""Re-declare the receiver after a Qpid reconnect."""
self._declare_receiver(session)
def _declare_receiver(self, session):
self.session = session
self.receiver = session.receiver(self.address)
self.receiver.capacity = self.rcv_capacity
def _unpack_json_msg(self, msg):
"""Load the JSON data in msg if msg.content_type indicates that it
is necessary. Put the loaded data back into msg.content and
update msg.content_type appropriately.
A Qpid Message containing a dict will have a content_type of
'amqp/map', whereas one containing a string that needs to be converted
back from JSON will have a content_type of JSON_CONTENT_TYPE.
:param msg: a Qpid Message object
:returns: None
"""
if msg.content_type == JSON_CONTENT_TYPE:
msg.content = jsonutils.loads(msg.content)
msg.content_type = 'amqp/map'
def consume(self):
"""Fetch the message and pass it to the callback object."""
message = self.receiver.fetch()
try:
self._unpack_json_msg(message)
self.callback(QpidMessage(self.session, message))
except Exception:
LOG.exception(_("Failed to process message... skipping it."))
self.session.acknowledge(message)
def get_receiver(self):
return self.receiver
def get_node_name(self):
return self.address.split(';')[0]
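# Illustrative only: with qpid_topology_version = 1 and the amqp
# auto-delete/durable options left at their False defaults, the
# DirectConsumer below ends up with an address string of roughly this shape
# for msg_id 'reply_abc' (key order depends on jsonutils):
#
#   reply_abc/reply_abc ; {
#       "create": "always",
#       "node": {"type": "topic",
#                "x-declare": {"durable": true, "auto-delete": true,
#                              "type": "direct"}},
#       "link": {"durable": true, "name": "reply_abc",
#                "x-declare": {"durable": false, "auto-delete": false,
#                              "exclusive": true}}}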
class DirectConsumer(ConsumerBase):
"""Queue/consumer class for 'direct'."""
def __init__(self, conf, session, msg_id, callback):
"""Init a 'direct' queue.
'session' is the amqp session to use
'msg_id' is the msg_id to listen on
'callback' is the callback to call when messages are received
"""
link_opts = {
"auto-delete": conf.amqp_auto_delete,
"exclusive": True,
"durable": conf.amqp_durable_queues,
}
if conf.qpid_topology_version == 1:
node_name = "%s/%s" % (msg_id, msg_id)
node_opts = {"type": "direct"}
link_name = msg_id
elif conf.qpid_topology_version == 2:
node_name = "amq.direct/%s" % msg_id
node_opts = {}
link_name = msg_id
else:
raise_invalid_topology_version(conf)
super(DirectConsumer, self).__init__(conf, session, callback,
node_name, node_opts, link_name,
link_opts)
class TopicConsumer(ConsumerBase):
"""Consumer class for 'topic'."""
def __init__(self, conf, session, topic, callback, exchange_name,
name=None):
"""Init a 'topic' queue.
:param session: the amqp session to use
:param topic: is the topic to listen on
:paramtype topic: str
:param callback: the callback to call when messages are received
:param name: optional queue name, defaults to topic
"""
link_opts = {
"auto-delete": conf.amqp_auto_delete,
"durable": conf.amqp_durable_queues,
}
if conf.qpid_topology_version == 1:
node_name = "%s/%s" % (exchange_name, topic)
elif conf.qpid_topology_version == 2:
node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
else:
raise_invalid_topology_version(conf)
super(TopicConsumer, self).__init__(conf, session, callback, node_name,
{}, name or topic, link_opts)
class FanoutConsumer(ConsumerBase):
"""Consumer class for 'fanout'."""
def __init__(self, conf, session, topic, callback):
"""Init a 'fanout' queue.
'session' is the amqp session to use
'topic' is the topic to listen on
'callback' is the callback to call when messages are received
"""
self.conf = conf
link_opts = {"exclusive": True}
if conf.qpid_topology_version == 1:
node_name = "%s_fanout" % topic
node_opts = {"durable": False, "type": "fanout"}
elif conf.qpid_topology_version == 2:
node_name = "amq.topic/fanout/%s" % topic
node_opts = {}
else:
raise_invalid_topology_version(conf)
super(FanoutConsumer, self).__init__(conf, session, callback,
node_name, node_opts, None,
link_opts)
class Publisher(object):
"""Base Publisher class."""
def __init__(self, conf, session, node_name, node_opts=None):
"""Init the Publisher class with the exchange_name, routing_key,
and other options
"""
self.sender = None
self.session = session
if conf.qpid_topology_version == 1:
addr_opts = {
"create": "always",
"node": {
"type": "topic",
"x-declare": {
"durable": False,
# auto-delete isn't implemented for exchanges in qpid,
# but put in here anyway
"auto-delete": True,
},
},
}
if node_opts:
addr_opts["node"]["x-declare"].update(node_opts)
self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
elif conf.qpid_topology_version == 2:
self.address = node_name
else:
raise_invalid_topology_version(conf)
self.reconnect(session)
def reconnect(self, session):
"""Re-establish the Sender after a reconnection."""
self.sender = session.sender(self.address)
def _pack_json_msg(self, msg):
"""Qpid cannot serialize dicts containing strings longer than 65535
characters. This function dumps the message content to a JSON
string, which Qpid is able to handle.
:param msg: May be either a Qpid Message object or a bare dict.
:returns: A Qpid Message with its content field JSON encoded.
"""
try:
msg.content = jsonutils.dumps(msg.content)
except AttributeError:
# Need to have a Qpid message so we can set the content_type.
msg = qpid_messaging.Message(jsonutils.dumps(msg))
msg.content_type = JSON_CONTENT_TYPE
return msg
def send(self, msg):
"""Send a message."""
try:
# Check if Qpid can encode the message
check_msg = msg
if not hasattr(check_msg, 'content_type'):
check_msg = qpid_messaging.Message(msg)
content_type = check_msg.content_type
enc, dec = qpid_messaging.message.get_codec(content_type)
enc(check_msg.content)
except qpid_codec.CodecException:
# This means the message couldn't be serialized as a dict.
msg = self._pack_json_msg(msg)
self.sender.send(msg)
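    # Illustrative note on the fallback above: a dict payload containing a
    # string longer than 65535 characters cannot be encoded by Qpid's
    # amqp/map codec, so send() re-packs it via _pack_json_msg() and the
    # message goes out with content_type JSON_CONTENT_TYPE; the consumer's
    # _unpack_json_msg() reverses this on receipt.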
class DirectPublisher(Publisher):
"""Publisher class for 'direct'."""
def __init__(self, conf, session, topic):
"""Init a 'direct' publisher."""
if conf.qpid_topology_version == 1:
node_name = "%s/%s" % (topic, topic)
node_opts = {"type": "direct"}
elif conf.qpid_topology_version == 2:
node_name = "amq.direct/%s" % topic
node_opts = {}
else:
raise_invalid_topology_version(conf)
super(DirectPublisher, self).__init__(conf, session, node_name,
node_opts)
class TopicPublisher(Publisher):
"""Publisher class for 'topic'."""
def __init__(self, conf, session, exchange_name, topic):
"""Init a 'topic' publisher.
"""
if conf.qpid_topology_version == 1:
node_name = "%s/%s" % (exchange_name, topic)
elif conf.qpid_topology_version == 2:
node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
else:
raise_invalid_topology_version(conf)
super(TopicPublisher, self).__init__(conf, session, node_name)
class FanoutPublisher(Publisher):
"""Publisher class for 'fanout'."""
def __init__(self, conf, session, topic):
"""Init a 'fanout' publisher.
"""
if conf.qpid_topology_version == 1:
node_name = "%s_fanout" % topic
node_opts = {"type": "fanout"}
elif conf.qpid_topology_version == 2:
node_name = "amq.topic/fanout/%s" % topic
node_opts = {}
else:
raise_invalid_topology_version(conf)
super(FanoutPublisher, self).__init__(conf, session, node_name,
node_opts)
class NotifyPublisher(Publisher):
"""Publisher class for notifications."""
def __init__(self, conf, session, exchange_name, topic):
"""Init a 'topic' publisher.
"""
node_opts = {"durable": True}
if conf.qpid_topology_version == 1:
node_name = "%s/%s" % (exchange_name, topic)
elif conf.qpid_topology_version == 2:
node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
else:
raise_invalid_topology_version(conf)
super(NotifyPublisher, self).__init__(conf, session, node_name,
node_opts)
class Connection(object):
"""Connection object."""
pools = {}
def __init__(self, conf, url, purpose):
if not qpid_messaging:
raise ImportError("Failed to import qpid.messaging")
self.connection = None
self.session = None
self.consumers = {}
self.conf = conf
self._consume_loop_stopped = False
self.brokers_params = []
if url.hosts:
for host in url.hosts:
params = {
'username': host.username or '',
'password': host.password or '',
}
if host.port is not None:
params['host'] = '%s:%d' % (host.hostname, host.port)
else:
params['host'] = host.hostname
self.brokers_params.append(params)
else:
# Old configuration format
for adr in self.conf.qpid_hosts:
hostname, port = netutils.parse_host_port(
adr, default_port=5672)
params = {
'host': '%s:%d' % (hostname, port),
'username': self.conf.qpid_username,
'password': self.conf.qpid_password,
}
self.brokers_params.append(params)
random.shuffle(self.brokers_params)
self.brokers = itertools.cycle(self.brokers_params)
self.reconnect()
def _connect(self, broker):
# Create the connection - this does not open the connection
self.connection = qpid_messaging.Connection(broker['host'])
# Check if flags are set and if so set them for the connection
# before we call open
self.connection.username = broker['username']
self.connection.password = broker['password']
self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms
# Reconnection is done by self.reconnect()
self.connection.reconnect = False
self.connection.heartbeat = self.conf.qpid_heartbeat
self.connection.transport = self.conf.qpid_protocol
self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay
self.connection.open()
def _register_consumer(self, consumer):
self.consumers[six.text_type(consumer.get_receiver())] = consumer
def _lookup_consumer(self, receiver):
return self.consumers[six.text_type(receiver)]
def _disconnect(self):
# Close the session if necessary
if self.connection is not None and self.connection.opened():
try:
self.connection.close()
except qpid_exceptions.MessagingError:
pass
self.connection = None
def reconnect(self, retry=None):
"""Handles reconnecting and re-establishing sessions and queues.
Will retry up to retry number of times.
retry = None or -1 means to retry forever
retry = 0 means no retry
retry = N means N retries
"""
delay = 1
attempt = 0
loop_forever = False
if retry is None or retry < 0:
loop_forever = True
while True:
self._disconnect()
attempt += 1
broker = six.next(self.brokers)
try:
self._connect(broker)
except qpid_exceptions.MessagingError as e:
msg_dict = dict(e=e,
delay=delay,
retry=retry,
broker=broker)
if not loop_forever and attempt > retry:
msg = _('Unable to connect to AMQP server on '
'%(broker)s after %(retry)d '
'tries: %(e)s') % msg_dict
LOG.error(msg)
raise exceptions.MessageDeliveryFailure(msg)
else:
msg = _("Unable to connect to AMQP server on %(broker)s: "
"%(e)s. Sleeping %(delay)s seconds") % msg_dict
LOG.error(msg)
time.sleep(delay)
delay = min(delay + 1, 5)
else:
LOG.info(_('Connected to AMQP server on %s'), broker['host'])
break
self.session = self.connection.session()
if self.consumers:
consumers = self.consumers
self.consumers = {}
for consumer in six.itervalues(consumers):
consumer.reconnect(self.session)
self._register_consumer(consumer)
LOG.debug("Re-established AMQP queues")
def ensure(self, error_callback, method, retry=None):
while True:
try:
return method()
except (qpid_exceptions.Empty,
qpid_exceptions.MessagingError) as e:
if error_callback:
error_callback(e)
self.reconnect(retry=retry)
def close(self):
"""Close/release this connection."""
try:
self.connection.close()
except Exception:
# NOTE(dripton) Logging exceptions that happen during cleanup just
# causes confusion; there's really nothing useful we can do with
# them.
pass
self.connection = None
def reset(self):
"""Reset a connection so it can be used again."""
self.session.close()
self.session = self.connection.session()
self.consumers = {}
def declare_consumer(self, consumer_cls, topic, callback):
"""Create a Consumer using the class that was passed in and
add it to our list of consumers
"""
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': exc}
LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
"%(err_str)s"), log_info)
def _declare_consumer():
consumer = consumer_cls(self.conf, self.session, topic, callback)
self._register_consumer(consumer)
return consumer
return self.ensure(_connect_error, _declare_consumer)
def iterconsume(self, limit=None, timeout=None):
"""Return an iterator that will consume from all queues/consumers."""
timer = rpc_common.DecayingTimer(duration=timeout)
timer.start()
def _raise_timeout(exc):
LOG.debug('Timed out waiting for RPC response: %s', exc)
raise rpc_common.Timeout()
def _error_callback(exc):
timer.check_return(_raise_timeout, exc)
LOG.exception(_('Failed to consume message from queue: %s'), exc)
def _consume():
# NOTE(sileht):
            # maximum value chosen according to the best practice from kombu:
# http://kombu.readthedocs.org/en/latest/reference/kombu.common.html#kombu.common.eventloop
poll_timeout = 1 if timeout is None else min(timeout, 1)
while True:
if self._consume_loop_stopped:
self._consume_loop_stopped = False
raise StopIteration
try:
nxt_receiver = self.session.next_receiver(
timeout=poll_timeout)
except qpid_exceptions.Empty as exc:
poll_timeout = timer.check_return(_raise_timeout, exc,
maximum=1)
else:
break
try:
self._lookup_consumer(nxt_receiver).consume()
except Exception:
LOG.exception(_("Error processing message. "
"Skipping it."))
for iteration in itertools.count(0):
if limit and iteration >= limit:
raise StopIteration
yield self.ensure(_error_callback, _consume)
def publisher_send(self, cls, topic, msg, retry=None, **kwargs):
"""Send to a publisher based on the publisher class."""
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': exc}
LOG.exception(_("Failed to publish message to topic "
"'%(topic)s': %(err_str)s"), log_info)
def _publisher_send():
publisher = cls(self.conf, self.session, topic=topic, **kwargs)
publisher.send(msg)
return self.ensure(_connect_error, _publisher_send, retry=retry)
def declare_direct_consumer(self, topic, callback):
"""Create a 'direct' queue.
In nova's use, this is generally a msg_id queue used for
responses for call/multicall
"""
self.declare_consumer(DirectConsumer, topic, callback)
def declare_topic_consumer(self, exchange_name, topic, callback=None,
queue_name=None):
"""Create a 'topic' consumer."""
self.declare_consumer(functools.partial(TopicConsumer,
name=queue_name,
exchange_name=exchange_name,
),
topic, callback)
def declare_fanout_consumer(self, topic, callback):
"""Create a 'fanout' consumer."""
self.declare_consumer(FanoutConsumer, topic, callback)
def direct_send(self, msg_id, msg):
"""Send a 'direct' message."""
self.publisher_send(DirectPublisher, topic=msg_id, msg=msg)
def topic_send(self, exchange_name, topic, msg, timeout=None, retry=None):
"""Send a 'topic' message."""
#
# We want to create a message with attributes, for example a TTL. We
# don't really need to keep 'msg' in its JSON format any longer
# so let's create an actual Qpid message here and get some
# value-add on the go.
#
# WARNING: Request timeout happens to be in the same units as
# Qpid's TTL (seconds). If this changes in the future, then this
# will need to be altered accordingly.
#
qpid_message = qpid_messaging.Message(content=msg, ttl=timeout)
self.publisher_send(TopicPublisher, topic=topic, msg=qpid_message,
exchange_name=exchange_name, retry=retry)
def fanout_send(self, topic, msg, retry=None):
"""Send a 'fanout' message."""
self.publisher_send(FanoutPublisher, topic=topic, msg=msg, retry=retry)
def notify_send(self, exchange_name, topic, msg, retry=None, **kwargs):
"""Send a notify message on a topic."""
self.publisher_send(NotifyPublisher, topic=topic, msg=msg,
exchange_name=exchange_name, retry=retry)
def consume(self, limit=None, timeout=None):
"""Consume from all queues/consumers."""
it = self.iterconsume(limit=limit, timeout=timeout)
while True:
try:
six.next(it)
except StopIteration:
return
def stop_consuming(self):
self._consume_loop_stopped = True
class QpidDriver(amqpdriver.AMQPDriverBase):
def __init__(self, conf, url,
default_exchange=None, allowed_remote_exmods=None):
conf.register_opts(qpid_opts)
conf.register_opts(rpc_amqp.amqp_opts)
connection_pool = rpc_amqp.get_connection_pool(conf, url, Connection)
super(QpidDriver, self).__init__(conf, url,
connection_pool,
default_exchange,
allowed_remote_exmods)
|
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Transparent image file caching middleware, designed to live on
Glance API nodes. When images are requested from the API node,
this middleware caches the returned image file to local filesystem.
When subsequent requests for the same image file are received,
the local cached copy of the image file is returned.
"""
import re
import six
from oslo_log import log as logging
from six.moves import http_client as http
import webob
from glance.api.common import size_checked_iter
from glance.api import policy
from glance.api.v1 import images
from glance.common import exception
from glance.common import utils
from glance.common import wsgi
import glance.db
from glance.i18n import _LE, _LI
from glance import image_cache
from glance import notifier
import glance.registry.client.v1.api as registry
LOG = logging.getLogger(__name__)
PATTERNS = {
('v1', 'GET'): re.compile(r'^/v1/images/([^\/]+)$'),
('v1', 'DELETE'): re.compile(r'^/v1/images/([^\/]+)$'),
('v2', 'GET'): re.compile(r'^/v2/images/([^\/]+)/file$'),
('v2', 'DELETE'): re.compile(r'^/v2/images/([^\/]+)$')
}
class CacheFilter(wsgi.Middleware):
def __init__(self, app):
self.cache = image_cache.ImageCache()
self.serializer = images.ImageSerializer()
self.policy = policy.Enforcer()
LOG.info(_LI("Initialized image cache middleware"))
super(CacheFilter, self).__init__(app)
def _verify_metadata(self, image_meta):
"""
Sanity check the 'deleted' and 'size' metadata values.
"""
# NOTE: admins can see image metadata in the v1 API, but shouldn't
# be able to download the actual image data.
if image_meta['status'] == 'deleted' and image_meta['deleted']:
raise exception.NotFound()
if not image_meta['size']:
# override image size metadata with the actual cached
# file size, see LP Bug #900959
image_meta['size'] = self.cache.get_image_size(image_meta['id'])
@staticmethod
def _match_request(request):
"""Determine the version of the url and extract the image id
        :returns: tuple of (version, method, image id) if the url is
            cacheable, otherwise None
"""
for ((version, method), pattern) in PATTERNS.items():
if request.method != method:
continue
match = pattern.match(request.path_info)
if match is None:
continue
image_id = match.group(1)
# Ensure the image id we got looks like an image id to filter
# out a URI like /images/detail. See LP Bug #879136
if image_id != 'detail':
return (version, method, image_id)
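    # Illustrative example: a request such as
    #     GET /v2/images/71c675ab-d94f-49cd-a114-e12490b328d9/file
    # matches PATTERNS[('v2', 'GET')] and _match_request() returns
    #     ('v2', 'GET', '71c675ab-d94f-49cd-a114-e12490b328d9')
    # while GET /v1/images/detail is rejected by the 'detail' guard above.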
def _enforce(self, req, action, target=None):
"""Authorize an action against our policies"""
if target is None:
target = {}
try:
self.policy.enforce(req.context, action, target)
except exception.Forbidden as e:
LOG.debug("User not permitted to perform '%s' action", action)
raise webob.exc.HTTPForbidden(explanation=e.msg, request=req)
def _get_v1_image_metadata(self, request, image_id):
"""
        Retrieves image metadata using the registry for the v1 api and
        creates a dictionary-like mash-up of image core and custom
        properties.
"""
try:
image_metadata = registry.get_image_metadata(request.context,
image_id)
return utils.create_mashup_dict(image_metadata)
except exception.NotFound as e:
LOG.debug("No metadata found for image '%s'", image_id)
raise webob.exc.HTTPNotFound(explanation=e.msg, request=request)
def _get_v2_image_metadata(self, request, image_id):
"""
        Retrieves the image for the v2 api and creates an adapter-like
        object to access image core or custom properties on the request.
"""
db_api = glance.db.get_api()
image_repo = glance.db.ImageRepo(request.context, db_api)
try:
image = image_repo.get(image_id)
# Storing image object in request as it is required in
# _process_v2_request call.
request.environ['api.cache.image'] = image
return policy.ImageTarget(image)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg, request=request)
def process_request(self, request):
"""
For requests for an image file, we check the local image
cache. If present, we return the image file, appending
the image metadata in headers. If not present, we pass
the request on to the next application in the pipeline.
"""
match = self._match_request(request)
try:
(version, method, image_id) = match
except TypeError:
# Trying to unpack None raises this exception
return None
self._stash_request_info(request, image_id, method, version)
# Partial image download requests shall not be served from cache
# Bug: 1664709
# TODO(dharinic): If an image is already cached, add support to serve
# only the requested bytes (partial image download) from the cache.
if (request.headers.get('Content-Range') or
request.headers.get('Range')):
return None
if request.method != 'GET' or not self.cache.is_cached(image_id):
return None
method = getattr(self, '_get_%s_image_metadata' % version)
image_metadata = method(request, image_id)
# Deactivated images shall not be served from cache
if image_metadata['status'] == 'deactivated':
return None
try:
self._enforce(request, 'download_image', target=image_metadata)
except exception.Forbidden:
return None
LOG.debug("Cache hit for image '%s'", image_id)
image_iterator = self.get_from_cache(image_id)
method = getattr(self, '_process_%s_request' % version)
try:
return method(request, image_id, image_iterator, image_metadata)
except exception.ImageNotFound:
msg = _LE("Image cache contained image file for image '%s', "
"however the registry did not contain metadata for "
"that image!") % image_id
LOG.error(msg)
self.cache.delete_cached_image(image_id)
@staticmethod
def _stash_request_info(request, image_id, method, version):
"""
Preserve the image id, version and request method for later retrieval
"""
request.environ['api.cache.image_id'] = image_id
request.environ['api.cache.method'] = method
request.environ['api.cache.version'] = version
@staticmethod
def _fetch_request_info(request):
"""
Preserve the cached image id, version for consumption by the
process_response method of this middleware
"""
try:
image_id = request.environ['api.cache.image_id']
method = request.environ['api.cache.method']
version = request.environ['api.cache.version']
except KeyError:
return None
else:
return (image_id, method, version)
def _process_v1_request(self, request, image_id, image_iterator,
image_meta):
# Don't display location
if 'location' in image_meta:
del image_meta['location']
image_meta.pop('location_data', None)
self._verify_metadata(image_meta)
response = webob.Response(request=request)
raw_response = {
'image_iterator': image_iterator,
'image_meta': image_meta,
}
return self.serializer.show(response, raw_response)
def _process_v2_request(self, request, image_id, image_iterator,
image_meta):
# We do some contortions to get the image_metadata so
# that we can provide it to 'size_checked_iter' which
# will generate a notification.
# TODO(mclaren): Make notification happen more
# naturally once caching is part of the domain model.
image = request.environ['api.cache.image']
self._verify_metadata(image_meta)
response = webob.Response(request=request)
response.app_iter = size_checked_iter(response, image_meta,
image_meta['size'],
image_iterator,
notifier.Notifier())
# NOTE (flwang): Set the content-type, content-md5 and content-length
# explicitly to be consistent with the non-cache scenario.
# Besides, it's not worth the candle to invoke the "download" method
# of ResponseSerializer under image_data. Because method "download"
# will reset the app_iter. Then we have to call method
# "size_checked_iter" to avoid missing any notification. But after
# call "size_checked_iter", we will lose the content-md5 and
# content-length got by the method "download" because of this issue:
# https://github.com/Pylons/webob/issues/86
response.headers['Content-Type'] = 'application/octet-stream'
if image.checksum:
response.headers['Content-MD5'] = (image.checksum.encode('utf-8')
if six.PY2 else image.checksum)
response.headers['Content-Length'] = str(image.size)
return response
def process_response(self, resp):
"""
We intercept the response coming back from the main
images Resource, removing image file from the cache
if necessary
"""
status_code = self.get_status_code(resp)
if not 200 <= status_code < 300:
return resp
# Note(dharinic): Bug: 1664709: Do not cache partial images.
if status_code == http.PARTIAL_CONTENT:
return resp
try:
(image_id, method, version) = self._fetch_request_info(
resp.request)
except TypeError:
return resp
if method == 'GET' and status_code == http.NO_CONTENT:
# Bugfix:1251055 - Don't cache non-existent image files.
# NOTE: Both GET for an image without locations and DELETE return
# 204 but DELETE should be processed.
return resp
method_str = '_process_%s_response' % method
try:
process_response_method = getattr(self, method_str)
except AttributeError:
LOG.error(_LE('could not find %s') % method_str)
# Nothing to do here, move along
return resp
else:
return process_response_method(resp, image_id, version=version)
def _process_DELETE_response(self, resp, image_id, version=None):
if self.cache.is_cached(image_id):
LOG.debug("Removing image %s from cache", image_id)
self.cache.delete_cached_image(image_id)
return resp
def _process_GET_response(self, resp, image_id, version=None):
image_checksum = resp.headers.get('Content-MD5')
if not image_checksum:
# API V1 stores the checksum in a different header:
image_checksum = resp.headers.get('x-image-meta-checksum')
if not image_checksum:
LOG.error(_LE("Checksum header is missing."))
# fetch image_meta on the basis of version
image_metadata = None
if version:
method = getattr(self, '_get_%s_image_metadata' % version)
image_metadata = method(resp.request, image_id)
# NOTE(zhiyan): image_cache return a generator object and set to
# response.app_iter, it will be called by eventlet.wsgi later.
# So we need enforce policy firstly but do it by application
# since eventlet.wsgi could not catch webob.exc.HTTPForbidden and
# return 403 error to client then.
self._enforce(resp.request, 'download_image', target=image_metadata)
resp.app_iter = self.cache.get_caching_iter(image_id, image_checksum,
resp.app_iter)
return resp
def get_status_code(self, response):
"""
Returns the integer status code from the response, which
can be either a Webob.Response (used in testing) or httplib.Response
"""
if hasattr(response, 'status_int'):
return response.status_int
return response.status
def get_from_cache(self, image_id):
"""Called if cache hit"""
with self.cache.open_for_read(image_id) as cache_file:
chunks = utils.chunkiter(cache_file)
for chunk in chunks:
yield chunk
|
|
#! /usr/bin/env python
import sys
import os.path
import re
import uuid
import signal
import fileinput
import subprocess
from argparse import ArgumentParser
import dnapilib
from dnapilib.io_utils import get_file_obj
from dnapilib.apred import adapter_prediction
from dnapilib.apred import iterative_adapter_prediction
from dnapilib.exhaust import rm_temp_dir
from dnapilib.exhaust import fastq_input_prep
from dnapilib.exhaust import map_clean_reads
from dnapilib.exhaust import make_stats_report
TEMP_DIR = None
MAP_TO_GENOME = False
SAMPLE_NUM = 50000
def convert_interval(s_in, s_op, func):
"""Return range of kmers or filtering ratios.
"""
msg = "bad {}: {} {}"
try:
s = list(map(func, s_in.split(":")))
    except ValueError:
raise Exception(msg.format("value", s_op, s_in))
if len(s) == 1:
return s
if len(s) == 3:
beg, end, interval = s
values = []
while beg < end:
values.append(beg)
beg += interval
values.append(end)
return values
else:
raise Exception(msg.format("interval", s_op, s_in))
def parse_args():
"""Return options and required arguments.
"""
parser = ArgumentParser(
usage="%(prog)s [options] FASTQ",
description="Predict or evaluate 3'adapter sequence(s)",
epilog="Report bug to: Junko Tsuji <jnktsj@gmail.com>")
parser.add_argument("FASTQ",
type=str,
help="including stdin or compressed file {zip,gz,tar,bz}")
parser.add_argument("--version", action="version",
version="%(prog)s {}".format(dnapilib.__version__))
predop = parser.add_argument_group("adapter prediction parameters")
predop.add_argument("-k",
metavar="[KMER_BEG:KMER_END:INCREMENT | KMER_LEN]",
default="9:11:2",
help="range of kmers or a single kmer to predict 3'adapters "
"(default: %(default)s)")
predop.add_argument("-r",
metavar="[RATIO_BEG:RATIO_END:INTCREMENT | RATIO]",
default="1.2:1.4:0.1",
help="range of ratios or a single ratio to filter less abundant kmers"
" (default: %(default)s)")
predop.add_argument("--show-all",
action="store_true",
help="show other candidates if any")
exhaop = parser.add_argument_group("exhaustive adapter search")
exhaop.add_argument("--map-command",
metavar="COMMAND",
default=None,
help="read mapping command to be tested")
exhaop.add_argument("--subsample-rate",
metavar="FLOAT",
default=1.0, type=float,
help="subsampling fraction of reads (default: %(default)s)")
exhaop.add_argument("--output-dir",
metavar="DIRECTORY",
default="./dnapi_out",
help="output directory to write report and cleansed reads"
" (default: ./dnapi_out)")
exhaop.add_argument("--no-output-files",
action="store_true",
help="only display report and suppress output files")
exhaop.add_argument("--temp-dir",
metavar="DIRECTORY",
default="/tmp",
help="place to make temporary directory (default: %(default)s)")
evalop = parser.add_argument_group("evaluation of candidate adapters")
evalop.add_argument("--adapter-seq",
dest="seq", nargs="+",
default=None,
help="list of 3'adapters for evaluation")
adrmop = parser.add_argument_group("adapter removal parameters")
adrmop.add_argument("--prefix-match",
metavar="LENGTH",
default=7, type=int,
help="3'adapter match length to trim (default: %(default)s)")
adrmop.add_argument("--min-len",
metavar="LENGTH",
default=16, type=int,
help="minimum read length to keep for mapping (default: %(default)s)")
adrmop.add_argument("--max-len",
metavar="LENGTH",
default=36, type=int,
help="maximum read length to keep for mapping (default: %(default)s)")
adrmop.add_argument("--trim-5p",
metavar="LENGTH",
default=0, type=int,
help="trim specified number of bases from 5'ends after adapter removal"
" (default: %(default)s)")
adrmop.add_argument("--trim-3p",
metavar="LENGTH",
default=0, type=int,
help="trim specified number of bases from 3'ends after adapter removal"
" (default: %(default)s)")
args = parser.parse_args()
if args.map_command:
err_find = "can't find {}"
soft = os.path.expanduser(args.map_command.split()[0])
if os.path.dirname(soft):
if not os.path.exists(soft):
raise Exception(err_find.format(soft))
else:
try:
subprocess.call("which {}".format(soft).split())
except OSError:
raise Exception(err_find.format(soft))
if not re.findall("@in", args.map_command):
raise Exception("can't locate input argument: @in")
if not re.findall("@out", args.map_command):
raise Exception("can't locate output argument: @out")
if args.prefix_match <= 0:
raise Exception("bad value: --prefix-match")
if args.min_len <= 0:
raise Exception("bad value: --min-len")
if args.max_len <= 0:
raise Exception("bad value: --max-len")
if args.trim_5p < 0:
raise Exception("bad value: --trim-5p")
if args.trim_3p < 0:
raise Exception("bad value: --trim-3p")
if args.subsample_rate <= 0 or 1 < args.subsample_rate:
raise Exception("bad subsampling rate")
global MAP_TO_GENOME
MAP_TO_GENOME = True
return args
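# Hypothetical example of a --map-command value (the command string must
# contain the @in and @out placeholders checked above; map_clean_reads is
# expected to substitute the cleansed reads and the mapping output for them):
#     dnapi.py --map-command "bowtie -v0 genome_index @in @out" input.fastq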
def main():
args = parse_args()
fastq = args.FASTQ
Ks = convert_interval(args.k, "-k", int)
Rs = convert_interval(args.r, "-r", float)
if not MAP_TO_GENOME:
if len(Ks) > 1 or len(Rs) > 1:
adapts = iterative_adapter_prediction(fastq, Rs, Ks, SAMPLE_NUM)
else:
adapts = adapter_prediction(fastq, Rs[0], Ks[0], SAMPLE_NUM)
if args.show_all:
for x in adapts:
print("{}\tscore={:.2f}".format(*x))
else:
print(adapts[0][0])
else:
global TEMP_DIR
TEMP_DIR = "{}/DNApi_tmp_{}".format(
args.temp_dir, str(uuid.uuid4()))
subprocess.call(("mkdir {}".format(TEMP_DIR)).split())
original_fastq = fastq
fastq, total_read, sd = fastq_input_prep(
fastq, args.subsample_rate, TEMP_DIR)
if args.seq:
adapts = set(args.seq)
setstr = ["user-input" for i in range(len(adapts))]
else:
msg = "warning: predicted adapter is too short (<{0}): '{1}'\n" \
+ "warning: '{1}' will not be further investigated\n"
params = {}
for k in Ks:
for r in Rs:
aout = adapter_prediction(fastq, r, k, SAMPLE_NUM)[0][0]
if len(aout) < args.prefix_match:
                        sys.stderr.write(msg.format(args.prefix_match, aout))
                        continue
                    aseq = aout[: args.prefix_match+5]
                    params.setdefault(aseq, []).append("{}:{:.1f}".format(k, r))
adapts = list(params.keys())
setstr = [';'.join(s) for s in params.values()]
adapts.append("RAW_INPUT")
setstr.append("NO_TREATMENT")
if not adapts:
raise Exception("no valid adapters to further process")
table = []
for i, aseq in enumerate(adapts):
cnts = map_clean_reads(
fastq, aseq[:args.prefix_match], args.trim_5p,
args.trim_3p, args.min_len, args.max_len,
args.map_command, TEMP_DIR)
read_stats = [c / total_read * 100 for c in cnts]
table.append([aseq, cnts[0], read_stats[0],
cnts[1], read_stats[1], setstr[i]])
make_stats_report(
table, total_read, args.subsample_rate, args.prefix_match,
sd, original_fastq, args.output_dir, TEMP_DIR, args.no_output_files)
if __name__ == "__main__":
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
try:
main()
except KeyboardInterrupt:
rm_temp_dir(TEMP_DIR)
except Exception as e:
prog = os.path.basename(sys.argv[0])
rm_temp_dir(TEMP_DIR)
sys.exit("{}: error: {}".format(prog, str(e)))
finally:
rm_temp_dir(TEMP_DIR)
|
|
__gamename__ = "uno"
#Copyright (c) 2010 Thomas Watson
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
#'a\x02<--bold \x1f<--underline \x16<--reverse \x0f<-- reset \x0314 <-- color'
#CARD VALUES
#0-9: 0-9
#10: draw 2
#11: reverse
#12: skip
#13: wild
#14: wdf
colors = [4,9,12,8]
import random
class Uno:
def __init__(self, irc, options):
self.irc = irc
self.started = False
self.players = []
self.currplayer = 0
self.direction = 1
self.discard = []
self.topcard = None
self.drew = False
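        # Build a standard 108-card Uno deck: per color, one 0 plus two each
        # of 1-9, draw-two (10), reverse (11) and skip (12), then four wilds
        # (13) and four wild-draw-fours (14).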
tdeck = []
for x in xrange(1, 13):
tdeck.append(x)
tdeck.append(x)
tdeck.append(0)
self.deck = []
for x in tdeck:
for y in xrange(4):
self.deck.append([x, y])
for x in xrange(4):
self.deck.append([13, -1])
self.deck.append([14, -1])
random.shuffle(self.deck)
def join(self, hostname):
hand = []
for x in xrange(7):
hand.append(self.popcard())
if hostname != "FunBot":
hand.sort(key=lambda card: card[1]*15+card[0])
self.irc.notice(self.irc.getnick(hostname), "Your cards: "+" ".join([self.getcardtext(card) for card in hand]))
if self.irc.getuserdata(hostname) == None:
self.irc.setuserdata(hostname, [0, 0])
self.players.append([hostname, hand])
def getcardtext(self, card_, article=False):
text = ""
card = card_[0]
color = card_[1]
if card < 10:
text = str(card)
elif card == 10:
text = "DT"
elif card == 11:
text = "R"
elif card == 12:
text = "S"
        elif (card == 13 or card == 14) and color != -1:
if color == 0:
text = "RED"
elif color == 1:
text = "GREEN"
elif color == 2:
text = "BLUE"
elif color == 3:
text = "YELLOW"
text = " "+text+" "
if color != -1:
text = "\x03"+str(colors[color])+",1"+text
else:
if card == 13:
text = "\x038,1W\x0312,1I\x034,1L\x039,1D"
elif card == 14:
text = "\x038,1W\x0312,1D\x034,1F"
text = "\x0300["+text+"\x0F\x0300]\x0F"
if article:
text = ("an " if card == 8 else "a") + text
return text
def popcard(self):
card = self.deck.pop()
if len(self.deck) == 0:
self.deck = self.discard[:]
self.discard = []
random.shuffle(self.deck)
return card
def canstart(self):
if len(self.players) == 1:
return 1
return 0
def start(self):
self.started = True
self.topcard = self.popcard()
self.runturn(True)
def stop(self):
pass
def handleactioncard(self):
card = self.topcard[0]
player = self.irc.getnick(self.players[self.currplayer][0])
if card == 10:
self.irc.send(player + " draws TWO cards and is \x034\x02SKIPPED!")
for x in xrange(2):
self.players[self.currplayer][1].append(self.popcard())
elif card == 11:
self.direction *= -1
if self.direction == 1:
self.irc.send("REVERSE -->")
else:
self.irc.send("<-- REVERSE")
elif card == 12:
self.irc.send(player + " is \x034\x02SKIPPED!")
elif card == 14:
self.irc.send(player + " draws \x02FOUR\x02 cards and is \x034\x02SKIPPED!")
for x in xrange(4):
self.players[self.currplayer][1].append(self.popcard())
else:
return
self.currplayer = (self.currplayer+self.direction)%len(self.players)
def canbeplayed(self, card):
topcard = self.topcard
if topcard[0] == card[0]:
return True
if topcard[1] == card[1]:
return True
if card[0] > 12:
return True
if topcard[1] == -1:
return True
return False
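    # Example: with topcard [5, 2] (a blue 5), a [5, 0] (red 5) matches by
    # rank, a [12, 2] (blue skip) matches by color, and wilds ([13, -1] or
    # [14, -1]) can always be played; a colorless topcard (color -1) also
    # matches anything.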
def runturn(self, firstturn=False):
player = self.players[self.currplayer]
nick = self.irc.getnick(player[0])
self.irc.send(nick + " is up, top card: "+self.getcardtext(self.topcard))
if player[0] != "FunBot":
player[1].sort(key=lambda card: card[1]*15+card[0])
self.irc.notice(nick, " ".join([self.getcardtext(card) for card in player[1]]))
        if self.topcard[0] > 9 and firstturn:
            self.handleactioncard()
            return self.runturn()
if player[0] == "FunBot":
return self.handleai()
self.drew = False
def handleai(self):
me = self.players[self.currplayer]
nick = self.irc.getnick("FunBot")
prefix = self.irc.getprefix()
playablecards = map(self.appendpoints, filter(self.canbeplayed, me[1]))
if len(playablecards) == 0:
if self.drew == True:
return self.handlecmd("s", [], True, "FunBot", nick)
else:
return self.handlecmd("d", [], True, "FunBot", nick)
playablecards.sort()
playablecards.reverse()
preferredcards = filter(lambda card: card[0] != 50, playablecards)
if len(preferredcards) == 0:
pointvals = [[0,"red"],[0,"green"],[0,"blue"],[0,"yellow"]]
for x in me[1]:
if x[1] == -1: continue
pointvals[x[1]][0] += self.appendpoints(x)[0]
pointvals.sort()
pointvals.reverse()
if playablecards[0][1] == 13:
return self.handlecmd("p", ["wild", pointvals[0][1]], True, "FunBot", nick)
else:
return self.handlecmd("p", ["wdf", pointvals[0][1]], True, "FunBot", nick)
preferredcards2 = filter(lambda card: card[2] != self.topcard[1], preferredcards)
if len(preferredcards2) == 0:
preferredcards2 = preferredcards
card_ = preferredcards2[0]
color = ["red", "green", "blue", "yellow"][card_[2]]
if card_[0] < 10:
card = str(card_[0])
else:
card = ["drawtwo", "reverse", "skip"][card_[1]-10]
return self.handlecmd("p", [color, card], True, "FunBot", nick)
def appendpoints(self, card):
if card[0] < 10:
return [card[0], card[0], card[1]]
elif card[0] > 9 and card[0] < 13:
return [20, card[0], card[1]]
else:
return [50, card[0], card[1]]
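    # Example: appendpoints([7, 1]) -> [7, 7, 1] (number cards score face
    # value), appendpoints([11, 3]) -> [20, 11, 3] (draw-two/reverse/skip
    # score 20), and appendpoints([14, -1]) -> [50, 14, -1] (wilds score 50),
    # matching standard Uno scoring. The result is [points, card, color],
    # so sorting ranks candidates by point value first.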
def getcolor(self, t):
if t == "r" or t == "red":
return 0
elif t == "g" or t == "green":
return 1
elif t == "b" or t == "blue":
return 2
elif t == "y" or t == "yellow":
return 3
return -1
def getcard(self, t):
        try:
            x = int(t)
        except ValueError:
            pass
        else:
            if 0 <= x <= 9:
                return x
if t == "d2" or t == "dt" or t == "drawtwo":
return 10
elif t == "r" or t == "reverse":
return 11
elif t == "s" or t == "skip":
return 12
return -1
def handlewin(self, hostname):
nick = self.irc.getnick(hostname)
self.irc.send(nick+" wins!!")
points = 0
for player in self.players:
if player[0] == hostname:
continue
player[1].sort(key=lambda card: card[1]*15+card[0])
self.irc.send(self.irc.getnick(player[0])+"'s cards: "+" ".join([self.getcardtext(c) for c in player[1]]))
pointvals = sum([self.appendpoints(c)[0] for c in player[1]])
userdata = self.irc.getuserdata(player[0])
self.irc.setuserdata(player[0], [userdata[0]-pointvals, userdata[1]+1])
points += pointvals
userdata = self.irc.getuserdata(hostname)
self.irc.setuserdata(hostname, [userdata[0]+points, userdata[1]+1])
self.irc.send(nick+" gets "+str(points)+" points!")
def handlecmd(self, cmd, args, playing, hostname, nick):
cmd = cmd.lower()
if not self.started:
return
if cmd == "count":
self.irc.send("Number of cards: "+", ".join([self.irc.getnick(p[0])+" has "+str(len(p[1])) for p in self.players]))
return
if not playing:
return
if self.players[self.currplayer][0] != hostname:
self.irc.notice(nick, "It's not your turn!")
return
if cmd == "p" or cmd == "put":
player = self.players[self.currplayer]
if len(args) < 2:
self.irc.notice(nick, "Invalid play!")
return
t = args[0].lower()
color = self.getcolor(t)
card = -1
wcolor = -1
if color == -1:
if t == "w" or t == "wild":
card = 13
wcolor = self.getcolor(args[1].lower())
elif t == "wdf" or t == "wd4":
card = 14
wcolor = self.getcolor(args[1].lower())
else:
self.irc.notice(nick, "Invalid play!")
return
if card == -1:
card = self.getcard(args[1].lower())
if card == -1:
self.irc.notice(nick, "Invalid play!")
return
if self.canbeplayed([card, color]) == False:
self.irc.notice(nick, "Invalid play!")
return
try:
x = player[1].index([card, color])
except:
self.irc.notice(nick, "Invalid play!")
return
player[1].remove([card, color])
if wcolor != -1:
self.topcard = [card, wcolor]
else:
self.topcard = [card, color]
self.drew = False
self.irc.send(nick+" plays "+self.getcardtext([card, color], True))
if len(player[1]) < 2:
if len(player[1]) == 1:
self.irc.send("\x02"+nick+" has UNO!!")
else:
self.handlewin(hostname)
return True
self.currplayer = (self.currplayer+self.direction)%len(self.players)
self.handleactioncard()
return self.runturn()
elif cmd == "d" or cmd == "draw":
if self.drew == True:
self.irc.notice(nick, "You already drew a card!")
return
self.drew = True
card = self.popcard()
if hostname != "FunBot":
self.irc.notice(nick, "You drew: "+self.getcardtext(card))
self.irc.send(nick+" drew a card")
self.players[self.currplayer][1].append(card)
if hostname == "FunBot":
return self.handleai()
elif cmd == "s" or cmd == "skip":
if self.drew == False:
self.irc.notice(nick, "Draw a card first!")
return
self.drew = False
self.currplayer = (self.currplayer+self.direction)%len(self.players)
return self.runturn()
elif cmd == "c" or cmd == "cards":
self.irc.send("Top card: "+self.getcardtext(self.topcard))
self.irc.notice(nick, " ".join([self.getcardtext(card) for card in self.players[self.currplayer][1]]))
def start(irc, options):
return Uno(irc, options)
def show_stats(userdata):
return "Total points: "+str(userdata[0])+", Number of games played: "+str(userdata[1])
def show_help(cmd):
if cmd == None:
return "This is everybody's favorite card game, Uno!\nCommands: count, put, draw, skip, cards\nOther topics: colornames, cardnames, rules, shortforms"
if cmd == "count":
return "Syntax: count\nShows how many cards each player has."
if cmd == "put":
return "Syntax: <put|p> <color> <card>\nPlays the card that has the color color. If you want to play a wild or wild draw four, the color is wild or wdf and card is the color you want it to change to."
if cmd == "draw":
return "Syntax: <draw|d>\nDraws a card from the deck. Once you draw a card, you must skip or play the card."
if cmd == "skip":
return "Syntax: <skip|s>\nSkips your turn. You must draw a card before you can skip."
if cmd == "cards":
return "Syntax: <cards|c>\nShows the top card and shows you your cards."
if cmd == "colornames":
return "Colors: red or r, green or g, blue or b, yellow or y"
if cmd == "cardnames":
return "Cards: 0 through 9, reverse or r, skip or s, drawtwo or dt or d2, wild or w, wdf or wd4"
if cmd == "shortforms":
return "Short forms: Most commands and cards have a short form that is shorter and easier to type. See the respective help topics for them for the short forms."
if cmd == "rules":
return "Rules of UNO: Uno is a card game where the goal is to get rid of all your cards.\nPlaying cards: You can play a card if it matches the color or number of the top card. Wild and wild draw four cards can be played on anything and change the color of the top card\nAction cards: Action cards are cards that affect something in the game. Reverse: Reverses the direction of play. Skip: Skips the next player's turn. Draw two: Makes the next player draw two cards and skips their turn. Wild: Changes the color of the deck. Wild draw four: Same as wild, but makes the next player draw four cards and skips their turn\nIf you have no cards to play, you can draw a card. You must play the card you drew or skip your turn."
|
|
"""Common functions. """
import pysam
import logging
import subprocess
from contextlib import contextmanager
# create logger for the entire program
log = logging.getLogger('riboplot')
log.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(module)s %(levelname)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
ch.setFormatter(formatter)
log.addHandler(ch)
class ArgumentError(Exception):
"""Raised when invalid arguments are sent in the command line."""
pass
class BamFileError(Exception):
"""Errors related to BAM file"""
pass
class RiboPlotError(Exception):
"""General errors relating to riboplot."""
pass
class RiboCountError(Exception):
"""General errors relating to ribocount."""
pass
class RNACountsError(Exception):
"""For errors related to RNA Coverage generation using bedtools. """
pass
def lengths_offsets(value):
"""Split the given comma separated value to multiple integer values. """
values = []
for item in value.split(','):
item = int(item)
values.append(item)
return values
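# Usage sketch (values are hypothetical): the command line passes read lengths
# and offsets as comma-separated strings, e.g.
#   lengths_offsets('26,27,28')  ->  [26, 27, 28]
#   lengths_offsets('0')         ->  [0]   (0 is treated as "all read lengths")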
@contextmanager
def open_pysam_file(fname, ftype):
"""Open a BAM or FASTA file with pysam (for use with "with" statement)"""
if ftype == 'bam':
fpysam = pysam.AlignmentFile(fname, 'rb')
elif ftype == 'fasta':
fpysam = pysam.FastaFile(fname)
else:
raise ValueError('Unsupported file type: {0}'.format(ftype))
try:
yield fpysam
finally:
fpysam.close()
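# Minimal usage sketch ("sample.bam" is a placeholder file name):
#   with open_pysam_file('sample.bam', ftype='bam') as bam:
#       print(bam.references[0])
# The context manager ensures the pysam handle is closed when the block exits.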
def is_bam_valid(bam_file):
"""Check if bam file is valid. Raises a ValueError if pysam cannot read the file.
#TODO: pysam does not differentiate between BAM and SAM
"""
f = pysam.AlignmentFile(bam_file)
f.close()
return True
def bam_has_index(bam_file):
"""Check if bam file has an index. Returns True/False."""
has_index = None
with pysam.AlignmentFile(bam_file, 'rb') as bam_fileobj:
try:
bam_fileobj.fetch(bam_fileobj.references[0])
except ValueError as err:
# pysam raises a ValueError when fetch is called on a BAM without an index
if 'without index' in str(err):
bam_fileobj.close()
has_index = False
else:
has_index = True
return has_index
def create_bam_index(bam_file):
"""Create an index for the given BAM file."""
pysam.index(bam_file)
def is_fasta_valid(fasta_file):
"""Check if fasta file is valid. Raises a ValueError if pysam cannot read the file.
#TODO: pysam does not differentiate between BAM and SAM
"""
f = pysam.FastaFile(fasta_file)
f.close()
return True
def get_first_transcript_name(fasta_file):
"""Return the first FASTA sequence from the given FASTA file.
Keyword arguments:
fasta_file -- FASTA format file of the transcriptome
"""
with open_pysam_file(fname=fasta_file, ftype='fasta') as f:
transcript_name = f.references[0]
return transcript_name
def get_fasta_record(fasta_file, transcript_name):
"""Return a single transcript from a valid fasta file as a record.
record[transcript_name] = sequence
Keyword arguments:
fasta_file -- FASTA format file of the transcriptome
transcript_name -- Name of the transcript as in the FASTA header
"""
with open_pysam_file(fname=fasta_file, ftype='fasta') as f:
sequence = f.fetch(transcript_name)
return {transcript_name: sequence}
def get_fasta_records(fasta, transcripts):
"""Return list of transcript records from the given fasta file.
Each record will be of the form {'sequence_id': {'sequence': 'AAA', 'length': 3}}
transcripts should be provided as a list of sequence ids.
"""
records = {}
f = pysam.FastaFile(fasta)
for transcript in transcripts:
try:
sequence, length = f.fetch(transcript), f.get_reference_length(transcript)
except KeyError:
msg = 'Transcript "{}" does not exist in transcriptome FASTA file'.format(transcript)
log.error(msg)
raise ArgumentError(msg)
records[transcript] = {'sequence': sequence, 'length': length}
f.close()
return records
def get_three_frame_orfs(sequence, starts=None, stops=None):
"""Find ORF's in frames 1, 2 and 3 for the given sequence.
Positions returned are 1-based (not 0)
Return format [{'start': start_position, 'stop': stop_position, 'sequence': sequence}, ]
Keyword arguments:
sequence -- sequence for the transcript
starts -- List of codons to be considered as start (Default: ['ATG'])
stops -- List of codons to be considered as stop (Default: ['TAG', 'TGA', 'TAA'])
"""
if not starts:
starts = ['ATG']
if not stops:
stops = ['TAG', 'TGA', 'TAA']
# Find ORFs in 3 frames
orfs = []
for frame in range(3):
start_codon = None
orf = ''
for position in range(frame, len(sequence), 3):
codon = sequence[position:position + 3]
if codon in starts:
# We have found a start already, so add codon to orf and
# continue. This is an internal MET
if start_codon is not None:
orf += codon
continue
# New orf start
start_codon = position
orf = codon
else:
# if sequence starts with ATG, start_codon will be 0
if start_codon is None:
# We haven't found a start codon yet
continue
orf += codon
if codon in stops:
# orfs[start_codon + 1] = orf
orfs.append({'start': start_codon + 1, 'stop': position + 3, 'sequence': orf})
# Reset
start_codon = None
orf = ''
return orfs
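# Worked example (toy sequence): in frame 1 the sequence below is read as the
# codons ATG, AAA, TAA, giving a single ORF that spans the whole string:
#   get_three_frame_orfs('ATGAAATAA')
#   -> [{'start': 1, 'stop': 9, 'sequence': 'ATGAAATAA'}]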
def get_longest_orf(orfs):
"""Find longest ORF from the given list of ORFs."""
sorted_orf = sorted(orfs, key=lambda x: len(x['sequence']), reverse=True)[0]
return sorted_orf
def filter_ribo_counts(counts, orf_start=None, orf_stop=None):
"""Filter read counts and return only upstream of orf_start or downstream
of orf_stop.
Keyword arguments:
counts -- Ribo-Seq read counts obtained from get_ribo_counts.
orf_start -- Start position of the longest ORF.
orf_stop -- Stop position of the longest ORF.
"""
filtered_counts = dict.copy(counts)
for position in counts:
if orf_start and orf_stop:
# if only upstream and downstream reads are required, check if
# current position is upstream or downstream of the ORF start/stop
# if not, remove from counts
if (position > orf_start and position < orf_stop):
filtered_counts.pop(position)
elif orf_start:
# check if current position is upstream of ORF start. if not, remove
if position >= orf_start:
filtered_counts.pop(position)
elif orf_stop:
# check if current position is downstream of ORF stop. If not,
# remove
if position <= orf_stop:
filtered_counts.pop(position)
# calculate total reads for this transcript
total_reads = sum(sum(item.values()) for item in filtered_counts.values())
return filtered_counts, total_reads
def get_ribo_counts(ribo_fileobj, transcript_name, read_lengths, read_offsets):
"""For each mapped read of the given transcript in the BAM file
(pysam AlignmentFile object), return the position (+1) and the
corresponding frame (1, 2 or 3) to which it aligns.
Keyword arguments:
ribo_fileobj -- file object - BAM file opened using pysam AlignmentFile
transcript_name -- Name of transcript to get counts for
read_lengths -- List of read lengths to get counts for (0 means all read lengths).
read_offsets -- List of offsets, one per read length, added to the read position.
"""
read_counts = {}
total_reads = 0
for record in ribo_fileobj.fetch(transcript_name):
query_length = record.query_length
position_ref = record.pos + 1
for index, read_length in enumerate(read_lengths):
position = position_ref # reset position
if read_length == 0 or read_length == query_length:
# if an offset is specified, increment position by that offset.
position += read_offsets[index]
else:
# ignore other reads/lengths
continue
total_reads += 1
if position not in read_counts:
read_counts[position] = {1: 0, 2: 0, 3: 0}
# calculate the frame of the read from position
rem = position % 3
if rem == 0:
read_counts[position][3] += 1
else:
read_counts[position][rem] += 1
log.debug('Total read counts: {}'.format(total_reads))
log.debug('RiboSeq read counts for transcript: {0}\n{1}'.format(transcript_name, read_counts))
return read_counts, total_reads
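# Frame assignment sketch: positions are 1-based, so position % 3 maps
# positions 1, 2, 3, 4, ... to frames 1, 2, 3, 1, ... (a remainder of 0 means
# frame 3), which is exactly the rem/else branch above:
#   frame = 3 if position % 3 == 0 else position % 3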
def check_required_arguments(ribo_file, transcriptome_fasta, transcript_name=None):
"""Check required arguments of both riboplot and ribocount."""
# Is this a valid BAM file? i.e., can pysam read it?
try:
is_bam_valid(ribo_file)
except ValueError:
log.error('The given RiboSeq BAM file is not valid')
raise
# Does the BAM file have an index? If not, create it.
if not bam_has_index(ribo_file):
log.info('Creating an index for the BAM file...')
create_bam_index(ribo_file)
if not bam_has_index(ribo_file):
msg = ('Could not create an index for this BAM file. Is this a valid BAM file '
'and/or is the BAM file sorted by chromosomal coordinates?')
log.error(msg)
raise BamFileError(msg)
# Is FASTA file valid?
fasta_valid = False
try:
fasta_valid = is_fasta_valid(transcriptome_fasta)
except IOError:
log.error('Transcriptome FASTA file is not valid')
raise
if fasta_valid:
if transcript_name:
try:
get_fasta_records(transcriptome_fasta, [transcript_name])
except IOError:
log.error('Could not get FASTA sequence of "{}" from transcriptome FASTA file'.format(transcript_name))
raise
else:
# ribocount doesn't have a transcript option so we get the first
# sequence name from the fasta file
transcript_name = get_first_transcript_name(transcriptome_fasta)
# check if transcript also exists in BAM
with pysam.AlignmentFile(ribo_file, 'rb') as bam_file:
if transcript_name not in bam_file.references:
msg = 'Transcript "{}" does not exist in BAM file'.format(transcript_name)
log.error(msg)
raise ArgumentError(msg)
def check_rna_file(rna_file):
"""Check if bedtools is available and if the given RNA-Seq bam file is valid. """
try:
subprocess.check_output(['bedtools', '--version'])
except OSError:
log.error('Could not find bedtools in PATH. bedtools is required '
'for generating RNA coverage plot.')
raise
# Is this a valid BAM file? i.e., can pysam read it?
try:
is_bam_valid(rna_file)
except ValueError:
log.error('The given RNASeq BAM file is not valid')
raise
def check_read_lengths(ribo_file, read_lengths):
"""Check if read lengths are valid (positive). """
# check if there are any valid read lengths to check i.e., not equal to 0
valid_lengths = list(set(read_lengths))
# if read length is 0, all read lengths are requested so we skip further
# checks.
if len(valid_lengths) == 1 and valid_lengths[0] == 0:
return
for read_length in valid_lengths:
if read_length < 0:
msg = 'Read length must be a positive value'
log.error(msg)
raise ArgumentError(msg)
def check_read_offsets(read_offsets):
"""Check if read offsets are valid (positive)."""
for read_offset in read_offsets:
if read_offset < 0:
msg = 'Read offset must be 0 or greater'
log.error(msg)
raise ArgumentError(msg)
def check_read_lengths_offsets(read_lengths, read_offsets):
"""Check if read length has corresponding read offset for all read lengths. """
if not len(read_lengths) == len(read_offsets):
raise ArgumentError('Each read length should have a corresponding offset value')
else:
return True
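# End-to-end sketch of the argument checks above (file and transcript names
# are hypothetical):
#   check_required_arguments('ribo.bam', 'transcripts.fa', 'TRANSCRIPT_1')
#   check_read_lengths_offsets(read_lengths=[28, 29], read_offsets=[12, 12])
#   check_read_lengths('ribo.bam', read_lengths=[28, 29])
#   check_read_offsets(read_offsets=[12, 12])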
|
|
# Copyright (c) 2007, Linden Research, Inc.
# Copyright (c) 2007, IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import gc
import random
import re
import time
import eventlet
from eventlet import tpool, debug, event
from eventlet.support import six
from tests import LimitedTestCase, skip_with_pyevent, main
one = 1
two = 2
three = 3
none = None
def noop():
pass
def raise_exception():
raise RuntimeError("hi")
class TestTpool(LimitedTestCase):
def setUp(self):
super(TestTpool, self).setUp()
def tearDown(self):
tpool.killall()
super(TestTpool, self).tearDown()
@skip_with_pyevent
def test_wrap_tuple(self):
my_tuple = (1, 2)
prox = tpool.Proxy(my_tuple)
self.assertEqual(prox[0], 1)
self.assertEqual(prox[1], 2)
self.assertEqual(len(my_tuple), 2)
@skip_with_pyevent
def test_wrap_string(self):
my_object = "whatever"
prox = tpool.Proxy(my_object)
self.assertEqual(str(my_object), str(prox))
self.assertEqual(len(my_object), len(prox))
self.assertEqual(my_object.join(['a', 'b']), prox.join(['a', 'b']))
@skip_with_pyevent
def test_wrap_uniterable(self):
prox = tpool.Proxy([])
def index():
prox[0]
def key():
prox['a']
self.assertRaises(IndexError, index)
self.assertRaises(TypeError, key)
@skip_with_pyevent
def test_wrap_dict(self):
my_object = {'a': 1}
prox = tpool.Proxy(my_object)
self.assertEqual('a', list(prox.keys())[0])
self.assertEqual(1, prox['a'])
self.assertEqual(str(my_object), str(prox))
self.assertEqual(repr(my_object), repr(prox))
@skip_with_pyevent
def test_wrap_module_class(self):
prox = tpool.Proxy(re)
self.assertEqual(tpool.Proxy, type(prox))
exp = prox.compile('(.)(.)(.)')
self.assertEqual(exp.groups, 3)
assert repr(prox.compile)
@skip_with_pyevent
def test_wrap_eq(self):
prox = tpool.Proxy(re)
exp1 = prox.compile('.')
exp2 = prox.compile(exp1.pattern)
self.assertEqual(exp1, exp2)
exp3 = prox.compile('/')
assert exp1 != exp3
@skip_with_pyevent
def test_wrap_ints(self):
p = tpool.Proxy(4)
assert p == 4
@skip_with_pyevent
def test_wrap_hash(self):
prox1 = tpool.Proxy('' + 'A')
prox2 = tpool.Proxy('A' + '')
assert prox1 == 'A'
assert 'A' == prox2
# assert prox1 == prox2 FIXME - could __eq__ unwrap rhs if it is other proxy?
self.assertEqual(hash(prox1), hash(prox2))
proxList = tpool.Proxy([])
self.assertRaises(TypeError, hash, proxList)
@skip_with_pyevent
def test_wrap_nonzero(self):
prox = tpool.Proxy(re)
exp1 = prox.compile('.')
assert bool(exp1)
prox2 = tpool.Proxy([1, 2, 3])
assert bool(prox2)
@skip_with_pyevent
def test_multiple_wraps(self):
prox1 = tpool.Proxy(re)
prox2 = tpool.Proxy(re)
prox1.compile('.')
x2 = prox1.compile('.')
del x2
prox2.compile('.')
@skip_with_pyevent
def test_wrap_getitem(self):
prox = tpool.Proxy([0, 1, 2])
self.assertEqual(prox[0], 0)
@skip_with_pyevent
def test_wrap_setitem(self):
prox = tpool.Proxy([0, 1, 2])
prox[1] = 2
self.assertEqual(prox[1], 2)
@skip_with_pyevent
def test_wrap_iterator(self):
self.reset_timeout(2)
prox = tpool.Proxy(range(10))
result = []
for i in prox:
result.append(i)
self.assertEqual(list(range(10)), result)
@skip_with_pyevent
def test_wrap_iterator2(self):
self.reset_timeout(5) # might take a while due to imprecise sleeping
def foo():
import time
for x in range(2):
yield x
time.sleep(0.001)
counter = [0]
def tick():
for i in six.moves.range(20000):
counter[0] += 1
if counter[0] % 20 == 0:
eventlet.sleep(0.0001)
else:
eventlet.sleep()
gt = eventlet.spawn(tick)
previtem = 0
for item in tpool.Proxy(foo()):
assert item >= previtem
previtem = item
# make sure the tick happened at least a few times so that we know
# that our iterations in foo() were actually tpooled
assert counter[0] > 10, counter[0]
gt.kill()
@skip_with_pyevent
def test_raising_exceptions(self):
prox = tpool.Proxy(re)
def nofunc():
prox.never_name_a_function_like_this()
self.assertRaises(AttributeError, nofunc)
from tests import tpool_test
prox = tpool.Proxy(tpool_test)
self.assertRaises(RuntimeError, prox.raise_exception)
@skip_with_pyevent
def test_variable_and_keyword_arguments_with_function_calls(self):
import optparse
parser = tpool.Proxy(optparse.OptionParser())
parser.add_option('-n', action='store', type='string', dest='n')
opts, args = parser.parse_args(["-nfoo"])
self.assertEqual(opts.n, 'foo')
@skip_with_pyevent
def test_contention(self):
from tests import tpool_test
prox = tpool.Proxy(tpool_test)
pile = eventlet.GreenPile(4)
pile.spawn(lambda: self.assertEqual(prox.one, 1))
pile.spawn(lambda: self.assertEqual(prox.two, 2))
pile.spawn(lambda: self.assertEqual(prox.three, 3))
results = list(pile)
self.assertEqual(len(results), 3)
@skip_with_pyevent
def test_timeout(self):
import time
eventlet.Timeout(0.1, eventlet.TimeoutError())
self.assertRaises(eventlet.TimeoutError,
tpool.execute, time.sleep, 0.3)
@skip_with_pyevent
def test_killall(self):
tpool.killall()
tpool.setup()
@skip_with_pyevent
def test_killall_remaining_results(self):
semaphore = event.Event()
def native_fun():
time.sleep(.5)
def gt_fun():
semaphore.send(None)
tpool.execute(native_fun)
gt = eventlet.spawn(gt_fun)
semaphore.wait()
tpool.killall()
gt.wait()
@skip_with_pyevent
def test_autowrap(self):
x = tpool.Proxy({'a': 1, 'b': 2}, autowrap=(int,))
assert isinstance(x.get('a'), tpool.Proxy)
assert not isinstance(x.items(), tpool.Proxy)
# attributes as well as callables
from tests import tpool_test
x = tpool.Proxy(tpool_test, autowrap=(int,))
assert isinstance(x.one, tpool.Proxy)
assert not isinstance(x.none, tpool.Proxy)
@skip_with_pyevent
def test_autowrap_names(self):
x = tpool.Proxy({'a': 1, 'b': 2}, autowrap_names=('get',))
assert isinstance(x.get('a'), tpool.Proxy)
assert not isinstance(x.items(), tpool.Proxy)
from tests import tpool_test
x = tpool.Proxy(tpool_test, autowrap_names=('one',))
assert isinstance(x.one, tpool.Proxy)
assert not isinstance(x.two, tpool.Proxy)
@skip_with_pyevent
def test_autowrap_both(self):
from tests import tpool_test
x = tpool.Proxy(tpool_test, autowrap=(int,), autowrap_names=('one',))
assert isinstance(x.one, tpool.Proxy)
# violating the abstraction to check that we didn't double-wrap
assert not isinstance(x._obj, tpool.Proxy)
@skip_with_pyevent
def test_callable(self):
def wrapped(arg):
return arg
x = tpool.Proxy(wrapped)
self.assertEqual(4, x(4))
# verify that it wraps return values if specified
x = tpool.Proxy(wrapped, autowrap_names=('__call__',))
assert isinstance(x(4), tpool.Proxy)
self.assertEqual("4", str(x(4)))
@skip_with_pyevent
def test_callable_iterator(self):
def wrapped(arg):
yield arg
yield arg
yield arg
x = tpool.Proxy(wrapped, autowrap_names=('__call__',))
for r in x(3):
self.assertEqual(3, r)
@skip_with_pyevent
def test_eventlet_timeout(self):
def raise_timeout():
raise eventlet.Timeout()
self.assertRaises(eventlet.Timeout, tpool.execute, raise_timeout)
@skip_with_pyevent
def test_tpool_set_num_threads(self):
tpool.set_num_threads(5)
self.assertEqual(5, tpool._nthreads)
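# Minimal tpool usage sketch (illustrative; this is the behaviour the tests
# above exercise): tpool.execute runs a blocking callable in a worker thread
# and returns its result to the calling green thread.
#   from eventlet import tpool
#   result = tpool.execute(pow, 2, 10)  # runs pow(2, 10) off the event loop
#   assert result == 1024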
class TpoolLongTests(LimitedTestCase):
TEST_TIMEOUT = 60
@skip_with_pyevent
def test_a_buncha_stuff(self):
assert_ = self.assert_
class Dummy(object):
def foo(self, when, token=None):
assert_(token is not None)
time.sleep(random.random() / 200.0)
return token
def sender_loop(loopnum):
obj = tpool.Proxy(Dummy())
count = 100
for n in six.moves.range(count):
eventlet.sleep(random.random() / 200.0)
now = time.time()
token = loopnum * count + n
rv = obj.foo(now, token=token)
self.assertEqual(token, rv)
eventlet.sleep(random.random() / 200.0)
cnt = 10
pile = eventlet.GreenPile(cnt)
for i in six.moves.range(cnt):
pile.spawn(sender_loop, i)
results = list(pile)
self.assertEqual(len(results), cnt)
tpool.killall()
@skip_with_pyevent
def test_leakage_from_tracebacks(self):
tpool.execute(noop) # get it started
gc.collect()
initial_objs = len(gc.get_objects())
for i in range(10):
self.assertRaises(RuntimeError, tpool.execute, raise_exception)
gc.collect()
middle_objs = len(gc.get_objects())
# some objects will inevitably be created by the previous loop
# now we test to ensure that running the loop an order of
# magnitude more doesn't generate additional objects
for i in six.moves.range(100):
self.assertRaises(RuntimeError, tpool.execute, raise_exception)
first_created = middle_objs - initial_objs
gc.collect()
second_created = len(gc.get_objects()) - middle_objs
self.assert_(second_created - first_created < 10,
"first loop: %s, second loop: %s" % (first_created,
second_created))
tpool.killall()
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
import json
import os.path
from time import time
from pysnap.utils import (encrypt, decrypt, decrypt_story,
make_media_id, request)
MEDIA_IMAGE = 0
MEDIA_VIDEO = 1
MEDIA_VIDEO_NOAUDIO = 2
FRIEND_CONFIRMED = 0
FRIEND_UNCONFIRMED = 1
FRIEND_BLOCKED = 2
PRIVACY_EVERYONE = 0
PRIVACY_FRIENDS = 1
def is_video(data):
return len(data) > 1 and data[0:2] == b'\x00\x00'
def is_image(data):
return len(data) > 1 and data[0:2] == b'\xFF\xD8'
def is_zip(data):
return len(data) > 1 and data[0:2] == b'PK'
def get_file_extension(media_type):
if media_type in (MEDIA_VIDEO, MEDIA_VIDEO_NOAUDIO):
return 'mp4'
if media_type == MEDIA_IMAGE:
return 'jpg'
return ''
def get_media_type(data):
if is_video(data):
return MEDIA_VIDEO
if is_image(data):
return MEDIA_IMAGE
return None
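# Sniffing sketch (byte strings are illustrative): the media type is decided
# by the first two magic bytes of the payload.
#   get_media_type(b'\xFF\xD8...')  -> MEDIA_IMAGE   (JPEG)
#   get_media_type(b'\x00\x00...')  -> MEDIA_VIDEO   (MP4)
#   get_media_type(b'PK...')        -> None          (zips are detected via is_zip)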
def _map_keys(snap):
return {
u'id': snap.get('id', None),
u'media_id': snap.get('c_id', None),
u'media_type': snap.get('m', None),
u'time': snap.get('t', None),
u'sender': snap.get('sn', None),
u'recipient': snap.get('rp', None),
u'status': snap.get('st', None),
u'screenshot_count': snap.get('c', None),
u'sent': snap.get('sts', None),
u'opened': snap.get('ts', None)
}
class Snapchat(object):
"""Construct a :class:`Snapchat` object used for communicating
with the Snapchat API.
Usage:
from pysnap import Snapchat
snapchat = Snapchat()
snapchat.login('username', 'password')
...
"""
def __init__(self):
self.username = None
self.auth_token = None
def _request(self, endpoint, data=None, files=None,
raise_for_status=True, req_type='post'):
return request(endpoint, self.auth_token, data, files,
raise_for_status, req_type)
def _unset_auth(self):
self.username = None
self.auth_token = None
def login(self, username, password):
"""Login to Snapchat account
Returns a dict containing user information on successful login, the
data returned is similar to get_updates.
:param username: Snapchat username
:param password: Snapchat password
"""
self._unset_auth()
r = self._request('login', {
'username': username,
'password': password
})
result = r.json()
if 'auth_token' in result:
self.auth_token = result['auth_token']
if 'username' in result:
self.username = username
return result
def logout(self):
"""Logout of Snapchat account
Returns true if logout was successful.
"""
r = self._request('logout', {'username': self.username})
return len(r.content) == 0
def get_updates(self, update_timestamp=0):
"""Get user, friend and snap updates
Returns a dict containing user, friends and snap information.
:param update_timestamp: Optional timestamp (epoch in seconds) to limit
updates
"""
r = self._request('updates', {
'username': self.username,
'update_timestamp': update_timestamp
})
result = r.json()
if 'auth_token' in result:
self.auth_token = result['auth_token']
return result
def get_snaps(self, update_timestamp=0):
"""Get snaps
Returns a dict containing metadata for snaps
:param update_timestamp: Optional timestamp (epoch in seconds) to limit
updates
"""
updates = self.get_updates(update_timestamp)
# Filter out snaps containing c_id as these are sent snaps
return [_map_keys(snap) for snap in updates['snaps']
if 'c_id' not in snap]
def get_friend_stories(self, update_timestamp=0):
"""Get stories
Returns a dict containing metadata for stories
:param update_timestamp: Optional timestamp (epoch in seconds) to limit
updates
"""
r = self._request("all_updates", {
'username': self.username,
'update_timestamp': update_timestamp
})
result = r.json()
if 'auth_token' in result:
self.auth_token = result['auth_token']
stories = []
story_groups = result['stories_response']['friend_stories']
for group in story_groups:
sender = group['username']
for story in group['stories']:
obj = story['story']
obj['sender'] = sender
stories.append(obj)
return stories
def get_story_blob(self, story_id, story_key, story_iv):
"""Get the image or video of a given snap
Returns the decrypted image or a video of the given snap or None if
data is invalid.
:param story_id: Media id to fetch
:param story_key: Encryption key of the story
:param story_iv: Encryption IV of the story
"""
r = self._request('story_blob', {'story_id': story_id},
raise_for_status=False, req_type='get')
data = decrypt_story(r.content, story_key, story_iv)
if any((is_image(data), is_video(data), is_zip(data))):
return data
return None
def get_blob(self, snap_id):
"""Get the image or video of a given snap
Returns the decrypted image or a video of the given snap or None if
data is invalid.
:param snap_id: Snap id to fetch
"""
r = self._request('blob', {'username': self.username, 'id': snap_id},
raise_for_status=False)
data = decrypt(r.content)
if any((is_image(data), is_video(data), is_zip(data))):
return data
return None
def send_events(self, events, data=None):
"""Send event data
Returns true on success.
:param events: List of events to send
:param data: Additional data to send
"""
if data is None:
data = {}
r = self._request('update_snaps', {
'username': self.username,
'events': json.dumps(events),
'json': json.dumps(data)
})
return len(r.content) == 0
def mark_viewed(self, snap_id, view_duration=1):
"""Mark a snap as viewed
Returns true on success.
:param snap_id: Snap id to mark as viewed
:param view_duration: Number of seconds snap was viewed
"""
now = time()
data = {snap_id: {u't': now, u'sv': view_duration}}
events = [
{
u'eventName': u'SNAP_VIEW', u'params': {u'id': snap_id},
u'ts': int(round(now)) - view_duration
},
{
u'eventName': u'SNAP_EXPIRED', u'params': {u'id': snap_id},
u'ts': int(round(now))
}
]
return self.send_events(events, data)
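# Payload sketch for a 1-second view (snap id and timestamps are made up):
#   events = [
#       {'eventName': 'SNAP_VIEW', 'params': {'id': 'abc123'}, 'ts': 1500000000},
#       {'eventName': 'SNAP_EXPIRED', 'params': {'id': 'abc123'}, 'ts': 1500000001},
#   ]
# i.e. the view event is back-dated by view_duration seconds relative to the
# expiry event.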
def mark_screenshot(self, snap_id, view_duration=1):
"""Mark a snap as screenshotted
Returns true on success.
:param snap_id: Snap id to mark as viewed
:param view_duration: Number of seconds snap was viewed
"""
now = time()
data = {snap_id: {u't': now, u'sv': view_duration, u'c': 3}}
events = [
{
u'eventName': u'SNAP_SCREENSHOT', u'params': {u'id': snap_id},
u'ts': int(round(now)) - view_duration
}
]
return self.send_events(events, data)
def update_privacy(self, friends_only):
"""Set privacy settings
Returns true on success.
:param friends_only: True to allow snaps from friends only
"""
setting = lambda f: PRIVACY_FRIENDS if f else PRIVACY_EVERYONE
r = self._request('settings', {
'username': self.username,
'action': 'updatePrivacy',
'privacySetting': setting(friends_only)
})
return r.json().get('param') == str(setting(friends_only))
def get_friends(self):
"""Get friends
Returns a list of friends.
"""
return self.get_updates().get('friends', [])
def get_best_friends(self):
"""Get best friends
Returns a list of best friends.
"""
return self.get_updates().get('bests', [])
def add_friend(self, username):
"""Add user as friend
Returns JSON response.
Expected messages:
Success: '{username} is now your friend!'
Pending: '{username} is private. Friend request sent.'
Failure: 'Sorry! Couldn't find {username}'
:param username: Username to add as a friend
"""
r = self._request('friend', {
'action': 'add',
'friend': username,
'username': self.username
})
return r.json()
def delete_friend(self, username):
"""Remove user from friends
Returns true on success.
:param username: Username to remove from friends
"""
r = self._request('friend', {
'action': 'delete',
'friend': username,
'username': self.username
})
return r.json().get('logged')
def block(self, username):
"""Block a user
Returns true on success.
:param username: Username to block
"""
r = self._request('friend', {
'action': 'block',
'friend': username,
'username': self.username
})
return r.json().get('message') == '{0} was blocked'.format(username)
def unblock(self, username):
"""Unblock a user
Returns true on success.
:param username: Username to unblock
"""
r = self._request('friend', {
'action': 'unblock',
'friend': username,
'username': self.username
})
return r.json().get('message') == '{0} was unblocked'.format(username)
def get_blocked(self):
"""Find blocked users
Returns a list of currently blocked users.
"""
return [f for f in self.get_friends() if f['type'] == FRIEND_BLOCKED]
def upload(self, path):
"""Upload media
Returns the media ID on success. The media ID is used when sending
the snap.
"""
if not os.path.exists(path):
raise ValueError('No such file: {0}'.format(path))
with open(path, 'rb') as f:
data = f.read()
media_type = get_media_type(data)
if media_type is None:
raise ValueError('Could not determine media type for given data')
media_id = make_media_id(self.username)
r = self._request('upload', {
'username': self.username,
'media_id': media_id,
'type': media_type
}, files={'data': encrypt(data)})
return media_id if len(r.content) == 0 else None
def send(self, media_id, recipients, time=5):
"""Send a snap. Requires a media_id returned by the upload method
Returns true if the snap was sent successfully
"""
r = self._request('send', {
'username': self.username,
'media_id': media_id,
'recipient': recipients,
'time': time,
'zipped': '0'
})
return len(r.content) == 0
def send_to_story(self, media_id, time=5, media_type=0):
"""Send a snap to your story. Requires a media_id returned by the upload method
Returns true if the snap was sent successfully.
"""
r = self._request('post_story', {
'username': self.username,
'media_id': media_id,
'client_id': media_id,
'time': time,
'type': media_type,
'zipped': '0'
})
return r.json()
def clear_feed(self):
"""Clear the user's feed
Returns true if feed was successfully cleared.
"""
r = self._request('clear', {
'username': self.username
})
return len(r.content) == 0
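# End-to-end usage sketch (credentials, file name and recipient are
# hypothetical):
#   s = Snapchat()
#   s.login('username', 'password')
#   media_id = s.upload('photo.jpg')
#   if media_id:
#       s.send(media_id, 'friend_username', time=5)
#   for snap in s.get_snaps():
#       blob = s.get_blob(snap['id'])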
|
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta2StatefulSet(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1beta2StatefulSetSpec',
'status': 'V1beta2StatefulSetStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None):
"""
V1beta2StatefulSet - a model defined in Swagger
"""
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
if status is not None:
self.status = status
@property
def api_version(self):
"""
Gets the api_version of this V1beta2StatefulSet.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:return: The api_version of this V1beta2StatefulSet.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1beta2StatefulSet.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:param api_version: The api_version of this V1beta2StatefulSet.
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""
Gets the kind of this V1beta2StatefulSet.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The kind of this V1beta2StatefulSet.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1beta2StatefulSet.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1beta2StatefulSet.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1beta2StatefulSet.
:return: The metadata of this V1beta2StatefulSet.
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1beta2StatefulSet.
:param metadata: The metadata of this V1beta2StatefulSet.
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""
Gets the spec of this V1beta2StatefulSet.
Spec defines the desired identities of pods in this set.
:return: The spec of this V1beta2StatefulSet.
:rtype: V1beta2StatefulSetSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""
Sets the spec of this V1beta2StatefulSet.
Spec defines the desired identities of pods in this set.
:param spec: The spec of this V1beta2StatefulSet.
:type: V1beta2StatefulSetSpec
"""
self._spec = spec
@property
def status(self):
"""
Gets the status of this V1beta2StatefulSet.
Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time.
:return: The status of this V1beta2StatefulSet.
:rtype: V1beta2StatefulSetStatus
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this V1beta2StatefulSet.
Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time.
:param status: The status of this V1beta2StatefulSet.
:type: V1beta2StatefulSetStatus
"""
self._status = status
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta2StatefulSet):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
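# Usage sketch (illustrative; instances are normally built by the generated
# Kubernetes client's deserializer rather than by hand):
#   ss = V1beta2StatefulSet(api_version='apps/v1beta2', kind='StatefulSet')
#   ss.to_dict()
#   -> {'api_version': 'apps/v1beta2', 'kind': 'StatefulSet',
#       'metadata': None, 'spec': None, 'status': None}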
|
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Contains code for non-linear model multi-fidelity model.
It is based on this paper:
Nonlinear information fusion algorithms for data-efficient multi-fidelity modelling.
P. Perdikaris, M. Raissi, A. Damianou, N. D. Lawrence and G. E. Karniadakis (2017)
http://web.mit.edu/parisp/www/assets/20160751.full.pdf
"""
from typing import List, Tuple, Type
import GPy
import numpy as np
from ...core.interfaces import IDifferentiable, IModel
from ..convert_lists_to_array import convert_x_list_to_array, convert_y_list_to_array
def make_non_linear_kernels(
base_kernel_class: Type[GPy.kern.Kern], n_fidelities: int, n_input_dims: int, ARD: bool = False
) -> List:
"""
This function takes a base kernel class and constructs the structured multi-fidelity kernels
At the first level the kernel is simply:
.. math::
k_{base}(x, x')
At subsequent levels the kernels are of the form
.. math::
k_{base}(x, x') k_{base}(y_{i-1}, y_{i-1}') + k_{base}(x, x')
:param base_kernel_class: GPy class definition of the kernel type to construct the kernels at
:param n_fidelities: Number of fidelities in the model. A kernel will be returned for each fidelity
:param n_input_dims: The dimensionality of the input.
:param ARD: If True, uses different lengthscales for different dimensions. Otherwise the same lengthscale is used
for all dimensions. Default False.
:return: A list of kernels with one entry for each fidelity starting from lowest to highest fidelity.
"""
base_dims_list = list(range(n_input_dims))
kernels = [base_kernel_class(n_input_dims, active_dims=base_dims_list, ARD=ARD, name="kern_fidelity_1")]
for i in range(1, n_fidelities):
fidelity_name = "fidelity" + str(i + 1)
interaction_kernel = base_kernel_class(
n_input_dims, active_dims=base_dims_list, ARD=ARD, name="scale_kernel_" + fidelity_name
)
scale_kernel = base_kernel_class(1, active_dims=[n_input_dims], name="previous_fidelity_" + fidelity_name)
bias_kernel = base_kernel_class(
n_input_dims, active_dims=base_dims_list, ARD=ARD, name="bias_kernel_" + fidelity_name
)
kernels.append(interaction_kernel * scale_kernel + bias_kernel)
return kernels
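# Construction sketch (the RBF base kernel is an illustrative choice):
#   kernels = make_non_linear_kernels(GPy.kern.RBF, n_fidelities=2, n_input_dims=1)
# kernels[0] is an RBF over the single input dimension; kernels[1] has the form
# RBF(x) * RBF(y_low) + RBF(x), where the extra active dimension carries the
# output of the lower-fidelity model.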
class NonLinearMultiFidelityModel(IModel, IDifferentiable):
"""
Non-linear model for multiple fidelities. This implementation of the model only handles 1-dimensional outputs.
The theory implies the training points should be nested, such that any point in a higher fidelity also exists
in all lower fidelities; in practice the model will still work if this constraint is ignored.
"""
def __init__(
self,
X_init: np.ndarray,
Y_init: np.ndarray,
n_fidelities,
kernels: List[GPy.kern.Kern],
n_samples=100,
verbose=False,
optimization_restarts=5,
) -> None:
"""
By default the noise at intermediate levels will be fixed to 1e-4.
:param X_init: Initial X values.
:param Y_init: Initial Y values.
:param n_fidelities: Number of fidelities in problem.
:param kernels: List of kernels for each GP model at each fidelity. The first kernel should take input of
dimension d_in and each subsequent kernel should take input of dimension (d_in+1) where d_in is
the dimensionality of the features.
:param n_samples: Number of samples to use to do quasi-Monte-Carlo integration at each fidelity. Default 100
:param verbose: Whether to output messages during optimization. Defaults to False.
:param optimization_restarts: Number of random restarts
when optimizing the Gaussian processes' hyper-parameters.
"""
if not isinstance(X_init, np.ndarray):
raise TypeError("X_init expected to be a numpy array")
if not isinstance(Y_init, np.ndarray):
raise TypeError("Y_init expected to be a numpy array")
self.verbose = verbose
self.optimization_restarts = optimization_restarts
self.n_fidelities = n_fidelities
# Generate random numbers from standardized gaussian for monte-carlo integration
self.monte_carlo_rand_numbers = np.random.randn(n_samples)[:, np.newaxis]
# Make lowest fidelity model
self.models = []
self._fidelity_idx = -1
is_lowest_fidelity = X_init[:, self._fidelity_idx] == 0
self.models.append(
GPy.models.GPRegression(X_init[is_lowest_fidelity, :-1], Y_init[is_lowest_fidelity, :], kernels[0])
)
# Make models for fidelities but lowest fidelity
for i in range(1, self.n_fidelities):
is_ith_fidelity = X_init[:, self._fidelity_idx] == i
# Append previous fidelity mean to X
previous_mean, _ = self._predict_deterministic(X_init[is_ith_fidelity, :-1], i)
augmented_input = np.concatenate([X_init[is_ith_fidelity, :-1], previous_mean], axis=1)
self.models.append(GPy.models.GPRegression(augmented_input, Y_init[is_ith_fidelity, :], kernels[i]))
# Fix noise parameters for all models except top fidelity
for model in self.models[:-1]:
model.Gaussian_noise.fix(1e-4)
def set_data(self, X: np.ndarray, Y: np.ndarray) -> None:
"""
Updates training data in the model.
:param X: New training features.
:param Y: New training targets.
"""
is_lowest_fidelity = 0 == X[:, -1]
X_low_fidelity = X[is_lowest_fidelity, :-1]
Y_low_fidelity = Y[is_lowest_fidelity, :]
self.models[0].set_XY(X_low_fidelity, Y_low_fidelity)
for i in range(1, self.n_fidelities):
is_this_fidelity = i == X[:, -1]
X_this_fidelity = X[is_this_fidelity, :-1]
Y_this_fidelity = Y[is_this_fidelity, :]
previous_mean, _ = self._predict_deterministic(X_this_fidelity, i)
augmented_input = np.concatenate([X_this_fidelity, previous_mean], axis=1)
self.models[i].set_XY(augmented_input, Y_this_fidelity)
@property
def X(self):
"""
:return: input array of size (n_points x n_input_dims) across every fidelity, in the original input domain,
i.e. it excludes the inputs to higher-fidelity models that come from the outputs of the previous level
"""
x_list = [self.models[0].X]
for model in self.models[1:]:
x_list.append(model.X[:, :-1])
return convert_x_list_to_array(x_list)
@property
def Y(self):
"""
:return: output array of size (n_points x n_outputs) across every fidelity level
"""
return convert_y_list_to_array([model.Y for model in self.models])
@property
def n_samples(self):
return self.monte_carlo_rand_numbers.shape[0]
def predict(self, X: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
Predicts mean and variance at fidelity given by the last column of X
Note that the posterior isn't Gaussian and so this function doesn't tell us everything about our posterior
distribution.
:param X: Input locations with fidelity index appended.
:returns: mean and variance of posterior distribution at X.
"""
fidelity = X[:, self._fidelity_idx]
# Do prediction 1 test point at a time
variance = np.zeros((X.shape[0], 1))
mean = np.zeros((X.shape[0], 1))
for i in range(X.shape[0]):
sample_mean, sample_var = self._predict_samples(X[[i], :-1], fidelity[i])
# Calculate total variance and mean from samples
variance[i, :] = np.mean(sample_var) + np.var(sample_mean)
mean[i, :] = np.mean(sample_mean)
return mean, variance
def get_prediction_gradients(self, X: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
Predicts mean and variance and the gradients of the mean and variance with respect to X.
:param X: input location.
:returns: (mean, mean gradient, variance, variance gradient) Gradients will be shape (n_points x (d-1)) because
we don't return the gradient with respect to the fidelity index.
"""
fidelity = X[:, self._fidelity_idx]
# Initialise vectors
sample_mean = np.zeros((self.n_samples ** (self.n_fidelities - 1), X.shape[0]))
d_sample_mean_dx = np.zeros((self.n_samples ** (self.n_fidelities - 1), X.shape[0], X.shape[1] - 1))
d_sample_var_dx = np.zeros((self.n_samples ** (self.n_fidelities - 1), X.shape[0], X.shape[1] - 1))
# Iteratively obtain predictions and associated gradients for each input point
for i in range(X.shape[0]):
mean, dmean_dx, var, dvar_dx = self._predict_samples_with_gradients(X[[i], :-1], fidelity[i])
# Assign to outputs
sample_mean[:, [i]] = mean
d_sample_mean_dx[:, i, :] = dmean_dx
d_sample_var_dx[:, i, :] = dvar_dx
# Calculate means + total variance
total_mean = np.mean(sample_mean, axis=0, keepdims=True).T
total_mean_grad = np.mean(d_sample_mean_dx, axis=0)
# Calculate total variance derivative
tmp = 2 * np.mean(d_sample_mean_dx * sample_mean[:, :, None], axis=0)
total_variance_grad = np.mean(d_sample_var_dx, axis=0) + tmp - 2 * total_mean * total_mean_grad
return total_mean_grad, total_variance_grad
def _predict_samples(self, X: np.ndarray, fidelity: float):
"""
Draw samples from model at given fidelity. Returns samples of mean and variance at specified fidelity.
:param X: Input array without output of previous layer appended.
:param fidelity: zero based fidelity index.
:returns sample_mean, sample_variance: mean and variance predictions at input points.
"""
fidelity = int(fidelity)
# Predict at first fidelity
sample_mean, sample_variance = self.models[0].predict(X)
# Predict at all fidelities up until the one we are interested in
for i in range(1, fidelity + 1):
# Draw samples from posterior of previous fidelity
sample_mean, sample_variance, _ = self._propagate_samples_through_level(X, i, sample_mean, sample_variance)
return sample_mean, sample_variance
def _predict_samples_with_gradients(self, X: np.ndarray, fidelity: float):
"""
Draw samples of mean and variance from model at given fidelity and the gradients of these samples wrt X.
We calculate the gradients by applying the chain rule as the gradients of each Gaussian process is known wrt
its inputs.
:param X: Input array without output of previous layer appended.
:param fidelity: zero based fidelity index.
:returns mean, mean gradient, variance, variance gradient: mean and variance predictions at input points.
"""
fidelity = int(fidelity)
# Predict at first fidelity
dsample_mean_dx, dsample_var_dx = self.models[0].predictive_gradients(X)
dsample_mean_dx = dsample_mean_dx[:, :, 0]
sample_mean, sample_variance = self.models[0].predict(X)
for i in range(1, fidelity + 1):
previous_sample_variance = sample_variance.copy()
# Predict at all fidelities up until the one we are interested in
sample_mean, sample_variance, x_augmented = self._propagate_samples_through_level(
X, i, sample_mean, sample_variance
)
dsample_mean_dx, dsample_var_dx = self._propagate_samples_through_level_gradient(
dsample_mean_dx, dsample_var_dx, i, previous_sample_variance, x_augmented
)
return sample_mean, dsample_mean_dx, sample_variance, dsample_var_dx
def _propagate_samples_through_level(self, X, i_level, sample_mean, sample_variance):
"""
Sample from the posterior of level i - 1 and propagates these samples through level i.
:param X: Input array without output of previous layer appended.
:param i_level: level to push through
:param sample_mean: mean from previous level
:param sample_variance: variance from previous level
"""
# Draw samples from posterior of previous fidelity
samples = self.monte_carlo_rand_numbers * np.sqrt(sample_variance) + sample_mean.T
samples = samples.flatten()[:, None]
# Create inputs for each sample
x_repeat = np.repeat(X, self.n_samples ** i_level, axis=0)
# Augment input with mean of previous fidelity
x_augmented = np.concatenate([x_repeat, samples], axis=1)
# Predict mean and variance and fidelity i
sample_mean, sample_variance = self.models[i_level].predict(x_augmented)
return sample_mean, sample_variance, x_augmented
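# The sampling line above is the reparameterisation trick: for fixed standard
# normal draws z, z * sqrt(var) + mean is distributed as N(mean, var), e.g.
#   z = np.random.randn(100, 1)
#   samples = z * np.sqrt(0.25) + 1.0   # samples ~ N(1.0, 0.25)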
def _propagate_samples_through_level_gradient(
self, dsample_mean_dx, dsample_var_dx, i_fidelity, sample_variance, x_augmented
):
"""
Calculates gradients of sample mean and variance with respect to X when propagated through a level
:param dsample_mean_dx: Gradients of mean prediction of samples from previous level
:param dsample_var_dx: Gradients of variance prediction of samples from previous level
:param i_fidelity: level index
:param sample_variance: The variance prediction of the samples from the previous level
:param x_augmented: The X input for this level augmented with the outputs
from the previous level as the final column
"""
# Convert variance derivative to std derivative
clipped_var = np.clip(sample_variance, 1e-10, np.inf)
dsample_std_dx = dsample_var_dx / (2 * np.sqrt(clipped_var))
# Calculate gradients of samples wrt x
# This calculates a (n_samples**(i-1), n_samples, n_dims) matrix
tmp = self.monte_carlo_rand_numbers[:, np.newaxis, :] * dsample_std_dx[:, np.newaxis, :]
dsamples_dx = dsample_mean_dx[np.newaxis, :, :] + tmp
dsamples_dx_reshaped = np.reshape(dsamples_dx, (self.n_samples ** i_fidelity, dsample_std_dx.shape[1]))
# Get partial derivatives of mean and variance with respect to
# both X and output of previous fidelity
dmean_dx, dvar_dx = self.models[i_fidelity].predictive_gradients(x_augmented)
dmean_dx = dmean_dx[:, :, 0]
# Combine partial derivatives to get full derivative wrt X
dsample_mean_dx = dmean_dx[:, :-1] + dmean_dx[:, [-1]] * dsamples_dx_reshaped
dsample_var_dx = dvar_dx[:, :-1] + dvar_dx[:, [-1]] * dsamples_dx_reshaped
return dsample_mean_dx, dsample_var_dx
def optimize(self) -> None:
"""
Optimize the full model
"""
# Optimize the first model
self.models[0].optimize_restarts(self.optimization_restarts, verbose=self.verbose, robust=True)
# Optimize all models for all fidelities but lowest fidelity
for i in range(1, self.n_fidelities):
# Set new X values because previous model has changed
is_ith_fidelity = self.X[:, self._fidelity_idx] == i
previous_mean, _ = self._predict_deterministic(self.X[is_ith_fidelity, :-1], i)
augmented_input = np.concatenate([self.models[i].X[:, :-1], previous_mean], axis=1)
self.models[i].set_X(augmented_input)
# Optimize parameters
self.models[i].optimize_restarts(self.optimization_restarts, verbose=self.verbose, robust=True)
def get_f_minimum(self) -> np.ndarray:
"""
Get the minimum of the top fidelity model.
"""
return np.min(self.models[-1].Y)
def _predict_deterministic(self, X, fidelity):
"""
This is a helper function when predicting at points that are in the training set. It is more efficient than
sampling and is useful when constructing the model.
"""
# Predict at first fidelity
mean, variance = self.models[0].predict(X)
for i in range(1, fidelity):
# Push samples through this fidelity model
augmented_input = np.concatenate([X, mean], axis=1)
mean, variance = self.models[i].predict(augmented_input)
return mean, variance
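# Usage sketch (toy data; relies on the convert helpers imported above, which
# append the zero-based fidelity index as the last column of X):
#   x_low = np.random.rand(20, 1)
#   x_high = x_low[:10]
#   X_init = convert_x_list_to_array([x_low, x_high])
#   Y_init = convert_y_list_to_array([np.sin(x_low), np.sin(x_high) + 0.1])
#   kernels = make_non_linear_kernels(GPy.kern.RBF, n_fidelities=2, n_input_dims=1)
#   model = NonLinearMultiFidelityModel(X_init, Y_init, n_fidelities=2, kernels=kernels)
#   model.optimize()
#   mean, variance = model.predict(X_init)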
|
|
import idaapi
from . import exceptions
from awesome.context import ignored
DEFMASK = idaapi.BADADDR
ENUM_ERROR_MAP = {
idaapi.ENUM_MEMBER_ERROR_NAME:
(exceptions.SarkErrorEnumMemberName, "already have member with this name (bad name)"),
idaapi.ENUM_MEMBER_ERROR_VALUE:
(exceptions.SarkErrorEnumMemberValue, "already have 256 members with this value"),
idaapi.ENUM_MEMBER_ERROR_ENUM:
(exceptions.SarkErrorEnumMemberEnum, "bad enum id"),
idaapi.ENUM_MEMBER_ERROR_MASK:
(exceptions.SarkErrorEnumMemberMask, "bad bmask"),
idaapi.ENUM_MEMBER_ERROR_ILLV:
(exceptions.SarkErrorEnumMemberIllv, "bad bmask and value combination (~bmask & value != 0)"),
}
def _enum_member_error(err, eid, name, value, bitmask):
"""Format enum member error."""
exception, msg = ENUM_ERROR_MAP[err]
enum_name = idaapi.get_enum_name(eid)
return exception(('add_enum_member(enum="{}", member="{}", value={}, bitmask=0x{:08X}) '
'failed: {}').format(
enum_name,
name,
value,
bitmask,
msg
))
def _get_enum(name):
"""Get an existing enum ID"""
eid = idaapi.get_enum(name)
if eid == idaapi.BADADDR:
raise exceptions.EnumNotFound('Enum "{}" does not exist.'.format(name))
return eid
def add_enum(name=None, index=None, flags=idaapi.hexflag(), bitfield=False):
"""Create a new enum.
Args:
name: Name of the enum to create.
index: The index of the enum. Leave at default to append the enum as the last enum.
flags: Enum type flags.
bitfield: Is the enum a bitfield.
Returns:
An `Enum` object.
"""
if name is not None:
with ignored(exceptions.EnumNotFound):
_get_enum(name)
raise exceptions.EnumAlreadyExists()
if index is None or index < 0:
index = idaapi.get_enum_qty()
eid = idaapi.add_enum(index, name, flags)
if eid == idaapi.BADADDR:
raise exceptions.EnumCreationFailed('Failed creating enum "{}"'.format(name))
if bitfield:
idaapi.set_enum_bf(eid, bitfield)
return Enum(eid=eid)
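# Usage sketch (enum and member names are hypothetical; requires a loaded IDB):
#   my_enum = add_enum('MyEnum')
#   my_enum.members.add('FIRST', 0)
#   my_enum.members.add('SECOND', 1)
#   print(my_enum.members['SECOND'].value)  # -> 1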
def remove_enum(name):
"""Delete an enum by name."""
eid = _get_enum(name)
idaapi.del_enum(eid)
def _add_enum_member(enum, name, value, bitmask=DEFMASK):
"""Add an enum member."""
error = idaapi.add_enum_member(enum, name, value, bitmask)
if error:
raise _enum_member_error(error, enum, name, value, bitmask)
class EnumComments(object):
"""Enum comments retrieval and manipulation."""
def __init__(self, eid):
super(EnumComments, self).__init__()
self._eid = eid
@property
def regular(self):
return idaapi.get_enum_cmt(self._eid, False)
@regular.setter
def regular(self, comment):
success = idaapi.set_enum_cmt(self._eid, comment, False)
if not success:
raise exceptions.CantSetEnumComment("Can't set enum comment.")
@property
def repeat(self):
return idaapi.get_enum_cmt(self._eid, True)
@repeat.setter
def repeat(self, comment):
success = idaapi.set_enum_cmt(self._eid, comment, True)
if not success:
raise exceptions.CantSetEnumComment("Can't set enum comment.")
def __repr__(self):
return ("EnumComments("
"name={name!r},"
" reqular={regular!r},"
" repeat={repeat!r})").format(
name=Enum(eid=self._eid).name,
regular=self.regular,
repeat=self.repeat, )
class EnumMembers(object):
"""Enum members retrieval and manipulation."""
def __init__(self, eid):
super(EnumMembers, self).__init__()
self._eid = eid
def __len__(self):
"""Number of members in the enum"""
return idaapi.get_enum_size(self._eid)
def __iter__(self):
"""Iterate all members of the enum"""
return (EnumMember(cid) for cid in _iter_enum_constant_ids(self._eid))
def add(self, name, value, bitmask=DEFMASK):
"""Add an enum member
Args:
name: Name of the member
value: value of the member
bitmask: bitmask. Only use if enum is a bitfield.
"""
_add_enum_member(self._eid, name, value, bitmask)
def __getitem__(self, name):
"""Get an enum member by name."""
for enum_member in self:
if enum_member.name == name:
return enum_member
raise KeyError("No member named {!r}".format(name))
def remove(self, name):
"""Remove an enum member by name"""
member = self[name]
serial = member.serial
value = member.value
bmask = member.bmask
success = idaapi.del_enum_member(self._eid, value, serial, bmask)
if not success:
raise exceptions.CantDeleteEnumMember("Can't delete enum member {!r}.".format(name))
def __repr__(self):
return "<EnumMembers(enum={!r}, members={{{}}})>".format(
Enum(eid=self._eid).name,
", ".join("{member.name!r}: {member.value!r}".format(member=member) for member in self)
)
class Enum(object):
"""An enum in the IDB"""
def __init__(self, name=None, eid=None):
"""
Get an existing enum.
Only provide one of `name` and `eid`.
Args:
name: Name of the enum
eid: Enum ID
"""
if None not in (name, eid):
raise TypeError("Provide only a `name` or an `eid`.")
self._eid = eid or _get_enum(name)
self._comments = EnumComments(self._eid)
@property
def name(self):
"""Name of the enum"""
return idaapi.get_enum_name(self.eid)
@name.setter
def name(self, name):
"""Set the enum name."""
success = idaapi.set_enum_name(self.eid, name)
if not success:
            raise exceptions.CantRenameEnum("Can't rename enum {!r} to {!r}.".format(self.name, name))
@property
def width(self):
"""Width of the enum"""
return idaapi.get_enum_width(self.eid)
@property
def comments(self):
"""Enum comments"""
return self._comments
@property
def eid(self):
"""Enum ID"""
return self._eid
@property
def flag(self):
"""Enum flags (bitness, and display type)"""
return idaapi.get_enum_flag(self.eid)
@property
def bitfield(self):
"""Is the enum a bitfield"""
return idaapi.is_bf(self.eid)
@bitfield.setter
def bitfield(self, value):
success = idaapi.set_enum_bf(self.eid, value)
if not success:
raise exceptions.CantSetEnumBitfield()
@property
def members(self):
"""Get the enum members."""
return EnumMembers(self.eid)
@property
def is_from_til(self):
"""Is from type library?"""
return idaapi.is_enum_fromtil(self.eid)
def __repr__(self):
return "<Enum(name={!r})>".format(self.name)
class EnumMemberComments(object):
"""Enum member comments retrieval and manipulation."""
def __init__(self, cid):
super(EnumMemberComments, self).__init__()
self._cid = cid
@property
def regular(self):
return idaapi.get_enum_member_cmt(self._cid, False)
@regular.setter
def regular(self, comment):
success = idaapi.set_enum_member_cmt(self._cid, comment, False)
if not success:
            raise exceptions.CantSetEnumMemberComment("Can't set enum member comment.")
@property
def repeat(self):
return idaapi.get_enum_member_cmt(self._cid, True)
@repeat.setter
def repeat(self, comment):
success = idaapi.set_enum_member_cmt(self._cid, comment, True)
if not success:
            raise exceptions.CantSetEnumMemberComment("Can't set enum member comment.")
def __repr__(self):
enum_member = EnumMember(self._cid)
return ("EnumMemberComments("
"name={name!r},"
" reqular={regular!r},"
" repeat={repeat!r})").format(
name="{}.{}".format(enum_member.parent.name, enum_member.name),
regular=self.regular,
repeat=self.repeat, )
class EnumMember(object):
"""A member of an enum."""
def __init__(self, cid):
super(EnumMember, self).__init__()
self._cid = cid
self._comments = EnumMemberComments(self._cid)
@property
def cid(self):
"""Get the constant ID"""
return self._cid
@property
def name(self):
"""Get the member name."""
return idaapi.get_enum_member_name(self.cid)
@name.setter
def name(self, name):
"""Set the member name.
Note that a member name cannot appear in other enums, or generally
anywhere else in the IDB.
"""
success = idaapi.set_enum_member_name(self.cid, name)
if not success:
raise exceptions.CantRenameEnumMember(
"Failed renaming {!r} to {!r}. Does the name exist somewhere else?".format(self.name, name))
@property
def bmask(self):
"""Get the bitmask"""
return idaapi.get_enum_member_bmask(self.cid)
bitmask = bmask
@property
def value(self):
"""Get the member value"""
return idaapi.get_enum_member_value(self.cid)
@property
def comments(self):
"""Get the member comments"""
return self._comments
@property
def serial(self):
"""Get the member serial (among members of the same value)."""
return idaapi.get_enum_member_serial(self.cid)
@property
def parent(self):
"""Get the enum holding the member."""
return Enum(eid=idaapi.get_enum_member_enum(self.cid))
def __repr__(self):
return "<EnumMember(name='{}.{}')>".format(self.parent.name, self.name)
def _iter_bitmasks(eid):
"""Iterate all bitmasks in a given enum.
Note that while `DEFMASK` indicates no-more-bitmasks, it is also a
valid bitmask value. The only way to tell if it exists is when iterating
the serials.
"""
bitmask = idaapi.get_first_bmask(eid)
yield bitmask
while bitmask != DEFMASK:
bitmask = idaapi.get_next_bmask(eid, bitmask)
yield bitmask
def _iter_enum_member_values(eid, bitmask):
"""Iterate member values with given bitmask inside the enum
Note that `DEFMASK` can either indicate end-of-values or a valid value.
    Iterate serials to tell them apart.
"""
value = idaapi.get_first_enum_member(eid, bitmask)
yield value
while value != DEFMASK:
value = idaapi.get_next_enum_member(eid, value, bitmask)
yield value
def _iter_serial_enum_member(eid, value, bitmask):
"""Iterate serial and CID of enum members with given value and bitmask.
Here only valid values are returned, as `idaapi.BADNODE` always indicates
an invalid member.
"""
cid, serial = idaapi.get_first_serial_enum_member(eid, value, bitmask)
while cid != idaapi.BADNODE:
yield cid, serial
cid, serial = idaapi.get_next_serial_enum_member(cid, serial)
def _iter_enum_constant_ids(eid):
"""Iterate the constant IDs of all members in the given enum"""
for bitmask in _iter_bitmasks(eid):
for value in _iter_enum_member_values(eid, bitmask):
for cid, serial in _iter_serial_enum_member(eid, value, bitmask):
yield cid
def _iter_enum_ids():
"""Iterate the IDs of all enums in the IDB"""
    for index in range(idaapi.get_enum_qty()):
yield idaapi.getn_enum(index)
def enums():
"""Iterate all enums in the IDB"""
return (Enum(eid=eid) for eid in _iter_enum_ids())
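# Illustrative usage (assumes an IDB that contains enums):
#     for enum in enums():
#         print(enum.name, len(enum.members))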
|
|
"""Test the zerproc lights."""
from unittest.mock import MagicMock, patch
import pytest
import pyzerproc
from homeassistant import setup
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_HS_COLOR,
ATTR_RGB_COLOR,
ATTR_XY_COLOR,
SCAN_INTERVAL,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
)
from homeassistant.components.zerproc.light import DOMAIN
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
ATTR_ICON,
ATTR_SUPPORTED_FEATURES,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
import homeassistant.util.dt as dt_util
from tests.common import MockConfigEntry, async_fire_time_changed
@pytest.fixture
async def mock_entry(hass):
"""Create a mock light entity."""
return MockConfigEntry(domain=DOMAIN)
@pytest.fixture
async def mock_light(hass, mock_entry):
"""Create a mock light entity."""
await setup.async_setup_component(hass, "persistent_notification", {})
mock_entry.add_to_hass(hass)
light = MagicMock(spec=pyzerproc.Light)
light.address = "AA:BB:CC:DD:EE:FF"
light.name = "LEDBlue-CCDDEEFF"
light.is_connected.return_value = False
mock_state = pyzerproc.LightState(False, (0, 0, 0))
with patch(
"homeassistant.components.zerproc.light.pyzerproc.discover",
return_value=[light],
), patch.object(light, "connect"), patch.object(
light, "get_state", return_value=mock_state
):
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
light.is_connected.return_value = True
return light
async def test_init(hass, mock_entry):
"""Test platform setup."""
await setup.async_setup_component(hass, "persistent_notification", {})
mock_entry.add_to_hass(hass)
mock_light_1 = MagicMock(spec=pyzerproc.Light)
mock_light_1.address = "AA:BB:CC:DD:EE:FF"
mock_light_1.name = "LEDBlue-CCDDEEFF"
mock_light_1.is_connected.return_value = True
mock_light_2 = MagicMock(spec=pyzerproc.Light)
mock_light_2.address = "11:22:33:44:55:66"
mock_light_2.name = "LEDBlue-33445566"
mock_light_2.is_connected.return_value = True
mock_state_1 = pyzerproc.LightState(False, (0, 0, 0))
mock_state_2 = pyzerproc.LightState(True, (0, 80, 255))
mock_light_1.get_state.return_value = mock_state_1
mock_light_2.get_state.return_value = mock_state_2
with patch(
"homeassistant.components.zerproc.light.pyzerproc.discover",
return_value=[mock_light_1, mock_light_2],
):
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
state = hass.states.get("light.ledblue_ccddeeff")
assert state.state == STATE_OFF
assert state.attributes == {
ATTR_FRIENDLY_NAME: "LEDBlue-CCDDEEFF",
ATTR_SUPPORTED_FEATURES: SUPPORT_BRIGHTNESS | SUPPORT_COLOR,
ATTR_ICON: "mdi:string-lights",
}
state = hass.states.get("light.ledblue_33445566")
assert state.state == STATE_ON
assert state.attributes == {
ATTR_FRIENDLY_NAME: "LEDBlue-33445566",
ATTR_SUPPORTED_FEATURES: SUPPORT_BRIGHTNESS | SUPPORT_COLOR,
ATTR_ICON: "mdi:string-lights",
ATTR_BRIGHTNESS: 255,
ATTR_HS_COLOR: (221.176, 100.0),
ATTR_RGB_COLOR: (0, 80, 255),
ATTR_XY_COLOR: (0.138, 0.08),
}
with patch.object(hass.loop, "stop"):
await hass.async_stop()
assert mock_light_1.disconnect.called
assert mock_light_2.disconnect.called
async def test_discovery_exception(hass, mock_entry):
"""Test platform setup."""
await setup.async_setup_component(hass, "persistent_notification", {})
mock_entry.add_to_hass(hass)
with patch(
"homeassistant.components.zerproc.light.pyzerproc.discover",
side_effect=pyzerproc.ZerprocException("TEST"),
):
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
# The exception should be captured and no entities should be added
assert len(hass.data[DOMAIN]["addresses"]) == 0
async def test_connect_exception(hass, mock_entry):
"""Test platform setup."""
await setup.async_setup_component(hass, "persistent_notification", {})
mock_entry.add_to_hass(hass)
mock_light_1 = MagicMock(spec=pyzerproc.Light)
mock_light_1.address = "AA:BB:CC:DD:EE:FF"
mock_light_1.name = "LEDBlue-CCDDEEFF"
mock_light_1.is_connected.return_value = False
mock_light_2 = MagicMock(spec=pyzerproc.Light)
mock_light_2.address = "11:22:33:44:55:66"
mock_light_2.name = "LEDBlue-33445566"
mock_light_2.is_connected.return_value = False
with patch(
"homeassistant.components.zerproc.light.pyzerproc.discover",
return_value=[mock_light_1, mock_light_2],
), patch.object(
mock_light_1, "connect", side_effect=pyzerproc.ZerprocException("TEST")
):
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
# The exception connecting to light 1 should be captured, but light 2
# should still be added
assert len(hass.data[DOMAIN]["addresses"]) == 1
async def test_remove_entry(hass, mock_light, mock_entry):
"""Test platform setup."""
with patch.object(mock_light, "disconnect") as mock_disconnect:
await hass.config_entries.async_remove(mock_entry.entry_id)
assert mock_disconnect.called
async def test_remove_entry_exceptions_caught(hass, mock_light, mock_entry):
"""Assert that disconnect exceptions are caught."""
with patch.object(
mock_light, "disconnect", side_effect=pyzerproc.ZerprocException("Mock error")
) as mock_disconnect:
await hass.config_entries.async_remove(mock_entry.entry_id)
assert mock_disconnect.called
async def test_light_turn_on(hass, mock_light):
"""Test ZerprocLight turn_on."""
utcnow = dt_util.utcnow()
with patch.object(mock_light, "turn_on") as mock_turn_on:
await hass.services.async_call(
"light",
"turn_on",
{ATTR_ENTITY_ID: "light.ledblue_ccddeeff"},
blocking=True,
)
await hass.async_block_till_done()
mock_turn_on.assert_called()
with patch.object(mock_light, "set_color") as mock_set_color:
await hass.services.async_call(
"light",
"turn_on",
{ATTR_ENTITY_ID: "light.ledblue_ccddeeff", ATTR_BRIGHTNESS: 25},
blocking=True,
)
await hass.async_block_till_done()
mock_set_color.assert_called_with(25, 25, 25)
# Make sure no discovery calls are made while we emulate time passing
with patch("homeassistant.components.zerproc.light.pyzerproc.discover"):
with patch.object(
mock_light,
"get_state",
return_value=pyzerproc.LightState(True, (175, 150, 220)),
):
utcnow = utcnow + SCAN_INTERVAL
async_fire_time_changed(hass, utcnow)
await hass.async_block_till_done()
with patch.object(mock_light, "set_color") as mock_set_color:
await hass.services.async_call(
"light",
"turn_on",
{ATTR_ENTITY_ID: "light.ledblue_ccddeeff", ATTR_BRIGHTNESS: 25},
blocking=True,
)
await hass.async_block_till_done()
mock_set_color.assert_called_with(19, 17, 25)
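    # Worked arithmetic: with the light at (175, 150, 220), the current
    # brightness is max(175, 150, 220) = 220, so requesting brightness 25
    # scales each channel by 25/220: 175 * 25 // 220 = 19, 150 * 25 // 220 = 17
    # and 220 * 25 // 220 = 25. The truncating division is an assumption
    # inferred from the asserted values; the later set_color assertions follow
    # the same scaling.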
with patch.object(mock_light, "set_color") as mock_set_color:
await hass.services.async_call(
"light",
"turn_on",
{ATTR_ENTITY_ID: "light.ledblue_ccddeeff", ATTR_HS_COLOR: (50, 50)},
blocking=True,
)
await hass.async_block_till_done()
mock_set_color.assert_called_with(220, 201, 110)
with patch.object(
mock_light,
"get_state",
return_value=pyzerproc.LightState(True, (75, 75, 75)),
):
utcnow = utcnow + SCAN_INTERVAL
async_fire_time_changed(hass, utcnow)
await hass.async_block_till_done()
with patch.object(mock_light, "set_color") as mock_set_color:
await hass.services.async_call(
"light",
"turn_on",
{ATTR_ENTITY_ID: "light.ledblue_ccddeeff", ATTR_HS_COLOR: (50, 50)},
blocking=True,
)
await hass.async_block_till_done()
mock_set_color.assert_called_with(75, 68, 37)
with patch.object(mock_light, "set_color") as mock_set_color:
await hass.services.async_call(
"light",
"turn_on",
{
ATTR_ENTITY_ID: "light.ledblue_ccddeeff",
ATTR_BRIGHTNESS: 200,
ATTR_HS_COLOR: (75, 75),
},
blocking=True,
)
await hass.async_block_till_done()
mock_set_color.assert_called_with(162, 200, 50)
async def test_light_turn_off(hass, mock_light):
"""Test ZerprocLight turn_on."""
with patch.object(mock_light, "turn_off") as mock_turn_off:
await hass.services.async_call(
"light",
"turn_off",
{ATTR_ENTITY_ID: "light.ledblue_ccddeeff"},
blocking=True,
)
await hass.async_block_till_done()
mock_turn_off.assert_called()
async def test_light_update(hass, mock_light):
"""Test ZerprocLight update."""
utcnow = dt_util.utcnow()
state = hass.states.get("light.ledblue_ccddeeff")
assert state.state == STATE_OFF
assert state.attributes == {
ATTR_FRIENDLY_NAME: "LEDBlue-CCDDEEFF",
ATTR_SUPPORTED_FEATURES: SUPPORT_BRIGHTNESS | SUPPORT_COLOR,
ATTR_ICON: "mdi:string-lights",
}
# Make sure no discovery calls are made while we emulate time passing
with patch("homeassistant.components.zerproc.light.pyzerproc.discover"):
        # Test an exception while fetching the light state during an update
with patch.object(
mock_light, "get_state", side_effect=pyzerproc.ZerprocException("TEST")
):
utcnow = utcnow + SCAN_INTERVAL
async_fire_time_changed(hass, utcnow)
await hass.async_block_till_done()
state = hass.states.get("light.ledblue_ccddeeff")
assert state.state == STATE_UNAVAILABLE
assert state.attributes == {
ATTR_FRIENDLY_NAME: "LEDBlue-CCDDEEFF",
ATTR_SUPPORTED_FEATURES: SUPPORT_BRIGHTNESS | SUPPORT_COLOR,
ATTR_ICON: "mdi:string-lights",
}
with patch.object(
mock_light,
"get_state",
return_value=pyzerproc.LightState(False, (200, 128, 100)),
):
utcnow = utcnow + SCAN_INTERVAL
async_fire_time_changed(hass, utcnow)
await hass.async_block_till_done()
state = hass.states.get("light.ledblue_ccddeeff")
assert state.state == STATE_OFF
assert state.attributes == {
ATTR_FRIENDLY_NAME: "LEDBlue-CCDDEEFF",
ATTR_SUPPORTED_FEATURES: SUPPORT_BRIGHTNESS | SUPPORT_COLOR,
ATTR_ICON: "mdi:string-lights",
}
with patch.object(
mock_light,
"get_state",
return_value=pyzerproc.LightState(True, (175, 150, 220)),
):
utcnow = utcnow + SCAN_INTERVAL
async_fire_time_changed(hass, utcnow)
await hass.async_block_till_done()
state = hass.states.get("light.ledblue_ccddeeff")
assert state.state == STATE_ON
assert state.attributes == {
ATTR_FRIENDLY_NAME: "LEDBlue-CCDDEEFF",
ATTR_SUPPORTED_FEATURES: SUPPORT_BRIGHTNESS | SUPPORT_COLOR,
ATTR_ICON: "mdi:string-lights",
ATTR_BRIGHTNESS: 220,
ATTR_HS_COLOR: (261.429, 31.818),
ATTR_RGB_COLOR: (202, 173, 255),
ATTR_XY_COLOR: (0.291, 0.232),
}
|
|
from datetime import date, datetime, timedelta
from decimal import Decimal
import pytest
from freezegun import freeze_time
from notifications_utils.timezones import convert_utc_to_bst
from app import db
from app.dao.fact_billing_dao import (
delete_billing_data_for_service_for_day,
fetch_billing_data_for_day,
fetch_billing_totals_for_year,
fetch_daily_volumes_for_platform,
fetch_letter_costs_and_totals_for_all_services,
fetch_letter_line_items_for_all_services,
fetch_monthly_billing_for_year,
fetch_sms_billing_for_all_services,
fetch_sms_free_allowance_remainder_until_date,
fetch_usage_year_for_organisation,
fetch_volumes_by_service,
get_rate,
get_rates_for_billing,
)
from app.dao.organisation_dao import dao_add_service_to_organisation
from app.models import NOTIFICATION_STATUS_TYPES, FactBilling
from tests.app.db import (
create_annual_billing,
create_ft_billing,
create_letter_rate,
create_notification,
create_notification_history,
create_organisation,
create_rate,
create_service,
create_service_data_retention,
create_template,
set_up_usage_data,
)
def set_up_yearly_data():
service = create_service()
sms_template = create_template(service=service, template_type="sms")
email_template = create_template(service=service, template_type="email")
letter_template = create_template(service=service, template_type="letter")
start_date = date(2016, 3, 31)
end_date = date(2017, 4, 2)
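    # The range deliberately starts one day before the 2016-17 financial year
    # (1 April 2016) and ends one day after it (31 March 2017), so queries for
    # the 2016 financial year must exclude the boundary days.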
for n in range((end_date - start_date).days):
dt = start_date + timedelta(days=n)
create_ft_billing(bst_date=dt, template=sms_template, rate=0.162)
create_ft_billing(bst_date=dt, template=email_template, rate=0)
create_ft_billing(bst_date=dt, template=letter_template, rate=0.33, postage='second')
create_ft_billing(bst_date=dt, template=letter_template, rate=0.30, postage='second')
return service
def test_fetch_billing_data_for_today_includes_data_with_the_right_key_type(notify_db_session):
service = create_service()
template = create_template(service=service, template_type="email")
for key_type in ['normal', 'test', 'team']:
create_notification(template=template, status='delivered', key_type=key_type)
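    # Notifications sent with a 'test' API key are not billable, so only the
    # 'normal' and 'team' notifications should be counted below.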
today = convert_utc_to_bst(datetime.utcnow())
results = fetch_billing_data_for_day(today.date())
assert len(results) == 1
assert results[0].notifications_sent == 2
@pytest.mark.parametrize("notification_type", ["email", "sms", "letter"])
def test_fetch_billing_data_for_day_only_calls_query_for_permission_type(notify_db_session, notification_type):
service = create_service(service_permissions=[notification_type])
email_template = create_template(service=service, template_type="email")
sms_template = create_template(service=service, template_type="sms")
letter_template = create_template(service=service, template_type="letter")
create_notification(template=email_template, status='delivered')
create_notification(template=sms_template, status='delivered')
create_notification(template=letter_template, status='delivered')
today = convert_utc_to_bst(datetime.utcnow())
results = fetch_billing_data_for_day(process_day=today.date(), check_permissions=True)
assert len(results) == 1
@pytest.mark.parametrize("notification_type", ["email", "sms", "letter"])
def test_fetch_billing_data_for_day_only_calls_query_for_all_channels(notify_db_session, notification_type):
service = create_service(service_permissions=[notification_type])
email_template = create_template(service=service, template_type="email")
sms_template = create_template(service=service, template_type="sms")
letter_template = create_template(service=service, template_type="letter")
create_notification(template=email_template, status='delivered')
create_notification(template=sms_template, status='delivered')
create_notification(template=letter_template, status='delivered')
today = convert_utc_to_bst(datetime.utcnow())
results = fetch_billing_data_for_day(process_day=today.date(), check_permissions=False)
assert len(results) == 3
@freeze_time('2018-04-02 01:20:00')
def test_fetch_billing_data_for_today_includes_data_with_the_right_date(notify_db_session):
process_day = datetime(2018, 4, 1, 13, 30, 0)
service = create_service()
template = create_template(service=service, template_type="email")
create_notification(template=template, status='delivered', created_at=process_day)
create_notification(template=template, status='delivered', created_at=datetime(2018, 3, 31, 23, 23, 23))
create_notification(template=template, status='delivered', created_at=datetime(2018, 3, 31, 20, 23, 23))
create_notification(template=template, status='sending', created_at=process_day + timedelta(days=1))
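    # In BST (UTC+1), 2018-03-31 23:23 UTC falls on 2018-04-01, while
    # 2018-03-31 20:23 UTC does not, and the 'sending' notification is a day
    # later, hence notifications_sent == 2 below.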
day_under_test = convert_utc_to_bst(process_day)
results = fetch_billing_data_for_day(day_under_test.date())
assert len(results) == 1
assert results[0].notifications_sent == 2
def test_fetch_billing_data_for_day_is_grouped_by_template_and_notification_type(notify_db_session):
service = create_service()
email_template = create_template(service=service, template_type="email")
sms_template = create_template(service=service, template_type="sms")
create_notification(template=email_template, status='delivered')
create_notification(template=sms_template, status='delivered')
today = convert_utc_to_bst(datetime.utcnow())
results = fetch_billing_data_for_day(today.date())
assert len(results) == 2
assert results[0].notifications_sent == 1
assert results[1].notifications_sent == 1
def test_fetch_billing_data_for_day_is_grouped_by_service(notify_db_session):
service_1 = create_service()
service_2 = create_service(service_name='Service 2')
email_template = create_template(service=service_1)
sms_template = create_template(service=service_2)
create_notification(template=email_template, status='delivered')
create_notification(template=sms_template, status='delivered')
today = convert_utc_to_bst(datetime.utcnow())
results = fetch_billing_data_for_day(today.date())
assert len(results) == 2
assert results[0].notifications_sent == 1
assert results[1].notifications_sent == 1
def test_fetch_billing_data_for_day_is_grouped_by_provider(notify_db_session):
service = create_service()
template = create_template(service=service)
create_notification(template=template, status='delivered', sent_by='mmg')
create_notification(template=template, status='delivered', sent_by='firetext')
today = convert_utc_to_bst(datetime.utcnow())
results = fetch_billing_data_for_day(today.date())
assert len(results) == 2
assert results[0].notifications_sent == 1
assert results[1].notifications_sent == 1
def test_fetch_billing_data_for_day_is_grouped_by_rate_mulitplier(notify_db_session):
service = create_service()
template = create_template(service=service)
create_notification(template=template, status='delivered', rate_multiplier=1)
create_notification(template=template, status='delivered', rate_multiplier=2)
today = convert_utc_to_bst(datetime.utcnow())
results = fetch_billing_data_for_day(today.date())
assert len(results) == 2
assert results[0].notifications_sent == 1
assert results[1].notifications_sent == 1
def test_fetch_billing_data_for_day_is_grouped_by_international(notify_db_session):
service = create_service()
sms_template = create_template(service=service)
letter_template = create_template(template_type='letter', service=service)
create_notification(template=sms_template, status='delivered', international=True)
create_notification(template=sms_template, status='delivered', international=False)
create_notification(template=letter_template, status='delivered', international=True)
create_notification(template=letter_template, status='delivered', international=False)
today = convert_utc_to_bst(datetime.utcnow())
results = fetch_billing_data_for_day(today.date())
assert len(results) == 4
assert all(result.notifications_sent == 1 for result in results)
def test_fetch_billing_data_for_day_is_grouped_by_notification_type(notify_db_session):
service = create_service()
sms_template = create_template(service=service, template_type='sms')
email_template = create_template(service=service, template_type='email')
letter_template = create_template(service=service, template_type='letter')
create_notification(template=sms_template, status='delivered')
create_notification(template=sms_template, status='delivered')
create_notification(template=sms_template, status='delivered')
create_notification(template=email_template, status='delivered')
create_notification(template=email_template, status='delivered')
create_notification(template=letter_template, status='delivered')
today = convert_utc_to_bst(datetime.utcnow())
results = fetch_billing_data_for_day(today.date())
assert len(results) == 3
notification_types = [x.notification_type for x in results]
assert len(notification_types) == 3
def test_fetch_billing_data_for_day_groups_by_postage(notify_db_session):
service = create_service()
letter_template = create_template(service=service, template_type='letter')
email_template = create_template(service=service, template_type='email')
create_notification(template=letter_template, status='delivered', postage='first')
create_notification(template=letter_template, status='delivered', postage='first')
create_notification(template=letter_template, status='delivered', postage='second')
create_notification(template=letter_template, status='delivered', postage='europe')
create_notification(template=letter_template, status='delivered', postage='rest-of-world')
create_notification(template=email_template, status='delivered')
today = convert_utc_to_bst(datetime.utcnow())
results = fetch_billing_data_for_day(today.date())
assert len(results) == 5
def test_fetch_billing_data_for_day_groups_by_sent_by(notify_db_session):
service = create_service()
letter_template = create_template(service=service, template_type='letter')
email_template = create_template(service=service, template_type='email')
create_notification(template=letter_template, status='delivered', postage='second', sent_by='dvla')
create_notification(template=letter_template, status='delivered', postage='second', sent_by='dvla')
create_notification(template=letter_template, status='delivered', postage='second', sent_by=None)
create_notification(template=email_template, status='delivered')
today = convert_utc_to_bst(datetime.utcnow())
results = fetch_billing_data_for_day(today.date())
assert len(results) == 2
def test_fetch_billing_data_for_day_groups_by_page_count(notify_db_session):
service = create_service()
letter_template = create_template(service=service, template_type='letter')
email_template = create_template(service=service, template_type='email')
create_notification(template=letter_template, status='delivered', postage='second', billable_units=1)
create_notification(template=letter_template, status='delivered', postage='second', billable_units=1)
create_notification(template=letter_template, status='delivered', postage='second', billable_units=2)
create_notification(template=email_template, status='delivered')
today = convert_utc_to_bst(datetime.utcnow())
results = fetch_billing_data_for_day(today.date())
assert len(results) == 3
def test_fetch_billing_data_for_day_sets_postage_for_emails_and_sms_to_none(notify_db_session):
service = create_service()
sms_template = create_template(service=service, template_type='sms')
email_template = create_template(service=service, template_type='email')
create_notification(template=sms_template, status='delivered')
create_notification(template=email_template, status='delivered')
today = convert_utc_to_bst(datetime.utcnow())
results = fetch_billing_data_for_day(today.date())
assert len(results) == 2
assert results[0].postage == 'none'
assert results[1].postage == 'none'
def test_fetch_billing_data_for_day_returns_empty_list(notify_db_session):
today = convert_utc_to_bst(datetime.utcnow())
results = fetch_billing_data_for_day(today.date())
assert results == []
def test_fetch_billing_data_for_day_uses_correct_table(notify_db_session):
service = create_service()
create_service_data_retention(service, notification_type='email', days_of_retention=3)
sms_template = create_template(service=service, template_type='sms')
email_template = create_template(service=service, template_type='email')
five_days_ago = datetime.utcnow() - timedelta(days=5)
create_notification(template=sms_template, status='delivered', created_at=five_days_ago)
create_notification_history(template=email_template, status='delivered', created_at=five_days_ago)
results = fetch_billing_data_for_day(process_day=five_days_ago.date(), service_id=service.id)
assert len(results) == 2
assert results[0].notification_type == 'sms'
assert results[0].notifications_sent == 1
assert results[1].notification_type == 'email'
assert results[1].notifications_sent == 1
def test_fetch_billing_data_for_day_returns_list_for_given_service(notify_db_session):
service = create_service()
service_2 = create_service(service_name='Service 2')
template = create_template(service=service)
template_2 = create_template(service=service_2)
create_notification(template=template, status='delivered')
create_notification(template=template_2, status='delivered')
today = convert_utc_to_bst(datetime.utcnow())
results = fetch_billing_data_for_day(process_day=today.date(), service_id=service.id)
assert len(results) == 1
assert results[0].service_id == service.id
def test_fetch_billing_data_for_day_bills_correctly_for_status(notify_db_session):
service = create_service()
sms_template = create_template(service=service, template_type='sms')
email_template = create_template(service=service, template_type='email')
letter_template = create_template(service=service, template_type='letter')
for status in NOTIFICATION_STATUS_TYPES:
create_notification(template=sms_template, status=status)
create_notification(template=email_template, status=status)
create_notification(template=letter_template, status=status)
today = convert_utc_to_bst(datetime.utcnow())
results = fetch_billing_data_for_day(process_day=today.date(), service_id=service.id)
sms_results = [x for x in results if x.notification_type == 'sms']
email_results = [x for x in results if x.notification_type == 'email']
letter_results = [x for x in results if x.notification_type == 'letter']
    # one row per notification type; each type bills a different subset of statuses
assert 6 == sms_results[0].notifications_sent
assert 4 == email_results[0].notifications_sent
assert 3 == letter_results[0].notifications_sent
def test_get_rates_for_billing(notify_db_session):
create_rate(start_date=datetime.utcnow(), value=12, notification_type='email')
create_rate(start_date=datetime.utcnow(), value=22, notification_type='sms')
create_rate(start_date=datetime.utcnow(), value=33, notification_type='email')
create_letter_rate(start_date=datetime.utcnow(), rate=0.66, post_class='first')
create_letter_rate(start_date=datetime.utcnow(), rate=0.33, post_class='second')
create_letter_rate(start_date=datetime.utcnow(), rate=0.84, post_class='europe')
create_letter_rate(start_date=datetime.utcnow(), rate=0.84, post_class='rest-of-world')
non_letter_rates, letter_rates = get_rates_for_billing()
assert len(non_letter_rates) == 3
assert len(letter_rates) == 4
@freeze_time('2017-06-01 12:00')
def test_get_rate(notify_db_session):
create_rate(start_date=datetime(2017, 5, 30, 23, 0), value=1.2, notification_type='email')
create_rate(start_date=datetime(2017, 5, 30, 23, 0), value=2.2, notification_type='sms')
create_rate(start_date=datetime(2017, 5, 30, 23, 0), value=3.3, notification_type='email')
create_letter_rate(start_date=datetime(2017, 5, 30, 23, 0), rate=0.66, post_class='first')
create_letter_rate(start_date=datetime(2017, 5, 30, 23, 0), rate=0.3, post_class='second')
non_letter_rates, letter_rates = get_rates_for_billing()
rate = get_rate(non_letter_rates=non_letter_rates, letter_rates=letter_rates, notification_type='sms',
date=date(2017, 6, 1))
letter_rate = get_rate(non_letter_rates=non_letter_rates, letter_rates=letter_rates,
notification_type='letter',
crown=True,
letter_page_count=1,
date=date(2017, 6, 1))
assert rate == 2.2
assert letter_rate == Decimal('0.3')
@pytest.mark.parametrize("letter_post_class,expected_rate", [
("first", "0.61"),
("second", "0.35"),
("europe", "0.92"),
("rest-of-world", "1.05"),
])
def test_get_rate_filters_letters_by_post_class(notify_db_session, letter_post_class, expected_rate):
create_letter_rate(start_date=datetime(2017, 5, 30, 23, 0), sheet_count=2, rate=0.61, post_class='first')
create_letter_rate(start_date=datetime(2017, 5, 30, 23, 0), sheet_count=2, rate=0.35, post_class='second')
create_letter_rate(start_date=datetime(2017, 5, 30, 23, 0), sheet_count=2, rate=0.92, post_class='europe')
create_letter_rate(start_date=datetime(2017, 5, 30, 23, 0), sheet_count=2, rate=1.05, post_class='rest-of-world')
non_letter_rates, letter_rates = get_rates_for_billing()
rate = get_rate(non_letter_rates, letter_rates, "letter", datetime(2018, 10, 1), True, 2, letter_post_class)
assert rate == Decimal(expected_rate)
@pytest.mark.parametrize("date,expected_rate", [(datetime(2018, 9, 30), '0.33'), (datetime(2018, 10, 1), '0.35')])
def test_get_rate_chooses_right_rate_depending_on_date(notify_db_session, date, expected_rate):
create_letter_rate(start_date=datetime(2016, 1, 1, 0, 0), sheet_count=2, rate=0.33, post_class='second')
create_letter_rate(start_date=datetime(2018, 9, 30, 23, 0), sheet_count=2, rate=0.35, post_class='second')
non_letter_rates, letter_rates = get_rates_for_billing()
rate = get_rate(non_letter_rates, letter_rates, "letter", date, True, 2, "second")
assert rate == Decimal(expected_rate)
def test_get_rate_for_letters_when_page_count_is_zero(notify_db_session):
non_letter_rates, letter_rates = get_rates_for_billing()
letter_rate = get_rate(non_letter_rates=non_letter_rates, letter_rates=letter_rates,
notification_type='letter',
crown=True,
letter_page_count=0,
date=datetime.utcnow())
assert letter_rate == 0
def test_fetch_monthly_billing_for_year(notify_db_session):
service = create_service()
template = create_template(service=service, template_type="sms")
for i in range(1, 31):
create_ft_billing(bst_date='2018-06-{}'.format(i),
template=template,
rate_multiplier=2,
rate=0.162)
for i in range(1, 32):
create_ft_billing(bst_date='2018-07-{}'.format(i),
template=template,
rate=0.158)
results = fetch_monthly_billing_for_year(service_id=service.id, year=2018)
assert len(results) == 2
assert str(results[0].month) == "2018-06-01"
assert results[0].notifications_sent == 30
assert results[0].billable_units == Decimal('60')
assert results[0].rate == Decimal('0.162')
assert results[0].notification_type == 'sms'
assert results[0].postage == 'none'
assert str(results[1].month) == "2018-07-01"
assert results[1].notifications_sent == 31
assert results[1].billable_units == Decimal('31')
assert results[1].rate == Decimal('0.158')
assert results[1].notification_type == 'sms'
assert results[1].postage == 'none'
@freeze_time('2018-08-01 13:30:00')
def test_fetch_monthly_billing_for_year_adds_data_for_today(notify_db_session):
service = create_service()
template = create_template(service=service, template_type="email")
for i in range(1, 32):
create_ft_billing(bst_date='2018-07-{}'.format(i), template=template)
create_notification(template=template, status='delivered')
assert db.session.query(FactBilling.bst_date).count() == 31
results = fetch_monthly_billing_for_year(service_id=service.id,
year=2018)
assert db.session.query(FactBilling.bst_date).count() == 32
assert len(results) == 2
def test_fetch_monthly_billing_for_year_return_financial_year(notify_db_session):
service = set_up_yearly_data()
results = fetch_monthly_billing_for_year(service.id, 2016)
    # returns 4 rows per month (email, sms and two letter rates) for the
    # financial year, April to the end of March, ordered by month
assert len(results) == 48
assert str(results[0].month) == "2016-04-01"
assert results[0].notification_type == 'email'
assert results[0].notifications_sent == 30
assert results[0].billable_units == 30
assert results[0].rate == Decimal('0')
assert str(results[1].month) == "2016-04-01"
assert results[1].notification_type == 'letter'
assert results[1].notifications_sent == 30
assert results[1].billable_units == 30
assert results[1].rate == Decimal('0.30')
assert str(results[1].month) == "2016-04-01"
assert results[2].notification_type == 'letter'
assert results[2].notifications_sent == 30
assert results[2].billable_units == 30
assert results[2].rate == Decimal('0.33')
assert str(results[3].month) == "2016-04-01"
assert results[3].notification_type == 'sms'
assert results[3].notifications_sent == 30
assert results[3].billable_units == 30
assert results[3].rate == Decimal('0.162')
assert str(results[4].month) == "2016-05-01"
assert str(results[47].month) == "2017-03-01"
def test_fetch_billing_totals_for_year(notify_db_session):
service = set_up_yearly_data()
results = fetch_billing_totals_for_year(service_id=service.id, year=2016)
assert len(results) == 4
assert results[0].notification_type == 'email'
assert results[0].notifications_sent == 365
assert results[0].billable_units == 365
assert results[0].rate == Decimal('0')
assert results[1].notification_type == 'letter'
assert results[1].notifications_sent == 365
assert results[1].billable_units == 365
assert results[1].rate == Decimal('0.3')
assert results[2].notification_type == 'letter'
assert results[2].notifications_sent == 365
assert results[2].billable_units == 365
assert results[2].rate == Decimal('0.33')
assert results[3].notification_type == 'sms'
assert results[3].notifications_sent == 365
assert results[3].billable_units == 365
assert results[3].rate == Decimal('0.162')
def test_delete_billing_data(notify_db_session):
service_1 = create_service(service_name='1')
service_2 = create_service(service_name='2')
sms_template = create_template(service_1, 'sms')
email_template = create_template(service_1, 'email')
other_service_template = create_template(service_2, 'sms')
existing_rows_to_delete = [ # noqa
create_ft_billing('2018-01-01', sms_template, billable_unit=1),
create_ft_billing('2018-01-01', email_template, billable_unit=2)
]
other_day = create_ft_billing('2018-01-02', sms_template, billable_unit=3)
other_service = create_ft_billing('2018-01-01', other_service_template, billable_unit=4)
delete_billing_data_for_service_for_day('2018-01-01', service_1.id)
current_rows = FactBilling.query.all()
assert sorted(x.billable_units for x in current_rows) == sorted(
[other_day.billable_units, other_service.billable_units]
)
def test_fetch_sms_free_allowance_remainder_until_date_with_two_services(notify_db_session):
service = create_service(service_name='has free allowance')
template = create_template(service=service)
org = create_organisation(name="Org for {}".format(service.name))
dao_add_service_to_organisation(service=service, organisation_id=org.id)
create_annual_billing(service_id=service.id, free_sms_fragment_limit=10, financial_year_start=2016)
create_ft_billing(template=template, bst_date=datetime(2016, 4, 20), billable_unit=2, rate=0.11)
create_ft_billing(template=template, bst_date=datetime(2016, 5, 20), billable_unit=3, rate=0.11)
service_2 = create_service(service_name='used free allowance')
template_2 = create_template(service=service_2)
org_2 = create_organisation(name="Org for {}".format(service_2.name))
dao_add_service_to_organisation(service=service_2, organisation_id=org_2.id)
create_annual_billing(service_id=service_2.id, free_sms_fragment_limit=20, financial_year_start=2016)
create_ft_billing(template=template_2, bst_date=datetime(2016, 4, 20), billable_unit=12, rate=0.11)
create_ft_billing(template=template_2, bst_date=datetime(2016, 4, 22), billable_unit=10, rate=0.11)
create_ft_billing(template=template_2, bst_date=datetime(2016, 5, 20), billable_unit=3, rate=0.11)
results = fetch_sms_free_allowance_remainder_until_date(datetime(2016, 5, 1)).all()
assert len(results) == 2
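    # Each row appears to be (service_id, free_sms_fragment_limit, billable
    # units used before the date, remaining allowance), with the remainder
    # floored at zero: 10 - 2 = 8, while 20 - 22 gives 0.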
service_result = [row for row in results if row[0] == service.id]
assert service_result[0] == (service.id, 10, 2, 8)
service_2_result = [row for row in results if row[0] == service_2.id]
assert service_2_result[0] == (service_2.id, 20, 22, 0)
def test_fetch_sms_billing_for_all_services_for_first_quarter(notify_db_session):
# This test is useful because the inner query resultset is empty.
service = create_service(service_name='a - has free allowance')
template = create_template(service=service)
org = create_organisation(name="Org for {}".format(service.name))
dao_add_service_to_organisation(service=service, organisation_id=org.id)
create_annual_billing(service_id=service.id, free_sms_fragment_limit=25000, financial_year_start=2019)
create_ft_billing(template=template, bst_date=datetime(2019, 4, 20), billable_unit=44, rate=0.11)
results = fetch_sms_billing_for_all_services(datetime(2019, 4, 1), datetime(2019, 5, 30))
assert len(results) == 1
assert results[0] == (org.name, org.id, service.name, service.id, 25000, Decimal('0.11'), 24956, 44, 0,
Decimal('0'))
def test_fetch_sms_billing_for_all_services_with_remainder(notify_db_session):
service_1 = create_service(service_name='a - has free allowance')
template = create_template(service=service_1)
org = create_organisation(name="Org for {}".format(service_1.name))
dao_add_service_to_organisation(service=service_1, organisation_id=org.id)
create_annual_billing(service_id=service_1.id, free_sms_fragment_limit=10, financial_year_start=2019)
create_ft_billing(template=template, bst_date=datetime(2019, 4, 20), billable_unit=2, rate=0.11)
create_ft_billing(template=template, bst_date=datetime(2019, 5, 20), billable_unit=2, rate=0.11)
create_ft_billing(template=template, bst_date=datetime(2019, 5, 22), billable_unit=1, rate=0.11)
service_2 = create_service(service_name='b - used free allowance')
template_2 = create_template(service=service_2)
org_2 = create_organisation(name="Org for {}".format(service_2.name))
dao_add_service_to_organisation(service=service_2, organisation_id=org_2.id)
create_annual_billing(service_id=service_2.id, free_sms_fragment_limit=10, financial_year_start=2019)
create_ft_billing(template=template_2, bst_date=datetime(2019, 4, 20), billable_unit=12, rate=0.11)
create_ft_billing(template=template_2, bst_date=datetime(2019, 5, 20), billable_unit=3, rate=0.11)
service_3 = create_service(service_name='c - partial allowance')
template_3 = create_template(service=service_3)
org_3 = create_organisation(name="Org for {}".format(service_3.name))
dao_add_service_to_organisation(service=service_3, organisation_id=org_3.id)
create_annual_billing(service_id=service_3.id, free_sms_fragment_limit=10, financial_year_start=2019)
create_ft_billing(template=template_3, bst_date=datetime(2019, 4, 20), billable_unit=5, rate=0.11)
create_ft_billing(template=template_3, bst_date=datetime(2019, 5, 20), billable_unit=7, rate=0.11)
service_4 = create_service(service_name='d - email only')
email_template = create_template(service=service_4, template_type='email')
org_4 = create_organisation(name="Org for {}".format(service_4.name))
dao_add_service_to_organisation(service=service_4, organisation_id=org_4.id)
create_annual_billing(service_id=service_4.id, free_sms_fragment_limit=10, financial_year_start=2019)
create_ft_billing(template=email_template, bst_date=datetime(2019, 5, 22), notifications_sent=5,
billable_unit=0, rate=0)
results = fetch_sms_billing_for_all_services(datetime(2019, 5, 1), datetime(2019, 5, 31))
assert len(results) == 3
expected_results = [
        # sms_remainder is 5 because "service_1" used 5 sms billable units in
        # total (against a free allowance of 10), 2 of them before the
        # requested report's start date.
{
"organisation_name": org.name, "organisation_id": org.id, "service_name": service_1.name,
"service_id": service_1.id, "free_sms_fragment_limit": 10, "sms_rate": Decimal('0.11'), "sms_remainder": 5,
"sms_billable_units": 3, "chargeable_billable_sms": 0, "sms_cost": Decimal('0.00')
},
        # sms_remainder is 0 because this service sent SMS worth 15 billable
        # units, 12 of which were sent before the requested report's start date
{
"organisation_name": org_2.name, "organisation_id": org_2.id, "service_name": service_2.name,
"service_id": service_2.id, "free_sms_fragment_limit": 10, "sms_rate": Decimal('0.11'), "sms_remainder": 0,
"sms_billable_units": 3, "chargeable_billable_sms": 3, "sms_cost": Decimal('0.33')
},
        # sms_remainder is 0 because this service sent SMS worth 12 billable
        # units, 5 of which were sent before the requested report's start date
{
"organisation_name": org_3.name, "organisation_id": org_3.id, "service_name": service_3.name,
"service_id": service_3.id, "free_sms_fragment_limit": 10, "sms_rate": Decimal('0.11'), "sms_remainder": 0,
"sms_billable_units": 7, "chargeable_billable_sms": 2, "sms_cost": Decimal('0.22')
},
]
assert [dict(result) for result in results] == expected_results
def test_fetch_sms_billing_for_all_services_without_an_organisation_appears(notify_db_session):
fixtures = set_up_usage_data(datetime(2019, 5, 1))
results = fetch_sms_billing_for_all_services(datetime(2019, 5, 1), datetime(2019, 5, 31))
assert len(results) == 3
expected_results = [
        # sms_remainder is 5 because service_1_sms_and_letter used 5 sms
        # billable units in total, 2 of them before the requested report's
        # start date.
{
"organisation_name": fixtures["org_1"].name, "organisation_id": fixtures["org_1"].id,
"service_name": fixtures["service_1_sms_and_letter"].name,
"service_id": fixtures["service_1_sms_and_letter"].id,
"free_sms_fragment_limit": 10, "sms_rate": Decimal('0.11'), "sms_remainder": 5,
"sms_billable_units": 3, "chargeable_billable_sms": 0, "sms_cost": Decimal('0.00')
},
        # sms_remainder is 0 because this service sent SMS worth 15 billable
        # units, 12 of which were sent before the requested report's start date
{
"organisation_name": None, "organisation_id": None,
"service_name": fixtures["service_with_sms_without_org"].name,
"service_id": fixtures["service_with_sms_without_org"].id, "free_sms_fragment_limit": 10,
"sms_rate": Decimal('0.11'), "sms_remainder": 0,
"sms_billable_units": 3, "chargeable_billable_sms": 3, "sms_cost": Decimal('0.33')
},
{
"organisation_name": None, "organisation_id": None,
"service_name": fixtures["service_with_sms_within_allowance"].name,
"service_id": fixtures["service_with_sms_within_allowance"].id, "free_sms_fragment_limit": 10,
"sms_rate": Decimal('0.11'), "sms_remainder": 8,
"sms_billable_units": 2, "chargeable_billable_sms": 0, "sms_cost": Decimal('0.00')
},
]
assert [dict(result) for result in results] == expected_results
def test_fetch_letter_costs_and_totals_for_all_services(notify_db_session):
fixtures = set_up_usage_data(datetime(2019, 6, 1))
results = fetch_letter_costs_and_totals_for_all_services(datetime(2019, 6, 1), datetime(2019, 9, 30))
assert len(results) == 3
assert results[0] == (
fixtures["org_1"].name, fixtures["org_1"].id,
fixtures["service_1_sms_and_letter"].name, fixtures["service_1_sms_and_letter"].id,
8, Decimal('3.40')
)
assert results[1] == (
fixtures["org_for_service_with_letters"].name, fixtures["org_for_service_with_letters"].id,
fixtures["service_with_letters"].name, fixtures["service_with_letters"].id,
22, Decimal('14.00')
)
assert results[2] == (
None, None,
fixtures["service_with_letters_without_org"].name, fixtures["service_with_letters_without_org"].id,
18, Decimal('24.45')
)
def test_fetch_letter_line_items_for_all_service(notify_db_session):
fixtures = set_up_usage_data(datetime(2019, 6, 1))
results = fetch_letter_line_items_for_all_services(datetime(2019, 6, 1), datetime(2019, 9, 30))
assert len(results) == 7
assert results[0] == (
fixtures["org_1"].name, fixtures["org_1"].id,
fixtures["service_1_sms_and_letter"].name, fixtures["service_1_sms_and_letter"].id,
Decimal('0.45'), 'second', 6
)
assert results[1] == (
fixtures["org_1"].name, fixtures["org_1"].id,
fixtures["service_1_sms_and_letter"].name, fixtures["service_1_sms_and_letter"].id,
Decimal("0.35"), 'first', 2
)
assert results[2] == (
fixtures["org_for_service_with_letters"].name, fixtures["org_for_service_with_letters"].id,
fixtures["service_with_letters"].name, fixtures["service_with_letters"].id,
Decimal("0.65"), 'second', 20
)
assert results[3] == (
fixtures["org_for_service_with_letters"].name, fixtures["org_for_service_with_letters"].id,
fixtures["service_with_letters"].name, fixtures["service_with_letters"].id,
Decimal("0.50"), 'first', 2
)
assert results[4] == (
None, None,
fixtures["service_with_letters_without_org"].name, fixtures["service_with_letters_without_org"].id,
Decimal("0.35"), 'second', 2
)
assert results[5] == (
None, None,
fixtures["service_with_letters_without_org"].name, fixtures["service_with_letters_without_org"].id,
Decimal("0.50"), 'first', 1
)
assert results[6] == (
None, None,
fixtures["service_with_letters_without_org"].name, fixtures["service_with_letters_without_org"].id,
Decimal("1.55"), 'international', 15
)
@freeze_time('2019-06-01 13:30')
def test_fetch_usage_year_for_organisation(notify_db_session):
fixtures = set_up_usage_data(datetime(2019, 5, 1))
service_with_emails_for_org = create_service(service_name='Service with emails for org')
dao_add_service_to_organisation(
service=service_with_emails_for_org,
organisation_id=fixtures["org_1"].id
)
template = create_template(service=service_with_emails_for_org, template_type='email')
create_ft_billing(bst_date=datetime(2019, 5, 1),
template=template,
notifications_sent=1100)
results = fetch_usage_year_for_organisation(fixtures["org_1"].id, 2019)
assert len(results) == 3
first_row = results[str(fixtures["service_1_sms_and_letter"].id)]
assert first_row['service_id'] == fixtures["service_1_sms_and_letter"].id
assert first_row['service_name'] == fixtures["service_1_sms_and_letter"].name
assert first_row['free_sms_limit'] == 10
assert first_row['sms_remainder'] == 5 # because there are 5 billable units
assert first_row['chargeable_billable_sms'] == 0
assert first_row['sms_cost'] == 0.0
assert first_row['letter_cost'] == 3.4
assert first_row['emails_sent'] == 0
second_row = results[str(service_with_emails_for_org.id)]
assert second_row['service_id'] == service_with_emails_for_org.id
assert second_row['service_name'] == service_with_emails_for_org.name
assert second_row['free_sms_limit'] == 0
assert second_row['sms_remainder'] == 0
assert second_row['chargeable_billable_sms'] == 0
assert second_row['sms_cost'] == 0
assert second_row['letter_cost'] == 0
assert second_row['emails_sent'] == 1100
third_row = results[str(fixtures["service_with_out_ft_billing_this_year"].id)]
assert third_row['service_id'] == fixtures["service_with_out_ft_billing_this_year"].id
assert third_row['service_name'] == fixtures["service_with_out_ft_billing_this_year"].name
assert third_row['free_sms_limit'] == 10
assert third_row['sms_remainder'] == 10
assert third_row['chargeable_billable_sms'] == 0
assert third_row['sms_cost'] == 0
assert third_row['letter_cost'] == 0
assert third_row['emails_sent'] == 0
def test_fetch_usage_year_for_organisation_populates_ft_billing_for_today(notify_db_session):
create_letter_rate(start_date=datetime.utcnow() - timedelta(days=1))
create_rate(start_date=datetime.utcnow() - timedelta(days=1), value=0.65, notification_type='sms')
new_org = create_organisation(name='New organisation')
service = create_service()
template = create_template(service=service)
dao_add_service_to_organisation(service=service, organisation_id=new_org.id)
current_year = datetime.utcnow().year
create_annual_billing(service_id=service.id, free_sms_fragment_limit=10, financial_year_start=current_year)
assert FactBilling.query.count() == 0
create_notification(template=template, status='delivered')
results = fetch_usage_year_for_organisation(organisation_id=new_org.id, year=current_year)
assert len(results) == 1
assert FactBilling.query.count() == 1
@freeze_time('2020-02-27 13:30')
def test_fetch_usage_year_for_organisation_only_returns_data_for_live_services(notify_db_session):
org = create_organisation(name='Organisation without live services')
live_service = create_service(restricted=False)
sms_template = create_template(service=live_service)
trial_service = create_service(restricted=True, service_name='trial_service')
email_template = create_template(service=trial_service, template_type='email')
trial_sms_template = create_template(service=trial_service, template_type='sms')
trial_letter_template = create_template(service=trial_service, template_type='letter')
dao_add_service_to_organisation(service=live_service, organisation_id=org.id)
dao_add_service_to_organisation(service=trial_service, organisation_id=org.id)
create_ft_billing(bst_date=datetime.utcnow().date(), template=sms_template, rate=0.0158,
billable_unit=19, notifications_sent=19)
create_ft_billing(bst_date=datetime.utcnow().date(), template=email_template, billable_unit=0,
notifications_sent=100)
create_ft_billing(bst_date=datetime.utcnow().date(), template=trial_sms_template, billable_unit=200, rate=0.0158,
notifications_sent=100)
create_ft_billing(bst_date=datetime.utcnow().date(), template=trial_letter_template, billable_unit=40, rate=0.30,
notifications_sent=20)
results = fetch_usage_year_for_organisation(organisation_id=org.id, year=2019)
assert len(results) == 1
assert results[str(live_service.id)]['sms_billable_units'] == 19
assert results[str(live_service.id)]['emails_sent'] == 0
def test_fetch_daily_volumes_for_platform(
notify_db_session, sample_template, sample_email_template, sample_letter_template
):
create_ft_billing(bst_date='2022-02-03', template=sample_template,
notifications_sent=10, billable_unit=10)
create_ft_billing(bst_date='2022-02-03', template=sample_template,
notifications_sent=10, billable_unit=30, international=True)
create_ft_billing(bst_date='2022-02-03', template=sample_email_template, notifications_sent=10)
create_ft_billing(bst_date='2022-02-03', template=sample_letter_template, notifications_sent=5,
billable_unit=5, rate=0.39)
create_ft_billing(bst_date='2022-02-03', template=sample_letter_template, notifications_sent=5,
billable_unit=10, rate=0.44)
create_ft_billing(bst_date='2022-02-04', template=sample_template,
notifications_sent=20, billable_unit=40)
create_ft_billing(bst_date='2022-02-04', template=sample_template,
notifications_sent=10, billable_unit=20, rate_multiplier=3)
create_ft_billing(bst_date='2022-02-04', template=sample_email_template, notifications_sent=50)
create_ft_billing(bst_date='2022-02-04', template=sample_letter_template, notifications_sent=20, billable_unit=40)
results = fetch_daily_volumes_for_platform(start_date='2022-02-03', end_date='2022-02-04')
assert len(results) == 2
assert results[0].bst_date == '2022-02-03'
assert results[0].sms_totals == 20
assert results[0].sms_fragment_totals == 40
assert results[0].sms_chargeable_units == 40
assert results[0].email_totals == 10
assert results[0].letter_totals == 10
assert results[0].letter_sheet_totals == 15
assert results[1].bst_date == '2022-02-04'
assert results[1].sms_totals == 30
assert results[1].sms_fragment_totals == 60
assert results[1].sms_chargeable_units == 100
assert results[1].email_totals == 50
assert results[1].letter_totals == 20
assert results[1].letter_sheet_totals == 40
def test_fetch_volumes_by_service(notify_db_session):
set_up_usage_data(datetime(2022, 2, 1))
results = fetch_volumes_by_service(start_date=datetime(2022, 2, 1), end_date=datetime(2022, 2, 28))
assert len(results) == 4
assert results[0].service_name == 'a - with sms and letter'
assert results[0].organisation_name == 'Org for a - with sms and letter'
assert results[0].free_allowance == 10
assert results[0].sms_notifications == 2
assert results[0].sms_chargeable_units == 3
assert results[0].email_totals == 0
assert results[0].letter_totals == 4
assert results[0].letter_sheet_totals == 6
assert float(results[0].letter_cost) == 1.6
assert results[1].service_name == 'f - without ft_billing'
assert results[1].organisation_name == 'Org for a - with sms and letter'
assert results[1].free_allowance == 10
assert results[1].sms_notifications == 0
assert results[1].sms_chargeable_units == 0
assert results[1].email_totals == 0
assert results[1].letter_totals == 0
assert results[1].letter_sheet_totals == 0
assert float(results[1].letter_cost) == 0
assert results[2].service_name == 'b - chargeable sms'
assert not results[2].organisation_name
assert results[2].free_allowance == 10
assert results[2].sms_notifications == 2
assert results[2].sms_chargeable_units == 3
assert results[2].email_totals == 0
assert results[2].letter_totals == 0
assert results[2].letter_sheet_totals == 0
assert float(results[2].letter_cost) == 0
assert results[3].service_name == 'e - sms within allowance'
assert not results[3].organisation_name
assert results[3].free_allowance == 10
assert results[3].sms_notifications == 1
assert results[3].sms_chargeable_units == 2
assert results[3].email_totals == 0
assert results[3].letter_totals == 0
assert results[3].letter_sheet_totals == 0
assert float(results[3].letter_cost) == 0
|
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import stat
import unittest
import mock
from pyfakefs import fake_filesystem_unittest
from catapult_base import cloud_storage
from catapult_base.dependency_manager import archive_info
from catapult_base.dependency_manager import cloud_storage_info
from catapult_base.dependency_manager import exceptions
class CloudStorageInfoTest(unittest.TestCase):
def testInitCloudStorageInfoErrors(self):
# Must specify cloud storage information atomically.
self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
None, None, None, None)
self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
'cs_bucket', None, None, None)
self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
None, 'cs_hash', None, None)
self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
None, None, 'download_path', None)
self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
None, None, None, 'cs_remote_path')
self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
None, 'cs_hash', 'download_path', 'cs_remote_path')
self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
'cs_bucket', None, 'download_path', 'cs_remote_path')
self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
'cs_bucket', 'cs_hash', None, 'cs_remote_path')
self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
'cs_bucket', 'cs_hash', 'download_path', None)
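  # A hedged note (illustrative, not part of the original suite): the
  # constructor is all-or-nothing, so the minimal valid call supplies all
  # four positional values, as testInitMinimumCloudStorageInfo below does:
  #
  #   cs_info = cloud_storage_info.CloudStorageInfo(
  #       'cs_bucket', 'cs_hash', 'download_path', 'cs_remote_path')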
def testInitWithVersion(self):
self.assertRaises(
ValueError, cloud_storage_info.CloudStorageInfo, None, None, None,
'cs_remote_path', version_in_cs='version_in_cs')
self.assertRaises(
ValueError, cloud_storage_info.CloudStorageInfo, None, 'cs_hash',
'download_path', 'cs_remote_path', version_in_cs='version_in_cs')
cs_info = cloud_storage_info.CloudStorageInfo(
'cs_bucket', 'cs_hash', 'download_path', 'cs_remote_path',
version_in_cs='version_in_cs')
self.assertEqual('cs_hash', cs_info._cs_hash)
self.assertEqual('cs_bucket', cs_info._cs_bucket)
self.assertEqual('cs_remote_path', cs_info._cs_remote_path)
self.assertEqual('download_path', cs_info._download_path)
self.assertEqual('version_in_cs', cs_info._version_in_cs)
def testInitWithArchiveInfoErrors(self):
zip_info = archive_info.ArchiveInfo(
'download_path', 'unzip_location', 'path_within_archive')
self.assertRaises(
ValueError, cloud_storage_info.CloudStorageInfo, None, None, None, None,
archive_info=zip_info)
self.assertRaises(
ValueError, cloud_storage_info.CloudStorageInfo, None, None, None,
'cs_remote_path', archive_info=zip_info)
self.assertRaises(
ValueError, cloud_storage_info.CloudStorageInfo, 'cs_bucket', 'cs_hash',
None, 'cs_remote_path', archive_info=zip_info)
self.assertRaises(ValueError, cloud_storage_info.CloudStorageInfo,
'cs_bucket', 'cs_hash',
'cs_remote_path', None, version_in_cs='version',
archive_info=zip_info)
def testInitWithArchiveInfo(self):
zip_info = archive_info.ArchiveInfo(
'download_path', 'unzip_location', 'path_within_archive')
cs_info = cloud_storage_info.CloudStorageInfo(
'cs_bucket', 'cs_hash', 'download_path', 'cs_remote_path',
archive_info=zip_info)
self.assertEqual('cs_hash', cs_info._cs_hash)
self.assertEqual('cs_bucket', cs_info._cs_bucket)
self.assertEqual('cs_remote_path', cs_info._cs_remote_path)
self.assertEqual('download_path', cs_info._download_path)
self.assertEqual(zip_info, cs_info._archive_info)
self.assertFalse(cs_info._version_in_cs)
def testInitWithVersionAndArchiveInfo(self):
zip_info = archive_info.ArchiveInfo(
'download_path', 'unzip_location', 'path_within_archive')
cs_info = cloud_storage_info.CloudStorageInfo(
'cs_bucket', 'cs_hash', 'download_path',
'cs_remote_path', version_in_cs='version_in_cs',
archive_info=zip_info)
self.assertEqual('cs_hash', cs_info._cs_hash)
self.assertEqual('cs_bucket', cs_info._cs_bucket)
self.assertEqual('cs_remote_path', cs_info._cs_remote_path)
self.assertEqual('download_path', cs_info._download_path)
self.assertEqual(zip_info, cs_info._archive_info)
self.assertEqual('version_in_cs', cs_info._version_in_cs)
def testInitMinimumCloudStorageInfo(self):
cs_info = cloud_storage_info.CloudStorageInfo(
'cs_bucket',
'cs_hash', 'download_path',
'cs_remote_path')
self.assertEqual('cs_hash', cs_info._cs_hash)
self.assertEqual('cs_bucket', cs_info._cs_bucket)
self.assertEqual('cs_remote_path', cs_info._cs_remote_path)
self.assertEqual('download_path', cs_info._download_path)
self.assertFalse(cs_info._version_in_cs)
self.assertFalse(cs_info._archive_info)
class TestGetRemotePath(fake_filesystem_unittest.TestCase):
def setUp(self):
self.setUpPyfakefs()
self.config_path = '/test/dep_config.json'
self.fs.CreateFile(self.config_path, contents='{}')
self.download_path = '/foo/download_path'
self.fs.CreateFile(
self.download_path, contents='1010110', st_mode=stat.S_IWOTH)
self.cs_info = cloud_storage_info.CloudStorageInfo(
'cs_bucket', 'cs_hash', self.download_path, 'cs_remote_path',
version_in_cs='1.2.3.4',)
def tearDown(self):
self.tearDownPyfakefs()
@mock.patch(
'catapult_base.cloud_storage.GetIfHashChanged')
def testGetRemotePathNoArchive(self, cs_get_mock):
def _GetIfHashChangedMock(cs_path, download_path, bucket, file_hash):
del cs_path, bucket, file_hash
if not os.path.exists(download_path):
self.fs.CreateFile(download_path, contents='1010001010101010110101')
cs_get_mock.side_effect = _GetIfHashChangedMock
# All of the needed information is given, and the downloaded path exists
# after calling cloud storage.
self.assertEqual(
os.path.abspath(self.download_path),
self.cs_info.GetRemotePath())
self.assertTrue(os.stat(self.download_path).st_mode & stat.S_IXUSR)
# All of the needed information is given, but the downloaded path doesn't
    # exist after calling cloud storage.
self.fs.RemoveObject(self.download_path)
cs_get_mock.side_effect = [True]
self.assertRaises(
exceptions.FileNotFoundError, self.cs_info.GetRemotePath)
@mock.patch(
'catapult_base.dependency_manager.dependency_manager_util.UnzipArchive')
@mock.patch(
'catapult_base.dependency_manager.cloud_storage_info.cloud_storage.GetIfHashChanged') # pylint: disable=line-too-long
def testGetRemotePathWithArchive(self, cs_get_mock, unzip_mock):
def _GetIfHashChangedMock(cs_path, download_path, bucket, file_hash):
del cs_path, bucket, file_hash
if not os.path.exists(download_path):
self.fs.CreateFile(download_path, contents='1010001010101010110101')
cs_get_mock.side_effect = _GetIfHashChangedMock
unzip_path = os.path.join(
os.path.dirname(self.download_path), 'unzip_dir')
path_within_archive = os.path.join('path', 'within', 'archive')
dep_path = os.path.join(unzip_path, path_within_archive)
def _UnzipFileMock(archive_file, unzip_location, tmp_location=None):
del archive_file, tmp_location
self.fs.CreateFile(dep_path)
self.fs.CreateFile(os.path.join(unzip_location, 'extra', 'path'))
self.fs.CreateFile(os.path.join(unzip_location, 'another_extra_path'))
unzip_mock.side_effect = _UnzipFileMock
self.assertFalse(os.path.exists(dep_path))
zip_info = archive_info.ArchiveInfo(
self.download_path, unzip_path, path_within_archive)
self.cs_info = cloud_storage_info.CloudStorageInfo(
'cs_bucket', 'cs_hash', self.download_path, 'cs_remote_path',
version_in_cs='1.2.3.4', archive_info=zip_info)
self.assertFalse(unzip_mock.called)
self.assertEqual(
os.path.abspath(dep_path),
self.cs_info.GetRemotePath())
self.assertTrue(os.path.exists(dep_path))
self.assertTrue(stat.S_IMODE(os.stat(os.path.abspath(dep_path)).st_mode) &
(stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR))
unzip_mock.assert_called_once_with(self.download_path, unzip_path)
# Should not need to unzip a second time, but should return the same path.
unzip_mock.reset_mock()
self.assertTrue(os.path.exists(dep_path))
self.assertEqual(
os.path.abspath(dep_path),
self.cs_info.GetRemotePath())
self.assertTrue(stat.S_IMODE(os.stat(os.path.abspath(dep_path)).st_mode) &
(stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR))
self.assertFalse(unzip_mock.called)
@mock.patch(
'catapult_base.cloud_storage.GetIfHashChanged')
def testGetRemotePathCloudStorageErrors(self, cs_get_mock):
cs_get_mock.side_effect = cloud_storage.CloudStorageError
self.assertRaises(cloud_storage.CloudStorageError,
self.cs_info.GetRemotePath)
cs_get_mock.side_effect = cloud_storage.ServerError
self.assertRaises(cloud_storage.ServerError,
self.cs_info.GetRemotePath)
cs_get_mock.side_effect = cloud_storage.NotFoundError
self.assertRaises(cloud_storage.NotFoundError,
self.cs_info.GetRemotePath)
cs_get_mock.side_effect = cloud_storage.PermissionError
self.assertRaises(cloud_storage.PermissionError,
self.cs_info.GetRemotePath)
cs_get_mock.side_effect = cloud_storage.CredentialsError
self.assertRaises(cloud_storage.CredentialsError,
self.cs_info.GetRemotePath)
|
|
# -*- coding: utf-8 -*-
import logging
import re
from babelfish import Language, language_converters
from guessit import guessit
from requests import Session
from . import ParserBeautifulSoup, Provider
from ..cache import SHOW_EXPIRATION_TIME, region
from ..exceptions import AuthenticationError, ConfigurationError, DownloadLimitExceeded
from ..matches import guess_matches
from ..subtitle import Subtitle, fix_line_ending
from ..utils import sanitize
from ..video import Episode
logger = logging.getLogger(__name__)
language_converters.register('addic7ed = subliminal.converters.addic7ed:Addic7edConverter')
# Series cell matching regex
show_cells_re = re.compile(b'<td class="version">.*?</td>', re.DOTALL)
#: Series header parsing regex
series_year_re = re.compile(r'^(?P<series>[ \w\'.:(),*&!?-]+?)(?: \((?P<year>\d{4})\))?$')
class Addic7edSubtitle(Subtitle):
"""Addic7ed Subtitle."""
provider_name = 'addic7ed'
def __init__(self, language, hearing_impaired, page_link, series, season, episode, title, year, version,
download_link):
super(Addic7edSubtitle, self).__init__(language, hearing_impaired=hearing_impaired, page_link=page_link)
self.series = series
self.season = season
self.episode = episode
self.title = title
self.year = year
self.version = version
self.download_link = download_link
@property
def id(self):
return self.download_link
@property
def info(self):
return '{series}{yopen}{year}{yclose} s{season:02d}e{episode:02d}{topen}{title}{tclose}{version}'.format(
series=self.series, season=self.season, episode=self.episode, title=self.title, year=self.year or '',
version=self.version, yopen=' (' if self.year else '', yclose=')' if self.year else '',
topen=' - ' if self.title else '', tclose=' - ' if self.version else ''
)
def get_matches(self, video):
# series name
matches = guess_matches(video, {
'title': self.series,
'season': self.season,
'episode': self.episode,
'episode_title': self.title,
'year': self.year,
'release_group': self.version,
})
# resolution
if video.resolution and self.version and video.resolution in self.version.lower():
matches.add('resolution')
# other properties
if self.version:
matches |= guess_matches(video, guessit(self.version, {'type': 'episode'}), partial=True)
return matches
class Addic7edProvider(Provider):
"""Addic7ed Provider."""
languages = {Language('por', 'BR')} | {Language(l) for l in [
'ara', 'aze', 'ben', 'bos', 'bul', 'cat', 'ces', 'dan', 'deu', 'ell', 'eng', 'eus', 'fas', 'fin', 'fra', 'glg',
'heb', 'hrv', 'hun', 'hye', 'ind', 'ita', 'jpn', 'kor', 'mkd', 'msa', 'nld', 'nor', 'pol', 'por', 'ron', 'rus',
'slk', 'slv', 'spa', 'sqi', 'srp', 'swe', 'tha', 'tur', 'ukr', 'vie', 'zho'
]}
video_types = (Episode,)
server_url = 'http://www.addic7ed.com/'
subtitle_class = Addic7edSubtitle
def __init__(self, username=None, password=None):
if any((username, password)) and not all((username, password)):
raise ConfigurationError('Username and password must be specified')
self.username = username
self.password = password
self.logged_in = False
self.session = None
def initialize(self):
self.session = Session()
self.session.headers['User-Agent'] = self.user_agent
# login
if self.username and self.password:
logger.info('Logging in')
data = {'username': self.username, 'password': self.password, 'Submit': 'Log in'}
r = self.session.post(self.server_url + 'dologin.php', data, allow_redirects=False, timeout=10)
if r.status_code != 302:
raise AuthenticationError(self.username)
logger.debug('Logged in')
self.logged_in = True
def terminate(self):
# logout
if self.logged_in:
logger.info('Logging out')
r = self.session.get(self.server_url + 'logout.php', timeout=10)
r.raise_for_status()
logger.debug('Logged out')
self.logged_in = False
self.session.close()
@region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME)
def _get_show_ids(self):
"""Get the ``dict`` of show ids per series by querying the `shows.php` page.
:return: show id per series, lower case and without quotes.
:rtype: dict
"""
# get the show page
logger.info('Getting show ids')
r = self.session.get(self.server_url + 'shows.php', timeout=10)
r.raise_for_status()
# LXML parser seems to fail when parsing Addic7ed.com HTML markup.
# Last known version to work properly is 3.6.4 (next version, 3.7.0, fails)
# Assuming the site's markup is bad, and stripping it down to only contain what's needed.
show_cells = re.findall(show_cells_re, r.content)
if show_cells:
soup = ParserBeautifulSoup(b''.join(show_cells), ['lxml', 'html.parser'])
else:
# If RegEx fails, fall back to original r.content and use 'html.parser'
soup = ParserBeautifulSoup(r.content, ['html.parser'])
# populate the show ids
show_ids = {}
for show in soup.select('td.version > h3 > a[href^="/show/"]'):
show_ids[sanitize(show.text)] = int(show['href'][6:])
logger.debug('Found %d show ids', len(show_ids))
return show_ids
@region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME)
def _search_show_id(self, series, year=None):
"""Search the show id from the `series` and `year`.
:param str series: series of the episode.
:param year: year of the series, if any.
:type year: int
:return: the show id, if found.
:rtype: int
"""
# addic7ed doesn't support search with quotes
series = series.replace('\'', ' ')
# build the params
series_year = '%s %d' % (series, year) if year is not None else series
params = {'search': series_year, 'Submit': 'Search'}
# make the search
logger.info('Searching show ids with %r', params)
r = self.session.get(self.server_url + 'srch.php', params=params, timeout=10)
r.raise_for_status()
soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])
# get the suggestion
suggestion = soup.select('span.titulo > a[href^="/show/"]')
if not suggestion:
logger.warning('Show id not found: no suggestion')
return None
if not sanitize(suggestion[0].i.text.replace('\'', ' ')) == sanitize(series_year):
logger.warning('Show id not found: suggestion does not match')
return None
show_id = int(suggestion[0]['href'][6:])
logger.debug('Found show id %d', show_id)
return show_id
def get_show_id(self, series, year=None, country_code=None):
"""Get the best matching show id for `series`, `year` and `country_code`.
First search in the result of :meth:`_get_show_ids` and fallback on a search with :meth:`_search_show_id`.
:param str series: series of the episode.
:param year: year of the series, if any.
:type year: int
:param country_code: country code of the series, if any.
:type country_code: str
:return: the show id, if found.
:rtype: int
"""
series_sanitized = sanitize(series).lower()
show_ids = self._get_show_ids()
show_id = None
# attempt with country
if not show_id and country_code:
logger.debug('Getting show id with country')
show_id = show_ids.get('%s %s' % (series_sanitized, country_code.lower()))
# attempt with year
if not show_id and year:
logger.debug('Getting show id with year')
show_id = show_ids.get('%s %d' % (series_sanitized, year))
# attempt clean
if not show_id:
logger.debug('Getting show id')
show_id = show_ids.get(series_sanitized)
# search as last resort
if not show_id:
logger.warning('Series %s not found in show ids', series)
show_id = self._search_show_id(series)
return show_id
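    # Illustrative usage (the series name and year here are assumptions, not
    # from the original source): get_show_id falls back in order, e.g.
    #
    #   show_id = provider.get_show_id('doctor who', year=2005)
    #
    # first tries 'doctor who 2005' against the cached show ids, then plain
    # 'doctor who', and only then issues a site search via _search_show_id.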
def query(self, show_id, series, season, year=None, country=None):
# get the page of the season of the show
logger.info('Getting the page of show id %d, season %d', show_id, season)
r = self.session.get(self.server_url + 'show/%d' % show_id, params={'season': season}, timeout=10)
r.raise_for_status()
if not r.content:
# Provider returns a status of 304 Not Modified with an empty content
# raise_for_status won't raise exception for that status code
logger.debug('No data returned from provider')
return []
soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])
# loop over subtitle rows
match = series_year_re.match(soup.select('#header font')[0].text.strip()[:-10])
series = match.group('series')
year = int(match.group('year')) if match.group('year') else None
subtitles = []
for row in soup.select('tr.epeven'):
cells = row('td')
# ignore incomplete subtitles
status = cells[5].text
if status != 'Completed':
logger.debug('Ignoring subtitle with status %s', status)
continue
# read the item
language = Language.fromaddic7ed(cells[3].text)
hearing_impaired = bool(cells[6].text)
page_link = self.server_url + cells[2].a['href'][1:]
season = int(cells[0].text)
episode = int(cells[1].text)
title = cells[2].text
version = cells[4].text
download_link = cells[9].a['href'][1:]
subtitle = self.subtitle_class(language, hearing_impaired, page_link, series, season, episode, title, year,
version, download_link)
logger.debug('Found subtitle %r', subtitle)
subtitles.append(subtitle)
return subtitles
def list_subtitles(self, video, languages):
# lookup show_id
titles = [video.series] + video.alternative_series
show_id = None
for title in titles:
show_id = self.get_show_id(title, video.year)
if show_id is not None:
break
# query for subtitles with the show_id
if show_id is not None:
subtitles = [s for s in self.query(show_id, title, video.season, video.year)
if s.language in languages and s.episode == video.episode]
if subtitles:
return subtitles
else:
logger.error('No show id found for %r (%r)', video.series, {'year': video.year})
return []
def download_subtitle(self, subtitle):
# download the subtitle
logger.info('Downloading subtitle %r', subtitle)
r = self.session.get(self.server_url + subtitle.download_link, headers={'Referer': subtitle.page_link},
timeout=10)
r.raise_for_status()
if not r.content:
# Provider returns a status of 304 Not Modified with an empty content
# raise_for_status won't raise exception for that status code
logger.debug('Unable to download subtitle. No data returned from provider')
return
# detect download limit exceeded
if r.headers['Content-Type'] == 'text/html':
raise DownloadLimitExceeded
subtitle.content = fix_line_ending(r.content)
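# A minimal usage sketch (illustrative, not part of the original module),
# assuming `video` is a subliminal Episode object:
#
#   provider = Addic7edProvider()
#   provider.initialize()
#   try:
#       subtitles = provider.list_subtitles(video, {Language('eng')})
#       if subtitles:
#           provider.download_subtitle(subtitles[0])
#   finally:
#       provider.terminate()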
|
|
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import defer
from synapse.util.logutils import log_function
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.api.constants import EventTypes
from synapse.api.errors import AuthError
from synapse.api.auth import AuthEventTypes
from synapse.events.snapshot import EventContext
from collections import namedtuple
import logging
import hashlib
logger = logging.getLogger(__name__)
KeyStateTuple = namedtuple("KeyStateTuple", ("context", "type", "state_key"))
SIZE_OF_CACHE = 1000
EVICTION_TIMEOUT_SECONDS = 20
class _StateCacheEntry(object):
    def __init__(self, state, state_group, ts):
        self.state = state
        self.state_group = state_group
        self.ts = ts
class StateHandler(object):
""" Responsible for doing state conflict resolution.
"""
def __init__(self, hs):
self.clock = hs.get_clock()
self.store = hs.get_datastore()
self.hs = hs
        # dict from frozenset of event_ids -> _StateCacheEntry.
self._state_cache = None
def start_caching(self):
logger.debug("start_caching")
self._state_cache = ExpiringCache(
cache_name="state_cache",
clock=self.clock,
max_len=SIZE_OF_CACHE,
expiry_ms=EVICTION_TIMEOUT_SECONDS*1000,
reset_expiry_on_get=True,
)
self._state_cache.start()
@defer.inlineCallbacks
def get_current_state(self, room_id, event_type=None, state_key=""):
""" Retrieves the current state for the room. This is done by
calling `get_latest_events_in_room` to get the leading edges of the
event graph and then resolving any of the state conflicts.
        This is equivalent to getting the state of an event that we were about
        to send next, before receiving any new events.
If `event_type` is specified, then the method returns only the one
event (or None) with that `event_type` and `state_key`.
:returns map from (type, state_key) to event
"""
event_ids = yield self.store.get_latest_event_ids_in_room(room_id)
cache = None
if self._state_cache is not None:
cache = self._state_cache.get(frozenset(event_ids), None)
if cache:
cache.ts = self.clock.time_msec()
state = cache.state
else:
res = yield self.resolve_state_groups(room_id, event_ids)
state = res[1]
if event_type:
defer.returnValue(state.get((event_type, state_key)))
return
defer.returnValue(state)
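    # Illustrative call (an assumption, not from the original source): with an
    # event_type this resolves to a single event, e.g.
    #
    #   member_event = yield state_handler.get_current_state(
    #       room_id, EventTypes.Member, state_key=user_id)
    #
    # while omitting event_type returns the full (type, state_key) -> event map.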
@defer.inlineCallbacks
def compute_event_context(self, event, old_state=None, outlier=False):
""" Fills out the context with the `current state` of the graph. The
`current state` here is defined to be the state of the event graph
        just before the event - i.e. it never includes `event`.
If `event` has `auth_events` then this will also fill out the
`auth_events` field on `context` from the `current_state`.
Args:
event (EventBase)
Returns:
an EventContext
"""
context = EventContext()
if outlier:
# If this is an outlier, then we know it shouldn't have any current
# state. Certainly store.get_current_state won't return any, and
# persisting the event won't store the state group.
if old_state:
context.current_state = {
(s.type, s.state_key): s for s in old_state
}
else:
context.current_state = {}
context.prev_state_events = []
context.state_group = None
defer.returnValue(context)
if old_state:
context.current_state = {
(s.type, s.state_key): s for s in old_state
}
context.state_group = None
if event.is_state():
key = (event.type, event.state_key)
if key in context.current_state:
replaces = context.current_state[key]
if replaces.event_id != event.event_id: # Paranoia check
event.unsigned["replaces_state"] = replaces.event_id
context.prev_state_events = []
defer.returnValue(context)
if event.is_state():
ret = yield self.resolve_state_groups(
event.room_id, [e for e, _ in event.prev_events],
event_type=event.type,
state_key=event.state_key,
)
else:
ret = yield self.resolve_state_groups(
event.room_id, [e for e, _ in event.prev_events],
)
group, curr_state, prev_state = ret
context.current_state = curr_state
context.state_group = group if not event.is_state() else None
if event.is_state():
key = (event.type, event.state_key)
if key in context.current_state:
replaces = context.current_state[key]
event.unsigned["replaces_state"] = replaces.event_id
context.prev_state_events = prev_state
defer.returnValue(context)
@defer.inlineCallbacks
@log_function
def resolve_state_groups(self, room_id, event_ids, event_type=None, state_key=""):
""" Given a list of event_ids this method fetches the state at each
event, resolves conflicts between them and returns them.
:returns a Deferred tuple of (`state_group`, `state`, `prev_state`).
`state_group` is the name of a state group if one and only one is
involved. `state` is a map from (type, state_key) to event, and
`prev_state` is a list of event ids.
"""
logger.debug("resolve_state_groups event_ids %s", event_ids)
if self._state_cache is not None:
cache = self._state_cache.get(frozenset(event_ids), None)
if cache and cache.state_group:
cache.ts = self.clock.time_msec()
prev_state = cache.state.get((event_type, state_key), None)
if prev_state:
prev_state = prev_state.event_id
prev_states = [prev_state]
else:
prev_states = []
defer.returnValue(
(cache.state_group, cache.state, prev_states)
)
state_groups = yield self.store.get_state_groups(
room_id, event_ids
)
logger.debug(
"resolve_state_groups state_groups %s",
state_groups.keys()
)
group_names = set(state_groups.keys())
if len(group_names) == 1:
name, state_list = state_groups.items().pop()
state = {
(e.type, e.state_key): e
for e in state_list
}
prev_state = state.get((event_type, state_key), None)
if prev_state:
prev_state = prev_state.event_id
prev_states = [prev_state]
else:
prev_states = []
if self._state_cache is not None:
cache = _StateCacheEntry(
state=state,
state_group=name,
ts=self.clock.time_msec()
)
self._state_cache[frozenset(event_ids)] = cache
defer.returnValue((name, state, prev_states))
new_state, prev_states = self._resolve_events(
state_groups.values(), event_type, state_key
)
if self._state_cache is not None:
cache = _StateCacheEntry(
state=new_state,
state_group=None,
ts=self.clock.time_msec()
)
self._state_cache[frozenset(event_ids)] = cache
defer.returnValue((None, new_state, prev_states))
def resolve_events(self, state_sets, event):
if event.is_state():
return self._resolve_events(
state_sets, event.type, event.state_key
)
else:
return self._resolve_events(state_sets)
def _resolve_events(self, state_sets, event_type=None, state_key=""):
"""
:returns a tuple (new_state, prev_states). new_state is a map
from (type, state_key) to event. prev_states is a list of event_ids.
:rtype: (dict[(str, str), synapse.events.FrozenEvent], list[str])
"""
state = {}
for st in state_sets:
for e in st:
state.setdefault(
(e.type, e.state_key),
{}
)[e.event_id] = e
unconflicted_state = {
k: v.values()[0] for k, v in state.items()
if len(v.values()) == 1
}
conflicted_state = {
k: v.values()
for k, v in state.items()
if len(v.values()) > 1
}
if event_type:
prev_states_events = conflicted_state.get(
(event_type, state_key), []
)
prev_states = [s.event_id for s in prev_states_events]
else:
prev_states = []
auth_events = {
k: e for k, e in unconflicted_state.items()
if k[0] in AuthEventTypes
}
try:
resolved_state = self._resolve_state_events(
conflicted_state, auth_events
)
        except Exception:
logger.exception("Failed to resolve state")
raise
new_state = unconflicted_state
new_state.update(resolved_state)
return new_state, prev_states
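    # A worked sketch of the split above (illustrative, not from the original
    # source): if two state sets disagree on ('m.room.member', '@u:hs') but
    # agree on ('m.room.create', ''), unconflicted_state keeps the create
    # event as-is and only the membership key is passed to
    # _resolve_state_events for conflict resolution.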
@log_function
def _resolve_state_events(self, conflicted_state, auth_events):
""" This is where we actually decide which of the conflicted state to
use.
We resolve conflicts in the following order:
1. power levels
2. join rules
3. memberships
4. other events.
"""
resolved_state = {}
power_key = (EventTypes.PowerLevels, "")
if power_key in conflicted_state:
events = conflicted_state[power_key]
logger.debug("Resolving conflicted power levels %r", events)
resolved_state[power_key] = self._resolve_auth_events(
events, auth_events)
auth_events.update(resolved_state)
for key, events in conflicted_state.items():
if key[0] == EventTypes.JoinRules:
logger.debug("Resolving conflicted join rules %r", events)
resolved_state[key] = self._resolve_auth_events(
events,
auth_events
)
auth_events.update(resolved_state)
for key, events in conflicted_state.items():
if key[0] == EventTypes.Member:
logger.debug("Resolving conflicted member lists %r", events)
resolved_state[key] = self._resolve_auth_events(
events,
auth_events
)
auth_events.update(resolved_state)
for key, events in conflicted_state.items():
if key not in resolved_state:
logger.debug("Resolving conflicted state %r:%r", key, events)
resolved_state[key] = self._resolve_normal_events(
events, auth_events
)
return resolved_state
def _resolve_auth_events(self, events, auth_events):
reverse = [i for i in reversed(self._ordered_events(events))]
auth_events = dict(auth_events)
prev_event = reverse[0]
for event in reverse[1:]:
auth_events[(prev_event.type, prev_event.state_key)] = prev_event
try:
# FIXME: hs.get_auth() is bad style, but we need to do it to
# get around circular deps.
self.hs.get_auth().check(event, auth_events)
prev_event = event
except AuthError:
return prev_event
return event
def _resolve_normal_events(self, events, auth_events):
for event in self._ordered_events(events):
try:
# FIXME: hs.get_auth() is bad style, but we need to do it to
# get around circular deps.
self.hs.get_auth().check(event, auth_events)
return event
except AuthError:
pass
# Use the last event (the one with the least depth) if they all fail
# the auth check.
return event
def _ordered_events(self, events):
def key_func(e):
return -int(e.depth), hashlib.sha1(e.event_id).hexdigest()
return sorted(events, key=key_func)
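# Illustrative note (not part of the original module): _ordered_events sorts
# by the key (-depth, sha1(event_id)), so deeper events sort first and the
# hash breaks ties deterministically; e.g. events at depths 5 and 3 are
# visited as depth 5, then depth 3.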
|
|
# Copyright (c) 2013 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Classes to handle image files.
Collection of classes to handle image upload/download to/from Image service
(like Glance image storage and retrieval service) from/to VMware server.
"""
import httplib
import urllib
import urllib2
from oslo_utils import netutils
import six.moves.urllib.parse as urlparse
from cinder.i18n import _, _LI
from cinder.openstack.common import log as logging
from cinder.volume.drivers.vmware import error_util
from cinder.volume.drivers.vmware import vim_util
LOG = logging.getLogger(__name__)
USER_AGENT = 'OpenStack-ESX-Adapter'
READ_CHUNKSIZE = 65536
class GlanceFileRead(object):
"""Glance file read handler class."""
def __init__(self, glance_read_iter):
self.glance_read_iter = glance_read_iter
self.iter = self.get_next()
def read(self, chunk_size):
"""Read an item from the queue.
        The chunk size is ignored because the Client ImageBodyIterator
        uses its own CHUNKSIZE.
"""
try:
return self.iter.next()
except StopIteration:
return ""
def get_next(self):
"""Get the next item from the image iterator."""
for data in self.glance_read_iter:
yield data
def close(self):
"""A dummy close just to maintain consistency."""
pass
class VMwareHTTPFile(object):
"""Base class for VMDK file access over HTTP."""
def __init__(self, file_handle):
self.eof = False
self.file_handle = file_handle
def close(self):
"""Close the file handle."""
try:
self.file_handle.close()
except Exception as exc:
LOG.exception(exc)
def __del__(self):
"""Close the file handle on garbage collection."""
self.close()
def _build_vim_cookie_headers(self, vim_cookies):
"""Build ESX host session cookie headers."""
cookie_header = ""
for vim_cookie in vim_cookies:
cookie_header = vim_cookie.name + '=' + vim_cookie.value
break
return cookie_header
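    # Illustrative result (the cookie name and value are assumptions): if the
    # first vim cookie is named 'vmware_soap_session' with value '52ee...',
    # the header built above is the single string
    #
    #   'vmware_soap_session=52ee...'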
def write(self, data):
"""Write data to the file."""
raise NotImplementedError()
def read(self, chunk_size):
"""Read a chunk of data."""
raise NotImplementedError()
def get_size(self):
"""Get size of the file to be read."""
raise NotImplementedError()
def get_soap_url(self, scheme, host):
"""return IPv4/v6 compatible url constructed for host."""
if netutils.is_valid_ipv6(host):
return '%s://[%s]' % (scheme, host)
return '%s://%s' % (scheme, host)
def _fix_esx_url(self, url, host):
"""Fix netloc if it is a ESX host.
For a ESX host the netloc is set to '*' in the url returned in
HttpNfcLeaseInfo. The netloc is right IP when talking to a VC.
"""
urlp = urlparse.urlparse(url)
if urlp.netloc == '*':
scheme, _, path, params, query, fragment = urlp
url = urlparse.urlunparse((scheme, host, path, params,
query, fragment))
return url
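    # A worked example (the URL is hypothetical, for illustration only): an
    # ESX lease may return
    #
    #   https://*/ha-nfc/disk-0.vmdk
    #
    # which, with host='10.0.0.1', is rewritten to
    # 'https://10.0.0.1/ha-nfc/disk-0.vmdk'.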
def find_vmdk_url(self, lease_info, host):
"""Find the URL corresponding to a vmdk disk in lease info."""
url = None
for deviceUrl in lease_info.deviceUrl:
if deviceUrl.disk:
url = self._fix_esx_url(deviceUrl.url, host)
break
return url
class VMwareHTTPWriteFile(VMwareHTTPFile):
"""VMware file write handler class."""
def __init__(self, host, data_center_name, datastore_name, cookies,
file_path, file_size, scheme='https'):
soap_url = self.get_soap_url(scheme, host)
base_url = '%s/folder/%s' % (soap_url, file_path)
param_list = {'dcPath': data_center_name, 'dsName': datastore_name}
base_url = base_url + '?' + urllib.urlencode(param_list)
_urlparse = urlparse.urlparse(base_url)
scheme, netloc, path, _params, query, _fragment = _urlparse
if scheme == 'http':
conn = httplib.HTTPConnection(netloc)
elif scheme == 'https':
conn = httplib.HTTPSConnection(netloc)
conn.putrequest('PUT', path + '?' + query)
conn.putheader('User-Agent', USER_AGENT)
conn.putheader('Content-Length', file_size)
conn.putheader('Cookie', self._build_vim_cookie_headers(cookies))
conn.endheaders()
self.conn = conn
VMwareHTTPFile.__init__(self, conn)
def write(self, data):
"""Write to the file."""
self.file_handle.send(data)
def close(self):
"""Get the response and close the connection."""
try:
self.conn.getresponse()
except Exception as excep:
LOG.debug("Exception during HTTP connection close in "
"VMwareHTTPWrite. Exception is %s." % excep)
super(VMwareHTTPWriteFile, self).close()
class VMwareHTTPWriteVmdk(VMwareHTTPFile):
"""Write VMDK over HTTP using VMware HttpNfcLease."""
def __init__(self, session, host, rp_ref, vm_folder_ref, vm_create_spec,
vmdk_size):
"""Initialize a writer for vmdk file.
:param session: a valid api session to ESX/VC server
:param host: the ESX or VC host IP
:param rp_ref: resource pool into which backing VM is imported
:param vm_folder_ref: VM folder in ESX/VC inventory to use as parent
of backing VM
:param vm_create_spec: backing VM created using this create spec
:param vmdk_size: VMDK size to be imported into backing VM
"""
self._session = session
self._vmdk_size = vmdk_size
self._progress = 0
lease = session.invoke_api(session.vim, 'ImportVApp', rp_ref,
spec=vm_create_spec, folder=vm_folder_ref)
session.wait_for_lease_ready(lease)
self._lease = lease
lease_info = session.invoke_api(vim_util, 'get_object_property',
session.vim, lease, 'info')
self._vm_ref = lease_info.entity
# Find the url for vmdk device
url = self.find_vmdk_url(lease_info, host)
if not url:
msg = _("Could not retrieve URL from lease.")
LOG.exception(msg)
raise error_util.VimException(msg)
LOG.info(_LI("Opening vmdk url: %s for write.") % url)
# Prepare the http connection to the vmdk url
cookies = session.vim.client.options.transport.cookiejar
_urlparse = urlparse.urlparse(url)
scheme, netloc, path, _params, query, _fragment = _urlparse
if scheme == 'http':
conn = httplib.HTTPConnection(netloc)
elif scheme == 'https':
conn = httplib.HTTPSConnection(netloc)
if query:
path = path + '?' + query
conn.putrequest('PUT', path)
conn.putheader('User-Agent', USER_AGENT)
conn.putheader('Content-Length', str(vmdk_size))
conn.putheader('Overwrite', 't')
conn.putheader('Cookie', self._build_vim_cookie_headers(cookies))
conn.putheader('Content-Type', 'binary/octet-stream')
conn.endheaders()
self.conn = conn
VMwareHTTPFile.__init__(self, conn)
def write(self, data):
"""Write to the file."""
self._progress += len(data)
LOG.debug("Written %s bytes to vmdk." % self._progress)
self.file_handle.send(data)
def update_progress(self):
"""Updates progress to lease.
This call back to the lease is essential to keep the lease alive
across long running write operations.
"""
percent = int(float(self._progress) / self._vmdk_size * 100)
try:
LOG.debug("Updating progress to %s percent." % percent)
self._session.invoke_api(self._session.vim,
'HttpNfcLeaseProgress',
self._lease, percent=percent)
except error_util.VimException as ex:
LOG.exception(ex)
raise ex
def close(self):
"""End the lease and close the connection."""
state = self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim,
self._lease, 'state')
if state == 'ready':
self._session.invoke_api(self._session.vim, 'HttpNfcLeaseComplete',
self._lease)
LOG.debug("Lease released.")
else:
LOG.debug("Lease is already in state: %s." % state)
super(VMwareHTTPWriteVmdk, self).close()
def get_imported_vm(self):
""""Get managed object reference of the VM created for import."""
return self._vm_ref
class VMwareHTTPReadVmdk(VMwareHTTPFile):
"""read VMDK over HTTP using VMware HttpNfcLease."""
def __init__(self, session, host, vm_ref, vmdk_path, vmdk_size):
"""Initialize a writer for vmdk file.
During an export operation the vmdk disk is converted to a
stream-optimized sparse disk format. So the size of the VMDK
after export may be smaller than the current vmdk disk size.
:param session: a valid api session to ESX/VC server
:param host: the ESX or VC host IP
:param vm_ref: backing VM whose vmdk is to be exported
:param vmdk_path: datastore relative path to vmdk file to be exported
:param vmdk_size: current disk size of vmdk file to be exported
"""
self._session = session
self._vmdk_size = vmdk_size
self._progress = 0
lease = session.invoke_api(session.vim, 'ExportVm', vm_ref)
session.wait_for_lease_ready(lease)
self._lease = lease
lease_info = session.invoke_api(vim_util, 'get_object_property',
session.vim, lease, 'info')
# find the right disk url corresponding to given vmdk_path
url = self.find_vmdk_url(lease_info, host)
if not url:
msg = _("Could not retrieve URL from lease.")
LOG.exception(msg)
raise error_util.VimException(msg)
LOG.info(_LI("Opening vmdk url: %s for read.") % url)
cookies = session.vim.client.options.transport.cookiejar
headers = {'User-Agent': USER_AGENT,
'Cookie': self._build_vim_cookie_headers(cookies)}
request = urllib2.Request(url, None, headers)
conn = urllib2.urlopen(request)
VMwareHTTPFile.__init__(self, conn)
def read(self, chunk_size):
"""Read a chunk from file."""
data = self.file_handle.read(READ_CHUNKSIZE)
self._progress += len(data)
LOG.debug("Read %s bytes from vmdk." % self._progress)
return data
def update_progress(self):
"""Updates progress to lease.
This call back to the lease is essential to keep the lease alive
across long running read operations.
"""
percent = int(float(self._progress) / self._vmdk_size * 100)
try:
LOG.debug("Updating progress to %s percent." % percent)
self._session.invoke_api(self._session.vim,
'HttpNfcLeaseProgress',
self._lease, percent=percent)
except error_util.VimException as ex:
LOG.exception(ex)
raise ex
def close(self):
"""End the lease and close the connection."""
state = self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim,
self._lease, 'state')
if state == 'ready':
self._session.invoke_api(self._session.vim, 'HttpNfcLeaseComplete',
self._lease)
LOG.debug("Lease released.")
else:
LOG.debug("Lease is already in state: %s." % state)
super(VMwareHTTPReadVmdk, self).close()
|
|
# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
import os
import mimetypes
from tweepy.binder import bind_api
from tweepy.error import TweepError
from tweepy.parsers import ModelParser, RawParser
from tweepy.utils import list_to_csv
class API(object):
"""Twitter API"""
def __init__(self, auth_handler=None,
host='api.twitter.com', search_host='search.twitter.com',
cache=None, secure=False, api_root='/1', search_root='',
retry_count=0, retry_delay=0, retry_errors=None,
parser=None):
self.auth = auth_handler
self.host = host
self.search_host = search_host
self.api_root = api_root
self.search_root = search_root
self.cache = cache
self.secure = secure
self.retry_count = retry_count
self.retry_delay = retry_delay
self.retry_errors = retry_errors
self.parser = parser or ModelParser()
""" statuses/public_timeline """
public_timeline = bind_api(
path = '/statuses/public_timeline.json',
payload_type = 'status', payload_list = True,
allowed_param = []
)
""" statuses/home_timeline """
home_timeline = bind_api(
path = '/statuses/home_timeline.json',
payload_type = 'status', payload_list = True,
allowed_param = ['since_id', 'max_id', 'count', 'page'],
require_auth = True
)
""" statuses/friends_timeline """
friends_timeline = bind_api(
path = '/statuses/friends_timeline.json',
payload_type = 'status', payload_list = True,
allowed_param = ['since_id', 'max_id', 'count', 'page'],
require_auth = True
)
""" statuses/user_timeline """
user_timeline = bind_api(
path = '/statuses/user_timeline.json',
payload_type = 'status', payload_list = True,
allowed_param = ['id', 'user_id', 'screen_name', 'since_id',
'max_id', 'count', 'page', 'include_rts',
'trim_user', 'include_entities']
)
""" statuses/mentions """
mentions = bind_api(
path = '/statuses/mentions.json',
payload_type = 'status', payload_list = True,
allowed_param = ['since_id', 'max_id', 'count', 'page'],
require_auth = True
)
"""/statuses/:id/retweeted_by.format"""
retweeted_by = bind_api(
path = '/statuses/{id}/retweeted_by.json',
payload_type = 'status', payload_list = True,
allowed_param = ['id', 'count', 'page'],
require_auth = True
)
"""/related_results/show/:id.format"""
related_results = bind_api(
path = '/related_results/show/{id}.json',
payload_type = 'relation', payload_list = True,
allowed_param = ['id'],
require_auth = False
)
"""/statuses/:id/retweeted_by/ids.format"""
retweeted_by_ids = bind_api(
path = '/statuses/{id}/retweeted_by/ids.json',
payload_type = 'ids',
allowed_param = ['id', 'count', 'page'],
require_auth = True
)
""" statuses/retweeted_by_me """
retweeted_by_me = bind_api(
path = '/statuses/retweeted_by_me.json',
payload_type = 'status', payload_list = True,
allowed_param = ['since_id', 'max_id', 'count', 'page'],
require_auth = True
)
""" statuses/retweeted_to_me """
retweeted_to_me = bind_api(
path = '/statuses/retweeted_to_me.json',
payload_type = 'status', payload_list = True,
allowed_param = ['since_id', 'max_id', 'count', 'page'],
require_auth = True
)
""" statuses/retweets_of_me """
retweets_of_me = bind_api(
path = '/statuses/retweets_of_me.json',
payload_type = 'status', payload_list = True,
allowed_param = ['since_id', 'max_id', 'count', 'page'],
require_auth = True
)
""" statuses/show """
get_status = bind_api(
path = '/statuses/show.json',
payload_type = 'status',
allowed_param = ['id']
)
""" statuses/update """
update_status = bind_api(
path = '/statuses/update.json',
method = 'POST',
payload_type = 'status',
allowed_param = ['status', 'in_reply_to_status_id', 'lat', 'long', 'source', 'place_id'],
require_auth = True
)
""" statuses/destroy """
destroy_status = bind_api(
path = '/statuses/destroy.json',
method = 'DELETE',
payload_type = 'status',
allowed_param = ['id'],
require_auth = True
)
""" statuses/retweet """
retweet = bind_api(
path = '/statuses/retweet/{id}.json',
method = 'POST',
payload_type = 'status',
allowed_param = ['id'],
require_auth = True
)
""" statuses/retweets """
retweets = bind_api(
path = '/statuses/retweets/{id}.json',
payload_type = 'status', payload_list = True,
allowed_param = ['id', 'count'],
require_auth = True
)
""" users/show """
get_user = bind_api(
path = '/users/show.json',
payload_type = 'user',
allowed_param = ['id', 'user_id', 'screen_name']
)
""" Perform bulk look up of users from user ID or screenname """
def lookup_users(self, user_ids=None, screen_names=None, include_entities=True):
return self._lookup_users(list_to_csv(user_ids), list_to_csv(screen_names), include_entities)
_lookup_users = bind_api(
path = '/users/lookup.json',
payload_type = 'user', payload_list = True,
allowed_param = ['user_id', 'screen_name', 'include_entities'],
)
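    # Illustrative call (an assumption, not from the original source):
    #
    #   api.lookup_users(user_ids=[123, 456], screen_names=['twitter'])
    #
    # list_to_csv collapses each list into the comma-separated 'user_id' and
    # 'screen_name' parameters expected by users/lookup.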
""" Get the authenticated user """
def me(self):
return self.get_user(screen_name=self.auth.get_username())
""" users/search """
search_users = bind_api(
path = '/users/search.json',
payload_type = 'user', payload_list = True,
require_auth = True,
allowed_param = ['q', 'per_page', 'page']
)
""" statuses/friends """
friends = bind_api(
path = '/statuses/friends.json',
payload_type = 'user', payload_list = True,
allowed_param = ['id', 'user_id', 'screen_name', 'page', 'cursor']
)
""" statuses/followers """
followers = bind_api(
path = '/statuses/followers.json',
payload_type = 'user', payload_list = True,
allowed_param = ['id', 'user_id', 'screen_name', 'page', 'cursor']
)
""" direct_messages """
direct_messages = bind_api(
path = '/direct_messages.json',
payload_type = 'direct_message', payload_list = True,
allowed_param = ['since_id', 'max_id', 'count', 'page'],
require_auth = True
)
""" direct_messages/show """
get_direct_message = bind_api(
path = '/direct_messages/show/{id}.json',
payload_type = 'direct_message',
allowed_param = ['id'],
require_auth = True
)
""" direct_messages/sent """
sent_direct_messages = bind_api(
path = '/direct_messages/sent.json',
payload_type = 'direct_message', payload_list = True,
allowed_param = ['since_id', 'max_id', 'count', 'page'],
require_auth = True
)
""" direct_messages/new """
send_direct_message = bind_api(
path = '/direct_messages/new.json',
method = 'POST',
payload_type = 'direct_message',
allowed_param = ['user', 'screen_name', 'user_id', 'text'],
require_auth = True
)
""" direct_messages/destroy """
destroy_direct_message = bind_api(
path = '/direct_messages/destroy.json',
method = 'DELETE',
payload_type = 'direct_message',
allowed_param = ['id'],
require_auth = True
)
""" friendships/create """
create_friendship = bind_api(
path = '/friendships/create.json',
method = 'POST',
payload_type = 'user',
allowed_param = ['id', 'user_id', 'screen_name', 'follow'],
require_auth = True
)
""" friendships/destroy """
destroy_friendship = bind_api(
path = '/friendships/destroy.json',
method = 'DELETE',
payload_type = 'user',
allowed_param = ['id', 'user_id', 'screen_name'],
require_auth = True
)
""" friendships/exists """
exists_friendship = bind_api(
path = '/friendships/exists.json',
payload_type = 'json',
allowed_param = ['user_a', 'user_b']
)
""" friendships/show """
show_friendship = bind_api(
path = '/friendships/show.json',
payload_type = 'friendship',
allowed_param = ['source_id', 'source_screen_name',
'target_id', 'target_screen_name']
)
""" friends/ids """
friends_ids = bind_api(
path = '/friends/ids.json',
payload_type = 'ids',
allowed_param = ['id', 'user_id', 'screen_name', 'cursor']
)
""" friendships/incoming """
friendships_incoming = bind_api(
path = '/friendships/incoming.json',
payload_type = 'ids',
allowed_param = ['cursor']
)
""" friendships/outgoing"""
friendships_outgoing = bind_api(
path = '/friendships/outgoing.json',
payload_type = 'ids',
allowed_param = ['cursor']
)
""" followers/ids """
followers_ids = bind_api(
path = '/followers/ids.json',
payload_type = 'ids',
allowed_param = ['id', 'user_id', 'screen_name', 'cursor']
)
""" account/verify_credentials """
def verify_credentials(self):
try:
return bind_api(
path = '/account/verify_credentials.json',
payload_type = 'user',
require_auth = True
)(self)
except TweepError, e:
if e.response and e.response.status == 401:
return False
raise
""" account/rate_limit_status """
rate_limit_status = bind_api(
path = '/account/rate_limit_status.json',
payload_type = 'json',
use_cache = False
)
""" account/update_delivery_device """
set_delivery_device = bind_api(
path = '/account/update_delivery_device.json',
method = 'POST',
allowed_param = ['device'],
payload_type = 'user',
require_auth = True
)
""" account/update_profile_colors """
update_profile_colors = bind_api(
path = '/account/update_profile_colors.json',
method = 'POST',
payload_type = 'user',
allowed_param = ['profile_background_color', 'profile_text_color',
'profile_link_color', 'profile_sidebar_fill_color',
'profile_sidebar_border_color'],
require_auth = True
)
""" account/update_profile_image """
def update_profile_image(self, filename):
headers, post_data = API._pack_image(filename, 700)
return bind_api(
path = '/account/update_profile_image.json',
method = 'POST',
payload_type = 'user',
require_auth = True
)(self, post_data=post_data, headers=headers)
""" account/update_profile_background_image """
def update_profile_background_image(self, filename, *args, **kargs):
headers, post_data = API._pack_image(filename, 800)
        return bind_api(
path = '/account/update_profile_background_image.json',
method = 'POST',
payload_type = 'user',
allowed_param = ['tile'],
require_auth = True
)(self, post_data=post_data, headers=headers)
""" account/update_profile """
update_profile = bind_api(
path = '/account/update_profile.json',
method = 'POST',
payload_type = 'user',
allowed_param = ['name', 'url', 'location', 'description'],
require_auth = True
)
""" favorites """
favorites = bind_api(
path = '/favorites.json',
payload_type = 'status', payload_list = True,
allowed_param = ['id', 'page']
)
""" favorites/create """
create_favorite = bind_api(
path = '/favorites/create/{id}.json',
method = 'POST',
payload_type = 'status',
allowed_param = ['id'],
require_auth = True
)
""" favorites/destroy """
destroy_favorite = bind_api(
path = '/favorites/destroy/{id}.json',
method = 'DELETE',
payload_type = 'status',
allowed_param = ['id'],
require_auth = True
)
""" notifications/follow """
enable_notifications = bind_api(
path = '/notifications/follow.json',
method = 'POST',
payload_type = 'user',
allowed_param = ['id', 'user_id', 'screen_name'],
require_auth = True
)
""" notifications/leave """
disable_notifications = bind_api(
path = '/notifications/leave.json',
method = 'POST',
payload_type = 'user',
allowed_param = ['id', 'user_id', 'screen_name'],
require_auth = True
)
""" blocks/create """
create_block = bind_api(
path = '/blocks/create.json',
method = 'POST',
payload_type = 'user',
allowed_param = ['id', 'user_id', 'screen_name'],
require_auth = True
)
""" blocks/destroy """
destroy_block = bind_api(
path = '/blocks/destroy.json',
method = 'DELETE',
payload_type = 'user',
allowed_param = ['id', 'user_id', 'screen_name'],
require_auth = True
)
""" blocks/exists """
def exists_block(self, *args, **kargs):
try:
bind_api(
path = '/blocks/exists.json',
allowed_param = ['id', 'user_id', 'screen_name'],
require_auth = True
)(self, *args, **kargs)
except TweepError:
return False
return True
""" blocks/blocking """
blocks = bind_api(
path = '/blocks/blocking.json',
payload_type = 'user', payload_list = True,
allowed_param = ['page'],
require_auth = True
)
""" blocks/blocking/ids """
blocks_ids = bind_api(
path = '/blocks/blocking/ids.json',
payload_type = 'json',
require_auth = True
)
""" report_spam """
report_spam = bind_api(
path = '/report_spam.json',
method = 'POST',
payload_type = 'user',
allowed_param = ['id', 'user_id', 'screen_name'],
require_auth = True
)
""" saved_searches """
saved_searches = bind_api(
path = '/saved_searches.json',
payload_type = 'saved_search', payload_list = True,
require_auth = True
)
""" saved_searches/show """
get_saved_search = bind_api(
path = '/saved_searches/show/{id}.json',
payload_type = 'saved_search',
allowed_param = ['id'],
require_auth = True
)
""" saved_searches/create """
create_saved_search = bind_api(
path = '/saved_searches/create.json',
method = 'POST',
payload_type = 'saved_search',
allowed_param = ['query'],
require_auth = True
)
""" saved_searches/destroy """
destroy_saved_search = bind_api(
path = '/saved_searches/destroy/{id}.json',
method = 'DELETE',
payload_type = 'saved_search',
allowed_param = ['id'],
require_auth = True
)
""" help/test """
def test(self):
try:
bind_api(
path = '/help/test.json',
)(self)
except TweepError:
return False
return True
def create_list(self, *args, **kargs):
return bind_api(
path = '/%s/lists.json' % self.auth.get_username(),
method = 'POST',
payload_type = 'list',
allowed_param = ['name', 'mode', 'description'],
require_auth = True
)(self, *args, **kargs)
def destroy_list(self, slug):
return bind_api(
path = '/%s/lists/%s.json' % (self.auth.get_username(), slug),
method = 'DELETE',
payload_type = 'list',
require_auth = True
)(self)
def update_list(self, slug, *args, **kargs):
return bind_api(
path = '/%s/lists/%s.json' % (self.auth.get_username(), slug),
method = 'POST',
payload_type = 'list',
allowed_param = ['name', 'mode', 'description'],
require_auth = True
)(self, *args, **kargs)
lists = bind_api(
path = '/{user}/lists.json',
payload_type = 'list', payload_list = True,
allowed_param = ['user', 'cursor'],
require_auth = True
)
lists_memberships = bind_api(
path = '/{user}/lists/memberships.json',
payload_type = 'list', payload_list = True,
allowed_param = ['user', 'cursor'],
require_auth = True
)
lists_subscriptions = bind_api(
path = '/{user}/lists/subscriptions.json',
payload_type = 'list', payload_list = True,
allowed_param = ['user', 'cursor'],
require_auth = True
)
list_timeline = bind_api(
path = '/{owner}/lists/{slug}/statuses.json',
payload_type = 'status', payload_list = True,
allowed_param = ['owner', 'slug', 'since_id', 'max_id', 'per_page', 'page']
)
get_list = bind_api(
path = '/{owner}/lists/{slug}.json',
payload_type = 'list',
allowed_param = ['owner', 'slug']
)
def add_list_member(self, slug, *args, **kargs):
return bind_api(
path = '/%s/%s/members.json' % (self.auth.get_username(), slug),
method = 'POST',
payload_type = 'list',
allowed_param = ['id'],
require_auth = True
)(self, *args, **kargs)
def remove_list_member(self, slug, *args, **kargs):
return bind_api(
path = '/%s/%s/members.json' % (self.auth.get_username(), slug),
method = 'DELETE',
payload_type = 'list',
allowed_param = ['id'],
require_auth = True
)(self, *args, **kargs)
list_members = bind_api(
path = '/{owner}/{slug}/members.json',
payload_type = 'user', payload_list = True,
allowed_param = ['owner', 'slug', 'cursor']
)
def is_list_member(self, owner, slug, user_id):
try:
return bind_api(
path = '/%s/%s/members/%s.json' % (owner, slug, user_id),
payload_type = 'user'
)(self)
except TweepError:
return False
subscribe_list = bind_api(
path = '/{owner}/{slug}/subscribers.json',
method = 'POST',
payload_type = 'list',
allowed_param = ['owner', 'slug'],
require_auth = True
)
unsubscribe_list = bind_api(
path = '/{owner}/{slug}/subscribers.json',
method = 'DELETE',
payload_type = 'list',
allowed_param = ['owner', 'slug'],
require_auth = True
)
list_subscribers = bind_api(
path = '/{owner}/{slug}/subscribers.json',
payload_type = 'user', payload_list = True,
allowed_param = ['owner', 'slug', 'cursor']
)
def is_subscribed_list(self, owner, slug, user_id):
try:
return bind_api(
path = '/%s/%s/subscribers/%s.json' % (owner, slug, user_id),
payload_type = 'user'
)(self)
except TweepError:
return False
""" trends/available """
trends_available = bind_api(
path = '/trends/available.json',
payload_type = 'json',
allowed_param = ['lat', 'long']
)
""" trends/location """
trends_location = bind_api(
path = '/trends/{woeid}.json',
payload_type = 'json',
allowed_param = ['woeid']
)
""" search """
search = bind_api(
search_api = True,
path = '/search.json',
payload_type = 'search_result', payload_list = True,
allowed_param = ['q', 'lang', 'locale', 'rpp', 'page', 'since_id', 'geocode', 'show_user', 'max_id', 'since', 'until', 'result_type']
)
search.pagination_mode = 'page'
""" trends """
trends = bind_api(
path = '/trends.json',
payload_type = 'json'
)
""" trends/current """
trends_current = bind_api(
path = '/trends/current.json',
payload_type = 'json',
allowed_param = ['exclude']
)
""" trends/daily """
trends_daily = bind_api(
path = '/trends/daily.json',
payload_type = 'json',
allowed_param = ['date', 'exclude']
)
""" trends/weekly """
trends_weekly = bind_api(
path = '/trends/weekly.json',
payload_type = 'json',
allowed_param = ['date', 'exclude']
)
""" geo/reverse_geocode """
reverse_geocode = bind_api(
path = '/geo/reverse_geocode.json',
payload_type = 'json',
allowed_param = ['lat', 'long', 'accuracy', 'granularity', 'max_results']
)
""" geo/nearby_places """
    # listed as deprecated in Twitter's API documentation
nearby_places = bind_api(
path = '/geo/nearby_places.json',
payload_type = 'json',
allowed_param = ['lat', 'long', 'ip', 'accuracy', 'granularity', 'max_results']
)
""" geo/id """
geo_id = bind_api(
path = '/geo/id/{id}.json',
payload_type = 'json',
allowed_param = ['id']
)
""" geo/search """
geo_search = bind_api(
path = '/geo/search.json',
payload_type = 'json',
allowed_param = ['lat', 'long', 'query', 'ip', 'granularity', 'accuracy', 'max_results', 'contained_within']
)
""" Internal use only """
@staticmethod
def _pack_image(filename, max_size):
"""Pack image from file into multipart-formdata post body"""
        # image must be less than max_size kb
        try:
            if os.path.getsize(filename) > (max_size * 1024):
                raise TweepError('File is too big, must be less than %skb.' % max_size)
        except os.error:
            raise TweepError('Unable to access file')
# image must be gif, jpeg, or png
# mimetypes.guess_type() returns a (type, encoding) tuple; check the type part
file_type = mimetypes.guess_type(filename)[0]
if file_type is None:
raise TweepError('Could not determine file type')
if file_type not in ['image/gif', 'image/jpeg', 'image/png']:
raise TweepError('Invalid file type for image: %s' % file_type)
# build the multipart/form-data body
fp = open(filename, 'rb')
BOUNDARY = 'Tw3ePy'
body = []
body.append('--' + BOUNDARY)
body.append('Content-Disposition: form-data; name="image"; filename="%s"' % filename)
body.append('Content-Type: %s' % file_type)
body.append('')
body.append(fp.read())
body.append('--' + BOUNDARY + '--')
body.append('')
fp.close()
body = '\r\n'.join(body)
# build headers
headers = {
'Content-Type': 'multipart/form-data; boundary=%s' % BOUNDARY,
'Content-Length': str(len(body))
}
return headers, body
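# A brief usage sketch (hypothetical values; 700 mirrors the size limit in
# the error message above):
#
#   headers, body = API._pack_image('avatar.png', 700)
#   # 'headers' and 'body' are then handed to the HTTP request machinery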
|
|
# pyOCD debugger
# Copyright (c) 2018-2020 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import logging.config
import six
import yaml
import os
import weakref
# inspect.getargspec is deprecated in Python 3.
try:
from inspect import getfullargspec as getargspec
except ImportError:
from inspect import getargspec
from . import exceptions
from .options_manager import OptionsManager
from ..board.board import Board
from ..utility.notification import Notifier
LOG = logging.getLogger(__name__)
## @brief Set of default config filenames to search for.
_CONFIG_FILE_NAMES = [
"pyocd.yaml",
"pyocd.yml",
".pyocd.yaml",
".pyocd.yml",
]
## @brief Set of default user script names to search for.
_USER_SCRIPT_NAMES = [
"pyocd_user.py",
".pyocd_user.py",
]
class Session(Notifier):
"""! @brief Top-level object for a debug session.
This class represents a debug session with a single debug probe. It is the root of the object
graph, where it owns the debug probe and the board objects.
Another important function of this class is that it contains a dictionary of session-scope
options. These would normally be passed in from the command line. Options can also be loaded
from a config file.
Precedence for session options:
1. Keyword arguments to constructor.
2. _options_ parameter to constructor.
3. Probe-specific options from a config file.
4. General options from a config file.
5. _option_defaults_ parameter to constructor.
The session also tracks several other objects:
- @ref pyocd.gdbserver.gdbserver.GDBServer "GDBServer" instances created for any cores.
- @ref pyocd.probe.tcp_probe_server.DebugProbeServer "DebugProbeServer".
- The user script proxy.
See the @ref pyocd.core.helpers.ConnectHelper "ConnectHelper" class for several methods that
make it easy to create new sessions, with or without user interaction in the case of multiple
available debug probes. A common pattern is to combine @ref
pyocd.core.helpers.ConnectHelper.session_with_chosen_probe()
"ConnectHelper.session_with_chosen_probe()" and a **with** block.
A Session instance can be used as a context manager. The session will, by default, be
automatically opened when the context is entered. And, of course, it will be closed when the
**with** block is exited (which is harmless if the session was never opened). If you wish to
disable automatic opening, set the `auto_open` parameter to the constructor to False. If an
exception is raised while opening a session inside a **with** statement, the session will be
closed for you to undo any partial initialisation.
"""
## @brief Weak reference to the most recently created session.
_current_session = None
@classmethod
def get_current(cls):
"""! @brief Return the most recently created Session instance or a default Session.
By default this method will return the most recently created Session object that is
still alive. If no live session exists, a new default session will be created and returned.
That at least provides access to the user's config file(s).
Used primarily so code that doesn't have a session reference can access session options. This
method should only be used to access options that are unlikely to differ between sessions,
or for debug or other purposes.
"""
if cls._current_session is not None:
return cls._current_session()
else:
return Session(None)
def __init__(self, probe, auto_open=True, options=None, option_defaults=None, **kwargs):
"""! @brief Session constructor.
Creates a new session using the provided debug probe. Session options are merged from the
_options_ parameter and any keyword arguments. Normally a board instance is created that can
either be a generic board or a board associated with the debug probe.
Note that the 'project_dir' and 'config' options must be set in either keyword arguments or
the _options_ parameter.
Passing in a _probe_ that is None is allowed. This is useful to create a session that operates
only as a container for session options. In this case, the board instance is not created, so the
#board attribute will be None. Such a Session cannot be opened.
@param self
@param probe The @ref pyocd.probe.debug_probe.DebugProbe "DebugProbe" instance. May be None.
@param auto_open Whether to automatically open the session when used as a context manager.
@param options Optional session options dictionary.
@param option_defaults Optional dictionary of session option values. This dictionary has the
lowest priority in determining final session option values, and is intended to set new
defaults for options if they are not set through any other method.
@param kwargs Session options passed as keyword arguments.
"""
super(Session, self).__init__()
Session._current_session = weakref.ref(self)
self._probe = probe
self._closed = True
self._inited = False
self._user_script_namespace = None
self._user_script_proxy = None
self._delegate = None
self._auto_open = auto_open
self._options = OptionsManager()
self._gdbservers = {}
self._probeserver = None
# Set this session on the probe, if we were given a probe.
if probe is not None:
probe.session = self
# Update options.
self._options.add_front(kwargs)
self._options.add_back(options)
# Init project directory.
if self.options.get('project_dir') is None:
self._project_dir = os.getcwd()
else:
self._project_dir = os.path.abspath(os.path.expanduser(self.options.get('project_dir')))
LOG.debug("Project directory: %s", self.project_dir)
# Apply common configuration settings from the config file.
config = self._get_config()
probesConfig = config.pop('probes', None)
self._options.add_back(config)
# Pick up any config file options for this board.
if (probe is not None) and (probesConfig is not None):
for uid, settings in probesConfig.items():
if str(uid).lower() in probe.unique_id.lower():
LOG.info("Using config settings for probe %s" % (probe.unique_id))
self._options.add_back(settings)
# Merge in lowest priority options.
self._options.add_back(option_defaults)
# Logging config.
self._configure_logging()
# Bail early if we weren't provided a probe.
if probe is None:
self._board = None
return
# Load the user script.
self._load_user_script()
# Ask the probe if it has an associated board, and if not then we create a generic one.
self._board = probe.create_associated_board() \
or Board(self, self.options.get('target_override'))
def _get_config(self):
# Load config file if one was provided via options, and no_config option was not set.
if not self.options.get('no_config'):
configPath = self.find_user_file('config_file', _CONFIG_FILE_NAMES)
if configPath is not None:
try:
with open(configPath, 'r') as configFile:
LOG.debug("Loading config from: %s", configPath)
config = yaml.safe_load(configFile)
if not isinstance(config, dict):
raise exceptions.Error("configuration file %s does not contain a top-level dictionary"
% configPath)
return config
except IOError as err:
LOG.warning("Error attempting to access config file '%s': %s", configPath, err)
return {}
def find_user_file(self, option_name, filename_list):
"""! @brief Search the project directory for a file.
@retval None No matching file was found.
@retval string An absolute path to the requested file.
"""
if option_name is not None:
filePath = self.options.get(option_name)
else:
filePath = None
# Look for default filenames if a path wasn't provided.
if filePath is None:
for filename in filename_list:
thisPath = os.path.join(self.project_dir, filename)
if os.path.isfile(thisPath):
filePath = thisPath
break
# Use the path passed in options, which may be absolute, relative to the
# home directory, or relative to the project directory.
else:
filePath = os.path.expanduser(filePath)
if not os.path.isabs(filePath):
filePath = os.path.join(self.project_dir, filePath)
return filePath
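# A short usage sketch: with no 'config_file' option set, this scans the
# project directory for the default names declared in _CONFIG_FILE_NAMES.
#
#   path = session.find_user_file('config_file', _CONFIG_FILE_NAMES)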
def _configure_logging(self):
"""! @brief Load a logging config dict or file."""
# Get logging config that could have been loaded from the config file.
config = self.options.get('logging')
# Allow logging setting to refer to another file.
if isinstance(config, six.string_types):
loggingConfigPath = self.find_user_file(None, [config])
if loggingConfigPath is not None:
try:
with open(loggingConfigPath, 'r') as configFile:
config = yaml.safe_load(configFile)
LOG.debug("Using logging configuration from: %s", config)
except IOError as err:
LOG.warning("Error attempting to load logging config file '%s': %s", config, err)
return
if config is not None:
# Stuff a version key if it's missing, to make it easier to use.
if 'version' not in config:
config['version'] = 1
# Set a different default for disabling existing loggers.
if 'disable_existing_loggers' not in config:
config['disable_existing_loggers'] = False
# Remove an empty 'loggers' key.
if ('loggers' in config) and (config['loggers'] is None):
del config['loggers']
try:
logging.config.dictConfig(config)
except (ValueError, TypeError, AttributeError, ImportError) as err:
LOG.warning("Error applying logging configuration: %s", err)
@property
def is_open(self):
"""! @brief Boolean of whether the session has been opened."""
return self._inited and not self._closed
@property
def probe(self):
"""! @brief The @ref pyocd.probe.debug_probe.DebugProbe "DebugProbe" instance."""
return self._probe
@property
def board(self):
"""! @brief The @ref pyocd.board.board.Board "Board" object."""
return self._board
@property
def target(self):
"""! @brief The @ref pyocd.core.target.soc_target "SoCTarget" object representing the SoC.
This is the @ref pyocd.core.target.soc_target "SoCTarget" instance owned by the board.
"""
return self.board.target
@property
def options(self):
"""! @brief The @ref pyocd.core.options_manager.OptionsManager "OptionsManager" object."""
return self._options
@property
def project_dir(self):
"""! @brief Path to the project directory."""
return self._project_dir
@property
def delegate(self):
"""! @brief An optional delegate object for customizing behaviour."""
return self._delegate
@delegate.setter
def delegate(self, new_delegate):
"""! @brief Setter for the `delegate` property."""
self._delegate = new_delegate
@property
def user_script_proxy(self):
"""! @brief The UserScriptDelegateProxy object for a loaded user script."""
return self._user_script_proxy
@property
def gdbservers(self):
"""! @brief Dictionary of core numbers to @ref pyocd.gdbserver.gdbserver.GDBServer "GDBServer" instances."""
return self._gdbservers
@property
def probeserver(self):
"""! @brief A @ref pyocd.probe.tcp_probe_server.DebugProbeServer "DebugProbeServer" instance."""
return self._probeserver
@probeserver.setter
def probeserver(self, server):
"""! @brief Setter for the `probeserver` property."""
self._probeserver = server
@property
def log_tracebacks(self):
"""! @brief Quick access to debug.traceback option since it is widely used."""
return self.options.get('debug.traceback')
def __enter__(self):
assert self._probe is not None
if self._auto_open:
try:
self.open()
except Exception:
self.close()
raise
return self
def __exit__(self, type, value, traceback):
self.close()
return False
def _init_user_script_namespace(self, user_script_path):
"""! @brief Create the namespace dict used for user scripts.
This initial namespace has only those objects that are available very early in the
session init process. For instance, the Target instance isn't available yet. The
_update_user_script_namespace() method is used to add such objects to the namespace
later on.
"""
import pyocd
import pyocd.flash.file_programmer
self._user_script_namespace = {
# Modules and classes
'pyocd': pyocd,
'exceptions': pyocd.core.exceptions,
'Error': pyocd.core.exceptions.Error,
'TransferError': pyocd.core.exceptions.TransferError,
'TransferFaultError': pyocd.core.exceptions.TransferFaultError,
'Target': pyocd.core.target.Target,
'State': pyocd.core.target.Target.State,
'SecurityState': pyocd.core.target.Target.SecurityState,
'BreakpointType': pyocd.core.target.Target.BreakpointType,
'WatchpointType': pyocd.core.target.Target.WatchpointType,
'VectorCatch': pyocd.core.target.Target.VectorCatch,
'Event': pyocd.core.target.Target.Event,
'RunType': pyocd.core.target.Target.RunType,
'HaltReason': pyocd.core.target.Target.HaltReason,
'ResetType': pyocd.core.target.Target.ResetType,
'MemoryType': pyocd.core.memory_map.MemoryType,
'MemoryMap': pyocd.core.memory_map.MemoryMap,
'RamRegion': pyocd.core.memory_map.RamRegion,
'RomRegion': pyocd.core.memory_map.RomRegion,
'FlashRegion': pyocd.core.memory_map.FlashRegion,
'DeviceRegion': pyocd.core.memory_map.DeviceRegion,
'FileProgrammer': pyocd.flash.file_programmer.FileProgrammer,
'FlashEraser': pyocd.flash.eraser.FlashEraser,
'FlashLoader': pyocd.flash.loader.FlashLoader,
# User script info
'__name__': os.path.splitext(os.path.basename(user_script_path))[0],
'__file__': user_script_path,
# Objects
'session': self,
'options': self.options,
'LOG': logging.getLogger('pyocd.user_script'),
}
def _update_user_script_namespace(self):
"""! @brief Add objects available only after init to the user script namespace."""
if self._user_script_namespace is not None:
self._user_script_namespace.update({
'probe': self.probe,
'board': self.board,
'target': self.target,
'dp': self.target.dp,
'aps': self.target.aps,
})
def _load_user_script(self):
scriptPath = self.find_user_file('user_script', _USER_SCRIPT_NAMES)
if scriptPath is not None:
try:
# Read the script source.
with open(scriptPath, 'r') as scriptFile:
LOG.debug("Loading user script: %s", scriptPath)
scriptSource = scriptFile.read()
self._init_user_script_namespace(scriptPath)
scriptCode = compile(scriptSource, scriptPath, 'exec')
# Executing the code will create definitions in the namespace for any
# functions or classes. A single namespace is shared for both globals and
# locals so that script-level definitions are available within the
# script functions.
six.exec_(scriptCode, self._user_script_namespace, self._user_script_namespace)
# Create the proxy for the user script. It becomes the delegate unless
# another delegate was already set.
self._user_script_proxy = UserScriptDelegateProxy(self._user_script_namespace)
if self._delegate is None:
self._delegate = self._user_script_proxy
except IOError as err:
LOG.warning("Error attempting to load user script '%s': %s", scriptPath, err)
def open(self, init_board=True):
"""! @brief Open the session.
This method does everything necessary to begin a debug session. It first loads the user
script, if there is one. The user script will be available via the _user_script_proxy_
property. Then it opens the debug probe and sets the clock rate from the `frequency` user
option. Finally, it inits the board (which will init the target, which performs the
full target init sequence).
@param self
@param init_board This parameter lets you prevent the board from being inited, which can
be useful in board bringup situations. It's also used by pyocd commander's "no init"
feature.
"""
if not self._inited:
assert self._probe is not None, "Cannot open a session without a probe."
assert self._board is not None, "Must have a board to open a session."
# Add in the full set of objects for the user script.
self._update_user_script_namespace()
self._probe.open()
self._closed = False
self._probe.set_clock(self.options.get('frequency'))
if init_board:
self._board.init()
self._inited = True
def close(self):
"""! @brief Close the session.
Uninits the board and disconnects then closes the probe.
"""
if self._closed:
return
self._closed = True
LOG.debug("uninit session %s", self)
if self._inited:
try:
self.board.uninit()
self._inited = False
except Exception:
LOG.error("exception during board uninit:", exc_info=self.log_tracebacks)
if self._probe.is_open:
try:
self._probe.disconnect()
except Exception:
LOG.error("probe exception during disconnect:", exc_info=self.log_tracebacks)
try:
self._probe.close()
except Exception:
LOG.error("probe exception during close:", exc_info=self.log_tracebacks)
class UserScriptFunctionProxy(object):
"""! @brief Proxy for user script functions.
This proxy makes arguments to user script functions optional.
"""
def __init__(self, fn):
self._fn = fn
self._spec = getargspec(fn)
def __call__(self, **kwargs):
args = {}
for arg in self._spec.args:
if arg in kwargs:
args[arg] = kwargs[arg]
self._fn(**args)
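# Illustration (hypothetical user script function): the proxy above lets a
# script declare only the keyword arguments it needs.
#
#   def will_connect(board):      # invoked as proxy(board=..., target=...)
#       ...                       # receives just 'board'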
class UserScriptDelegateProxy(object):
"""! @brief Delegate proxy for user scripts."""
def __init__(self, script_namespace):
super(UserScriptDelegateProxy, self).__init__()
self._script = script_namespace
def __getattr__(self, name):
if name in self._script:
fn = self._script[name]
return UserScriptFunctionProxy(fn)
else:
raise AttributeError(name)
|
|
from .base import Renderer, RenderContext
from . import index as indexrenderer
from . import compound as compoundrenderer
from docutils import nodes
import textwrap
class RstContentCreator(object):
def __init__(self, list_type, dedent):
self.list_type = list_type
self.dedent = dedent
def __call__(self, text):
# Remove the first line which is "embed:rst[:leading-asterisk]"
text = "\n".join(text.split(u"\n")[1:])
# Remove starting whitespace
text = self.dedent(text)
# Inspired by autodoc.py in Sphinx
result = self.list_type()
for line in text.split("\n"):
result.append(line, "<breathe>")
return result
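# Typical input handled above (from a Doxygen verbatim block): the first
# line is the "embed:rst" marker and is dropped, the rest is dedented.
#
#   embed:rst
#   .. note:: treated as reStructuredText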
class UnicodeRenderer(Renderer):
def render(self):
# Skip any nodes that are pure whitespace
# Probably need a better way to do this as currently we're only doing
# it to skip whitespace between higher-level nodes, but this will also
# skip any pure whitespace entries in actual content nodes
#
# We counter that second issue slightly by allowing through single white spaces
#
if self.data_object.strip():
return [self.node_factory.Text(self.data_object)]
elif self.data_object == unicode(" "):
return [self.node_factory.Text(self.data_object)]
else:
return []
class NullRenderer(Renderer):
def __init__(self):
pass
def render(self):
return []
class DoxygenToRstRendererFactory(object):
def __init__(
self,
node_type,
renderers,
renderer_factory_creator,
node_factory,
project_info,
state,
document,
rst_content_creator,
filter_,
target_handler,
domain_directive_factory
):
self.node_type = node_type
self.node_factory = node_factory
self.project_info = project_info
self.renderers = renderers
self.renderer_factory_creator = renderer_factory_creator
self.state = state
self.document = document
self.rst_content_creator = rst_content_creator
self.filter_ = filter_
self.target_handler = target_handler
self.domain_directive_factory = domain_directive_factory
def create_renderer(
self,
context
):
parent_data_object = context.node_stack[1]
data_object = context.node_stack[0]
if not self.filter_.allow(context.node_stack):
return NullRenderer()
child_renderer_factory = self.renderer_factory_creator.create_child_factory(
self.project_info,
data_object,
self
)
try:
node_type = data_object.node_type
except AttributeError as e:
# Horrible hack to silence errors on filtering unicode objects
# until we fix the parsing
if type(data_object) == unicode:
node_type = "unicode"
else:
raise e
Renderer = self.renderers[node_type]
common_args = [
self.project_info,
context,
child_renderer_factory,
self.node_factory,
self.state,
self.document,
self.target_handler,
self.domain_directive_factory
]
if node_type == "docmarkup":
creator = self.node_factory.inline
if data_object.type_ == "emphasis":
creator = self.node_factory.emphasis
elif data_object.type_ == "computeroutput":
creator = self.node_factory.literal
elif data_object.type_ == "bold":
creator = self.node_factory.strong
elif data_object.type_ == "superscript":
creator = self.node_factory.superscript
elif data_object.type_ == "subscript":
creator = self.node_factory.subscript
elif data_object.type_ == "center":
print("Warning: does not currently handle 'center' text display")
elif data_object.type_ == "small":
print("Warning: does not currently handle 'small' text display")
return Renderer(
creator,
*common_args
)
if node_type == "verbatim":
return Renderer(
self.rst_content_creator,
*common_args
)
if node_type == "compound":
kind = data_object.kind
if kind in ["file", "dir", "page", "example", "group"]:
return Renderer(indexrenderer.FileRenderer, *common_args)
class_ = indexrenderer.CompoundTypeSubRenderer
# For compound node types Renderer is CreateCompoundTypeSubRenderer
# as defined below. This could be cleaner
return Renderer(
class_,
*common_args
)
if node_type == "memberdef":
if data_object.kind in ("function", "slot") or (data_object.kind == 'friend' and data_object.argsstring):
Renderer = compoundrenderer.FuncMemberDefTypeSubRenderer
elif data_object.kind == "enum":
Renderer = compoundrenderer.EnumMemberDefTypeSubRenderer
elif data_object.kind == "typedef":
Renderer = compoundrenderer.TypedefMemberDefTypeSubRenderer
elif data_object.kind == "variable":
Renderer = compoundrenderer.VariableMemberDefTypeSubRenderer
elif data_object.kind == "define":
Renderer = compoundrenderer.DefineMemberDefTypeSubRenderer
if node_type == "param":
return Renderer(
parent_data_object.node_type != "templateparamlist",
*common_args
)
if node_type == "docsimplesect":
if data_object.kind == "par":
Renderer = compoundrenderer.ParDocSimpleSectTypeSubRenderer
return Renderer(
*common_args
)
class CreateCompoundTypeSubRenderer(object):
def __init__(self, parser_factory):
self.parser_factory = parser_factory
def __call__(self, class_, project_info, *args):
compound_parser = self.parser_factory.create_compound_parser(project_info)
return class_(compound_parser, project_info, *args)
class CreateRefTypeSubRenderer(object):
def __init__(self, parser_factory):
self.parser_factory = parser_factory
def __call__(self, project_info, *args):
compound_parser = self.parser_factory.create_compound_parser(project_info)
return compoundrenderer.RefTypeSubRenderer(compound_parser, project_info, *args)
class DoxygenToRstRendererFactoryCreator(object):
def __init__(
self,
node_factory,
parser_factory,
domain_directive_factory,
rst_content_creator,
project_info
):
self.node_factory = node_factory
self.parser_factory = parser_factory
self.domain_directive_factory = domain_directive_factory
self.rst_content_creator = rst_content_creator
self.project_info = project_info
def create_factory(self, node_stack, state, document, filter_, target_handler):
data_object = node_stack[0]
renderers = {
"doxygen" : indexrenderer.DoxygenTypeSubRenderer,
"compound" : CreateCompoundTypeSubRenderer(self.parser_factory),
"doxygendef" : compoundrenderer.DoxygenTypeSubRenderer,
"compounddef" : compoundrenderer.CompoundDefTypeSubRenderer,
"sectiondef" : compoundrenderer.SectionDefTypeSubRenderer,
"memberdef" : compoundrenderer.MemberDefTypeSubRenderer,
"enumvalue" : compoundrenderer.EnumvalueTypeSubRenderer,
"linkedtext" : compoundrenderer.LinkedTextTypeSubRenderer,
"description" : compoundrenderer.DescriptionTypeSubRenderer,
"param" : compoundrenderer.ParamTypeSubRenderer,
"docreftext" : compoundrenderer.DocRefTextTypeSubRenderer,
"docheading" : compoundrenderer.DocHeadingTypeSubRenderer,
"docpara" : compoundrenderer.DocParaTypeSubRenderer,
"docmarkup" : compoundrenderer.DocMarkupTypeSubRenderer,
"docparamlist" : compoundrenderer.DocParamListTypeSubRenderer,
"docparamlistitem" : compoundrenderer.DocParamListItemSubRenderer,
"docparamnamelist" : compoundrenderer.DocParamNameListSubRenderer,
"docparamname" : compoundrenderer.DocParamNameSubRenderer,
"docsect1" : compoundrenderer.DocSect1TypeSubRenderer,
"docsimplesect" : compoundrenderer.DocSimpleSectTypeSubRenderer,
"doctitle" : compoundrenderer.DocTitleTypeSubRenderer,
"docformula" : compoundrenderer.DocForumlaTypeSubRenderer,
"docimage" : compoundrenderer.DocImageTypeSubRenderer,
"docurllink" : compoundrenderer.DocURLLinkSubRenderer,
"listing" : compoundrenderer.ListingTypeSubRenderer,
"codeline" : compoundrenderer.CodeLineTypeSubRenderer,
"highlight" : compoundrenderer.HighlightTypeSubRenderer,
"templateparamlist" : compoundrenderer.TemplateParamListRenderer,
"inc" : compoundrenderer.IncTypeSubRenderer,
"ref" : CreateRefTypeSubRenderer(self.parser_factory),
"verbatim" : compoundrenderer.VerbatimTypeSubRenderer,
"mixedcontainer" : compoundrenderer.MixedContainerRenderer,
"unicode" : UnicodeRenderer,
"doclist": compoundrenderer.DocListTypeSubRenderer,
"doclistitem": compoundrenderer.DocListItemTypeSubRenderer,
}
try:
node_type = data_object.node_type
except AttributeError as e:
# Horrible hack to silence errors on filtering unicode objects
# until we fix the parsing
if type(data_object) == unicode:
node_type = "unicode"
else:
raise e
return DoxygenToRstRendererFactory(
"root",
renderers,
self,
self.node_factory,
self.project_info,
state,
document,
self.rst_content_creator,
filter_,
target_handler,
self.domain_directive_factory
)
def create_child_factory( self, project_info, data_object, parent_renderer_factory ):
try:
node_type = data_object.node_type
except AttributeError as e:
# Horrible hack to silence errors on filtering unicode objects
# until we fix the parsing
if type(data_object) == unicode:
node_type = "unicode"
else:
raise e
return DoxygenToRstRendererFactory(
node_type,
parent_renderer_factory.renderers,
self,
self.node_factory,
parent_renderer_factory.project_info,
parent_renderer_factory.state,
parent_renderer_factory.document,
self.rst_content_creator,
parent_renderer_factory.filter_,
parent_renderer_factory.target_handler,
parent_renderer_factory.domain_directive_factory
)
# FactoryFactoryFactory. Ridiculous but necessary.
class DoxygenToRstRendererFactoryCreatorConstructor(object):
def __init__(
self,
node_factory,
parser_factory,
domain_directive_factory,
rst_content_creator
):
self.node_factory = node_factory
self.parser_factory = parser_factory
self.domain_directive_factory = domain_directive_factory
self.rst_content_creator = rst_content_creator
def create_factory_creator(self, project_info, document, options, target_handler):
return DoxygenToRstRendererFactoryCreator(
self.node_factory,
self.parser_factory,
self.domain_directive_factory,
self.rst_content_creator,
project_info,
)
def format_parser_error(name, error, filename, state, lineno, do_unicode_warning):
warning = '%s: Unable to parse xml file "%s". ' % (name, filename)
explanation = 'Reported error: %s. ' % error
unicode_explanation_text = ""
unicode_explanation = []
if do_unicode_warning:
unicode_explanation_text = textwrap.dedent("""
Parsing errors are often due to unicode errors associated with the encoding of the original
source files. Doxygen propagates invalid characters from the input source files to the
output xml.""").strip().replace("\n", " ")
unicode_explanation = [nodes.paragraph("", "", nodes.Text(unicode_explanation_text))]
return [nodes.warning("",
nodes.paragraph("", "", nodes.Text(warning)),
nodes.paragraph("", "", nodes.Text(explanation)),
*unicode_explanation
),
state.document.reporter.warning(warning + explanation + unicode_explanation_text, line=lineno)
]
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import tempfile
import time
import six
from six.moves import range
import tensorflow.compat.v1 as tf # tf
from rouge import rouge_scorer
from summae import text_encoder
_ROCSTORIES_BASE = 'rocstories_springwintertrain.all'
PAD_ID = 0
EOS_ID = 1
def file_list(data_dir, subset, task='wiki103'):
"""Get list of example files."""
if task == 'wiki103':
pattern = os.path.join(data_dir, 'encoded.%s.*.tfrecord' % subset)
return tf.gfile.Glob(pattern)
elif task == 'rocstories':
if subset == 'train':
flist = tf.gfile.Glob(os.path.join(
data_dir, _ROCSTORIES_BASE + '.000[0-9].tfrecord'))
flist.extend(tf.gfile.Glob(os.path.join(
data_dir, _ROCSTORIES_BASE + '.001[0-7].tfrecord')))
assert len(flist) == 18
elif subset == 'valid':
flist = [os.path.join(data_dir, _ROCSTORIES_BASE + '.0018.tfrecord')]
elif subset == 'valid_gt':
flist = [os.path.join(data_dir, 'rocstories_gt.valid.tfrecord')]
elif subset == 'test_gt':
flist = [os.path.join(data_dir, 'rocstories_gt.test.tfrecord')]
else:
# Test
flist = [os.path.join(data_dir, _ROCSTORIES_BASE + '.0019.tfrecord')]
tf.logging.info('File list for %s: %s', subset, flist)
return flist
else:
tf.logging.fatal('Unsupported task %s', task)
def get_tokenizer(path):
tf.logging.info('Loaded vocabulary from: %s', path)
return text_encoder.SubwordTextEncoder(path)
def strip_after_eos(token_ids):
for i, tid in enumerate(token_ids):
if tid == text_encoder.EOS_ID:
return token_ids[:i]
# No EOS
return token_ids
def add_summary_if_exists(name, tensor):
try:
tf.summary.scalar(name, tensor)
except NameError:
tf.logging.info('skipping %s tf.summary', name)
def compute_avg_rouge(decodes, targets):
"""Computes ROUGE between two lists of strings.
Args:
decodes: list of strings for candidate
targets: list of reference strings
Returns:
3-tuple of rouge-1, rouge-2, rouge-L
"""
# TODO(peterjliu): Use BootstrapAggregator.
assert decodes
assert len(decodes) == len(targets)
rtypes = ['rouge1', 'rouge2', 'rougeL']
rs = rouge_scorer.RougeScorer(rtypes,
use_stemmer=True)
rouge_f = {}
for i in range(len(decodes)):
score_dict = rs.score(targets[i], decodes[i])
for rtype in rtypes:
if rtype not in rouge_f:
rouge_f[rtype] = []
rouge_f[rtype].append(score_dict[rtype].fmeasure)
def mean(lst):
return sum(lst) / len(lst)
return (mean(rouge_f['rouge1']),
mean(rouge_f['rouge2']),
mean(rouge_f['rougeL']))
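# A minimal usage sketch (toy strings, not real model output):
#
#   r1, r2, rl = compute_avg_rouge(
#       ['the cat sat on the mat'],
#       ['a cat was sitting on the mat'])
#   print('rouge1=%.3f rouge2=%.3f rougeL=%.3f' % (r1, r2, rl))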
def get_tokenizer_with_special(init_vocab_file_path, special_tokens):
"""Returns a text_encoder.SubwordTokenizer, but with extra special tokens.
New tokens are added to end of the vocabulary so that tokenized doesn't need
to be re-generated with new vocab.
Args:
init_vocab_file_path: path to initial vocab file
special_tokens: string list of special tokens to add
Returns:
A tuple of (tokenizer with special tokens, dict of special_token->id).
"""
# Add extra reserved tokens to the end of the vocab in a temporary file.
with tf.gfile.Open(init_vocab_file_path, 'rb') as f:
contents = f.read()
with tempfile.NamedTemporaryFile(delete=True) as f:
f.write(contents)
for s in special_tokens:
f.write(six.ensure_binary('%s_\n' % s, 'utf-8'))
f.flush()
tk = text_encoder.SubwordTextEncoder(f.name)
ids = {}
# Each line of the vocab ends with '\n', so there are vocab_size + 1
# elements in the result of the split below.
o_size = len(six.ensure_str(contents, 'utf-8').split('\n')) - 1
for i, s in enumerate(special_tokens):
ids[s] = o_size + i
return (tk, ids)
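# A short usage sketch (token names are illustrative):
#
#   tk, special_ids = get_tokenizer_with_special(vocab_path, ['<SEP>', '<MASK>'])
#   sep_id = special_ids['<SEP>']   # id appended after the original vocab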
def read_records(filename):
reader = tf.python_io.tf_record_iterator(filename)
records = []
for record in reader:
records.append(record)
if len(records) % 100000 == 0:
tf.logging.info('read: %d', len(records))
return records
def get_seq_exs(filename):
def parse(r):
s = tf.train.SequenceExample()
s.ParseFromString(r)
return s
return [parse(r) for r in read_records(filename)]
def write_records(records, out_filename):
writer = tf.python_io.TFRecordWriter(out_filename)
for count, record in enumerate(records):
writer.write(record)
if count > 0 and count % 100000 == 0:
tf.logging.info('write: %d', count)
writer.close()
def get_story(s):
return b' '.join(
[f.bytes_list.value[0]
for f in s.feature_lists.feature_list['untokenized_sentences'].feature])
def get_id(s):
return s.context.feature['storyid'].bytes_list.value[0]
def get_mturk_ground_truth(file_path):
"""Returns dict of story->summary_list."""
story2summaries = collections.defaultdict(list)
with tf.gfile.Open(file_path) as f:
reader = csv.DictReader(f)
for row in reader:
story = row['Input.story']
story2summaries[story].append(row['Answer.summary'])
return story2summaries
def checkpoint_file_gen(estimator, step_list_csv, sleep_secs,
max_sleep_secs=1800):
"""Yields model checkpoints for an estimator.
Args:
estimator: tf.Estimator object
step_list_csv: csv string of checkpoints
sleep_secs: how many seconds to wait for next checkpoint
max_sleep_secs: maximum cumulative sleep seconds
Yields:
Checkpoint file name.
"""
if step_list_csv:
for s in six.ensure_str(step_list_csv, 'utf-8').split(','):
yield os.path.join(estimator.model_dir, 'model.ckpt-%s' % s)
else:
# Continuous eval
prev_ckpt_file = ''
total_sleep_secs = 0.0
while True:
ckpt_file = estimator.latest_checkpoint()
if ckpt_file is None or ckpt_file == prev_ckpt_file:
# First checkpoint may cause issues with eval
if total_sleep_secs > max_sleep_secs:
tf.logging.info(
'Slept for %g s. Probably training is done. Exiting.',
total_sleep_secs)
break
tf.logging.info('Sleeping for %g s', sleep_secs)
time.sleep(sleep_secs)
total_sleep_secs += sleep_secs
continue
else:
total_sleep_secs = 0
prev_ckpt_file = ckpt_file
yield ckpt_file
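# Typical use (hypothetical estimator/input_fn): iterate over checkpoints as
# they appear, either from an explicit csv list or by polling the model dir.
#
#   for ckpt in checkpoint_file_gen(estimator, '', sleep_secs=60):
#       estimator.evaluate(input_fn, checkpoint_path=ckpt)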
|
|
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon_lib import exceptions
from horizon_lib import forms
from horizon_lib import messages
from horizon_lib import tabs
from horizon_lib.utils import memoized
from horizon_lib import workflows
from openstack_horizon import api
from openstack_horizon.dashboards.project.loadbalancers \
import forms as project_forms
from openstack_horizon.dashboards.project.loadbalancers \
import tables as project_tables
from openstack_horizon.dashboards.project.loadbalancers \
import tabs as project_tabs
from openstack_horizon.dashboards.project.loadbalancers import utils
from openstack_horizon.dashboards.project.loadbalancers \
import workflows as project_workflows
import re
class IndexView(tabs.TabView):
tab_group_class = (project_tabs.LoadBalancerTabs)
template_name = 'project/loadbalancers/details_tabs.html'
def post(self, request, *args, **kwargs):
obj_ids = request.POST.getlist('object_ids')
action = request.POST['action']
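# The posted action is expected to embed the object type as "delete<type>"
# and, for row-level deletes, to end with the object id; the regexes below
# recover both.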
m = re.search('.delete([a-z]+)', action).group(1)
if not obj_ids:
obj_ids.append(re.search('([0-9a-z-]+)$', action).group(1))
if m == 'monitor':
for obj_id in obj_ids:
try:
api.lbaas.pool_health_monitor_delete(request, obj_id)
messages.success(request, _('Deleted monitor %s') % obj_id)
except Exception as e:
exceptions.handle(request,
_('Unable to delete monitor. %s') % e)
if m == 'pool':
for obj_id in obj_ids:
try:
api.lbaas.pool_delete(request, obj_id)
messages.success(request, _('Deleted pool %s') % obj_id)
except Exception as e:
exceptions.handle(request,
_('Unable to delete pool. %s') % e)
if m == 'member':
for obj_id in obj_ids:
try:
api.lbaas.member_delete(request, obj_id)
messages.success(request, _('Deleted member %s') % obj_id)
except Exception as e:
exceptions.handle(request,
_('Unable to delete member. %s') % e)
if m == 'vip':
for obj_id in obj_ids:
vip_id = None
try:
vip_id = api.lbaas.pool_get(request, obj_id).vip_id
except Exception as e:
exceptions.handle(request,
_('Unable to locate VIP to delete. %s')
% e)
if vip_id is not None:
try:
api.lbaas.vip_delete(request, vip_id)
messages.success(request, _('Deleted VIP %s') % vip_id)
except Exception as e:
exceptions.handle(request,
_('Unable to delete VIP. %s') % e)
return self.get(request, *args, **kwargs)
class AddPoolView(workflows.WorkflowView):
workflow_class = project_workflows.AddPool
class AddVipView(workflows.WorkflowView):
workflow_class = project_workflows.AddVip
def get_initial(self):
initial = super(AddVipView, self).get_initial()
initial['pool_id'] = self.kwargs['pool_id']
try:
pool = api.lbaas.pool_get(self.request, initial['pool_id'])
initial['subnet'] = api.neutron.subnet_get(
self.request, pool.subnet_id).cidr
except Exception as e:
initial['subnet'] = ''
msg = _('Unable to retrieve pool subnet. %s') % e
exceptions.handle(self.request, msg)
return initial
class AddMemberView(workflows.WorkflowView):
workflow_class = project_workflows.AddMember
class AddMonitorView(workflows.WorkflowView):
workflow_class = project_workflows.AddMonitor
class PoolDetailsView(tabs.TabView):
tab_group_class = project_tabs.PoolDetailsTabs
template_name = 'project/loadbalancers/details_tabs.html'
@memoized.memoized_method
def get_data(self):
pid = self.kwargs['pool_id']
try:
pool = api.lbaas.pool_get(self.request, pid)
except Exception:
pool = []
exceptions.handle(self.request,
_('Unable to retrieve pool details.'))
else:
for monitor in pool.health_monitors:
display_name = utils.get_monitor_display_name(monitor)
setattr(monitor, 'display_name', display_name)
return pool
def get_context_data(self, **kwargs):
context = super(PoolDetailsView, self).get_context_data(**kwargs)
pool = self.get_data()
context['pool'] = pool
table = project_tables.PoolsTable(self.request)
context["url"] = self.get_redirect_url()
context["actions"] = table.render_row_actions(pool)
return context
def get_tabs(self, request, *args, **kwargs):
pool = self.get_data()
return self.tab_group_class(self.request, pool=pool, **kwargs)
@staticmethod
def get_redirect_url():
return reverse_lazy("horizon:project:loadbalancers:index")
class VipDetailsView(tabs.TabView):
tab_group_class = project_tabs.VipDetailsTabs
template_name = 'project/loadbalancers/details_tabs.html'
class MemberDetailsView(tabs.TabView):
tab_group_class = project_tabs.MemberDetailsTabs
template_name = 'project/loadbalancers/details_tabs.html'
@memoized.memoized_method
def get_data(self):
mid = self.kwargs['member_id']
try:
return api.lbaas.member_get(self.request, mid)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve member details.'))
def get_context_data(self, **kwargs):
context = super(MemberDetailsView, self).get_context_data(**kwargs)
member = self.get_data()
context['member'] = member
table = project_tables.MembersTable(self.request)
context["url"] = self.get_redirect_url()
context["actions"] = table.render_row_actions(member)
return context
def get_tabs(self, request, *args, **kwargs):
member = self.get_data()
return self.tab_group_class(request, member=member, **kwargs)
@staticmethod
def get_redirect_url():
return reverse_lazy("horizon:project:loadbalancers:index")
class MonitorDetailsView(tabs.TabView):
tab_group_class = project_tabs.MonitorDetailsTabs
template_name = 'project/loadbalancers/details_tabs.html'
@memoized.memoized_method
def get_data(self):
mid = self.kwargs['monitor_id']
try:
return api.lbaas.pool_health_monitor_get(self.request, mid)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve monitor details.'))
def get_context_data(self, **kwargs):
context = super(MonitorDetailsView, self).get_context_data(**kwargs)
monitor = self.get_data()
context['monitor'] = monitor
table = project_tables.MonitorsTable(self.request)
context["url"] = self.get_redirect_url()
context["actions"] = table.render_row_actions(monitor)
return context
def get_tabs(self, request, *args, **kwargs):
monitor = self.get_data()
return self.tab_group_class(request, monitor=monitor, **kwargs)
@staticmethod
def get_redirect_url():
return reverse_lazy("horizon:project:loadbalancers:index")
class UpdatePoolView(forms.ModalFormView):
form_class = project_forms.UpdatePool
template_name = "project/loadbalancers/updatepool.html"
context_object_name = 'pool'
success_url = reverse_lazy("horizon:project:loadbalancers:index")
def get_context_data(self, **kwargs):
context = super(UpdatePoolView, self).get_context_data(**kwargs)
context["pool_id"] = self.kwargs['pool_id']
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
pool_id = self.kwargs['pool_id']
try:
return api.lbaas.pool_get(self.request, pool_id)
except Exception as e:
redirect = self.success_url
msg = _('Unable to retrieve pool details. %s') % e
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
pool = self._get_object()
return {'name': pool['name'],
'pool_id': pool['id'],
'description': pool['description'],
'lb_method': pool['lb_method'],
'admin_state_up': pool['admin_state_up']}
class UpdateVipView(forms.ModalFormView):
form_class = project_forms.UpdateVip
template_name = "project/loadbalancers/updatevip.html"
context_object_name = 'vip'
success_url = reverse_lazy("horizon:project:loadbalancers:index")
def get_context_data(self, **kwargs):
context = super(UpdateVipView, self).get_context_data(**kwargs)
context["vip_id"] = self.kwargs['vip_id']
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
vip_id = self.kwargs['vip_id']
try:
return api.lbaas.vip_get(self.request, vip_id)
except Exception as e:
redirect = self.success_url
msg = _('Unable to retrieve VIP details. %s') % e
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
vip = self._get_object()
persistence = getattr(vip, 'session_persistence', None)
if persistence:
stype = persistence['type']
if stype == 'APP_COOKIE':
cookie = persistence['cookie_name']
else:
cookie = ''
else:
stype = ''
cookie = ''
return {'name': vip['name'],
'vip_id': vip['id'],
'description': vip['description'],
'pool_id': vip['pool_id'],
'session_persistence': stype,
'cookie_name': cookie,
'connection_limit': vip['connection_limit'],
'admin_state_up': vip['admin_state_up']}
class UpdateMemberView(forms.ModalFormView):
form_class = project_forms.UpdateMember
template_name = "project/loadbalancers/updatemember.html"
context_object_name = 'member'
success_url = reverse_lazy("horizon:project:loadbalancers:index")
def get_context_data(self, **kwargs):
context = super(UpdateMemberView, self).get_context_data(**kwargs)
context["member_id"] = self.kwargs['member_id']
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
member_id = self.kwargs['member_id']
try:
return api.lbaas.member_get(self.request, member_id)
except Exception as e:
redirect = self.success_url
msg = _('Unable to retrieve member details. %s') % e
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
member = self._get_object()
return {'member_id': member['id'],
'pool_id': member['pool_id'],
'weight': member['weight'],
'admin_state_up': member['admin_state_up']}
class UpdateMonitorView(forms.ModalFormView):
form_class = project_forms.UpdateMonitor
template_name = "project/loadbalancers/updatemonitor.html"
context_object_name = 'monitor'
success_url = reverse_lazy("horizon:project:loadbalancers:index")
def get_context_data(self, **kwargs):
context = super(UpdateMonitorView, self).get_context_data(**kwargs)
context["monitor_id"] = self.kwargs['monitor_id']
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
monitor_id = self.kwargs['monitor_id']
try:
return api.lbaas.pool_health_monitor_get(self.request, monitor_id)
except Exception as e:
redirect = self.success_url
msg = _('Unable to retrieve health monitor details. %s') % e
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
monitor = self._get_object()
return {'monitor_id': monitor['id'],
'delay': monitor['delay'],
'timeout': monitor['timeout'],
'max_retries': monitor['max_retries'],
'admin_state_up': monitor['admin_state_up']}
class AddPMAssociationView(workflows.WorkflowView):
workflow_class = project_workflows.AddPMAssociation
def get_initial(self):
initial = super(AddPMAssociationView, self).get_initial()
initial['pool_id'] = self.kwargs['pool_id']
try:
pool = api.lbaas.pool_get(self.request, initial['pool_id'])
initial['pool_name'] = pool.name
initial['pool_monitors'] = pool.health_monitors
except Exception as e:
msg = _('Unable to retrieve pool. %s') % e
exceptions.handle(self.request, msg)
return initial
class DeletePMAssociationView(workflows.WorkflowView):
workflow_class = project_workflows.DeletePMAssociation
def get_initial(self):
initial = super(DeletePMAssociationView, self).get_initial()
initial['pool_id'] = self.kwargs['pool_id']
try:
pool = api.lbaas.pool_get(self.request, initial['pool_id'])
initial['pool_name'] = pool.name
initial['pool_monitors'] = pool.health_monitors
except Exception as e:
msg = _('Unable to retrieve pool. %s') % e
exceptions.handle(self.request, msg)
return initial
|
|
import fcntl
import os
import random
import requests
import shutil
import subprocess
import time
import heroku
from will.mixins import StorageMixin
from will.utils import Bunch
from will import settings
STACK_NAMES = [
"alder",
"ash",
"aspen",
"basswood",
"birch",
"buckeye",
"buckthorn",
"catalpa",
"cedar",
"cherry",
"chestnut",
"chinkapin",
"cottonwood",
"cypress",
"dogwood",
"douglas-fir",
"elm",
"fir",
"filbert",
"giant-sequoia",
"hawthorn",
"hazel",
"hemlock",
"holly",
"horse-chestnut",
"juniper",
"larch",
"locust",
"madrone",
"maple",
"mountain-ash",
"mountain-mahogany",
"oak",
"oregon-myrtle",
"pear",
"pine",
"plum",
"poplar",
"red-cedar",
"redwood",
"spruce",
"sweetgum",
"sycamore",
"walnut",
"white-cedar",
"willow",
"yew",
]
COLLABORATOR_EMAILS = [
"matt@buddyup.org",
"steven@buddyup.org",
"brian@buddyup.org",
"will@buddyup.org",
]
STACKS_KEY = "will.servers.stacks"
def non_blocking_read(output):
fd = output.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
try:
return output.read()
except (IOError, OSError):
# A non-blocking read raises when no data is available yet.
return None
class Stack(Bunch):
@property
def adapter(self):
if not hasattr(self, "_adapter"):
self._adapter = HerokuAdapter(self)
return self._adapter
def ensure_created(self):
return self.adapter.ensure_created()
def deploy(self, new_config, code_only=False, force=False):
print "Deploying"
self.branch.deploy_config = new_config
return self.adapter.deploy(code_only=code_only, force=force)
def destroy(self):
print "destroying"
return self.adapter.destroy()
def get_monthly_cost(self):
return self.adapter.get_monthly_cost()
@property
def title(self):
return self.name.title()
@property
def id(self):
return self.name.lower().replace(" ", "_")
@property
def url_name(self):
return "%s%s" % (settings.DEPLOY_PREFIX, self.name.lower().replace(" ", "-"))
@property
def url(self):
return self.adapter.url
@property
def deploy_config(self):
return self.branch.deploy_config
@property
def deploy_output_key(self):
return "deploy_output_%s" % self.id
@property
def deploy_log_url(self):
return "%s/deploy-log/%s" % (settings.URL, self.id)
@property
def active_deploy_key(self):
return "active_deploy_%s" % self.id
class HerokuAdapter(Bunch, StorageMixin):
def __init__(self, stack, *args, **kwargs):
super(HerokuAdapter, self).__init__(*args, **kwargs)
self.stack = stack
self.heroku = heroku.from_key(settings.HEROKU_API_KEY)
def ensure_cli_auth(self):
cli_auth_path = os.path.abspath(os.path.expanduser("~/.will_cli_auth"))
if not os.path.exists(cli_auth_path):
netrc_path = os.path.abspath(os.path.expanduser("~/.netrc"))
if not os.path.exists(netrc_path):
with open(netrc_path, 'w+') as f:
f.write("""
machine api.heroku.com
login %(email)s
password %(token)s
machine code.heroku.com
login %(email)s
password %(token)s
""" % {
"email": settings.HEROKU_EMAIL,
"token": settings.HEROKU_API_KEY,
})
ssh_dir = os.path.abspath(os.path.expanduser("~/.ssh"))
if not os.path.exists(ssh_dir):
os.makedirs(ssh_dir)
ssh_config_path = os.path.abspath(os.path.expanduser("~/.ssh/config"))
if not os.path.exists(ssh_config_path):
with open(ssh_config_path, 'w+') as f:
f.write("""
UserKnownHostsFile /dev/null
StrictHostKeyChecking no
""")
id_rsa_path = os.path.abspath(os.path.expanduser("~/.ssh/will_id_rsa"))
if not os.path.exists(id_rsa_path):
with open(id_rsa_path, 'w+') as f:
f.write(settings.SSH.replace(";;", "\n"))
id_rsa_pub_path = os.path.abspath(os.path.expanduser("~/.ssh/will_id_rsa.pub"))
if not os.path.exists(id_rsa_pub_path):
with open(id_rsa_pub_path, 'w+') as f:
f.write(settings.SSH_PUB)
self.run_command("chmod 600 will_id_rsa", cwd=ssh_dir, auth_first=False)
with open(cli_auth_path, 'w+') as f:
f.write("Done")
def command_with_ssh(self, command):
return 'ssh-agent bash -c "ssh-add ~/.ssh/will_id_rsa; %s 2>&1"' % command.replace('"', r'\"')
def add_to_saved_output(self, additional_output, with_newline=True):
output = self.load(self.stack.deploy_output_key, "")
if with_newline:
output = "%s%s\n" % (output, additional_output)
else:
output = "%s%s" % (output, additional_output)
self.save(self.stack.deploy_output_key, output)
def run_subprocess_with_saved_output(self, command):
output = self.load(self.stack.deploy_output_key, "")
output = "%s\n" % output
p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
while p.poll() is None:
line = non_blocking_read(p.stdout)
if line:
if line[:len("Identity added: ")] != "Identity added: ":
output = "%s%s" % (output, line)
self.save(self.stack.deploy_output_key, output)
time.sleep(1)
try:
changed = False
out, err = p.communicate()
if out:
out = out.replace("\r", "")
output = "%s\n%s" % (output, out)
changed = True
if err:
err = err.replace("\r", "")
output = "%s\n%s" % (output, err)
changed = True
except ValueError:
pass
if changed:
self.save(self.stack.deploy_output_key, output)
if p.returncode != 0:
output = '%s\n\n======================\nError running %s (returncode: %s)' % (output, command, p.returncode)
self.save(self.stack.deploy_output_key, output)
raise Exception("Error running %sn\%s" % (command, output))
def run_heroku_cli_command(self, command, app=None, stream_output=True, cwd=None):
self.ensure_cli_auth()
if not "--app" in command and not "-a " in command:
if app:
command = "%s --app %s" % (command, app)
else:
command = "%s --app %s" % (command, self.stack.url_name)
command = self.command_with_ssh("heroku %s" % command)
if cwd:
command = "cd %s; %s" % (cwd, command)
print "running %s" % command
if not stream_output:
return subprocess.check_output(command, shell=True)
else:
return self.run_subprocess_with_saved_output(command)
def run_command(self, command, cwd=None, stream_output=True, auth_first=True):
if auth_first:
self.ensure_cli_auth()
if cwd:
command = "cd %s; %s" % (cwd, command)
command = self.command_with_ssh(command)
print "running %s" % command
if not stream_output:
return subprocess.check_output(command, shell=True)
else:
return self.run_subprocess_with_saved_output(command)
def get_code_dir(self):
"""Get a unique dir for this stack for holding the code (should speed up redeploys)"""
base_code_dir = os.path.abspath(os.path.expanduser("~/.will_codebases"))
if not os.path.exists(base_code_dir):
os.makedirs(base_code_dir)
stack_code_dir = os.path.join(base_code_dir, self.stack.id)
if not os.path.exists(stack_code_dir):
os.makedirs(stack_code_dir)
return stack_code_dir
def deploy(self, code_only=False, force=False):
if self.load(self.stack.active_deploy_key, False) and not force:
raise Exception("Deploy already in progress!")
else:
self.save(self.stack.active_deploy_key, True)
try:
self.ensure_created()
if not code_only:
# Clone the DB
if "cloned_database" in self.stack.deploy_config["heroku"]:
self.add_to_saved_output("Cloning the database:")
self.run_heroku_cli_command("pgbackups:capture --app %s --expire" % self.stack.deploy_config["heroku"]["cloned_database"])
self.add_to_saved_output(" - New backup made.")
url = self.run_heroku_cli_command("pgbackups:url --app %s" % self.stack.deploy_config["heroku"]["cloned_database"], stream_output=False).replace("\n","")
self.add_to_saved_output(" - URL verified.")
cached_config = dict(self.app.config.data)
stack_db_config_name = "DATABASE"
for k in cached_config:
if "HEROKU_POSTGRESQL_" in k:
stack_db_config_name = k
break
self.run_heroku_cli_command("pgbackups:restore %s --app %s --confirm %s %s " % (stack_db_config_name, self.stack.url_name, self.stack.url_name, url, ))
self.add_to_saved_output(" - Database restored.")
# Push code
code_dir = self.get_code_dir()
repo_dir = os.path.join(self.get_code_dir(), "repo")
# Make sure we have the code
if not os.path.exists(os.path.join(repo_dir, ".git", "config")):
self.add_to_saved_output("Cloning codebase:")
self.run_command("git clone %s repo" % self.stack.branch.repo_clone_url, cwd=code_dir)
self.run_command("git remote add heroku git@heroku.com:%s.git" % self.stack.url_name, cwd=repo_dir)
self.add_to_saved_output("Updating code:")
self.add_to_saved_output(" - fetching origin... ", with_newline=False)
self.run_command("git fetch origin %s" % self.stack.branch.name, cwd=repo_dir, stream_output=False)
self.add_to_saved_output("done.")
self.add_to_saved_output(" - checking out %s... " % self.stack.branch.name, with_newline=False)
self.run_command("git checkout %s" % self.stack.branch.name, cwd=repo_dir, stream_output=False)
self.add_to_saved_output("done.")
self.add_to_saved_output(" - pulling latest changes... ", with_newline=False)
self.run_command("git pull", cwd=repo_dir, stream_output=False)
self.add_to_saved_output("done.")
# Push to heroku
self.add_to_saved_output("Pushing to heroku:")
self.run_command("git push heroku %s:master --force" % self.stack.branch.name, cwd=repo_dir)
# Post-deploy hooks
if "post_deploy" in self.stack.deploy_config["heroku"]:
self.add_to_saved_output("Running post-deploy commands:")
post_deploy_command_types = self.stack.deploy_config["heroku"]["post_deploy"]
if "heroku" in post_deploy_command_types:
for cmd in post_deploy_command_types["heroku"]:
self.add_to_saved_output(" - heroku %s" % cmd)
self.run_heroku_cli_command(cmd, cwd=repo_dir)
if "shell" in post_deploy_command_types:
for cmd in post_deploy_command_types["shell"]:
self.add_to_saved_output(" - %s" % cmd)
self.run_command(cmd, cwd=repo_dir)
# Scale
if "scale" in self.stack.deploy_config["heroku"]:
self.add_to_saved_output("Scaling:")
for service, num_workers in self.stack.deploy_config["heroku"]["scale"].items():
self.run_heroku_cli_command("scale %s=%s" % (service, num_workers))
self.add_to_saved_output("- %s=%s" % (service, num_workers))
self.add_to_saved_output("Deploy complete")
self.add_to_saved_output('<a href="%s">%s</a>' % (self.stack.url, self.stack.url))
self.save(self.stack.active_deploy_key, False)
except:
self.save(self.stack.active_deploy_key, False)
raise
def ensure_collaborators(self):
self.add_to_saved_output("Ensuring collaborators:")
self.collaborators = [c.email for c in self.app.collaborators]
for c in COLLABORATOR_EMAILS:
self.add_to_saved_output(" - %s" % c)
if not c in self.collaborators:
data = {
"user": c,
"silent": "true",
}
headers = {
'Accept': 'application/vnd.heroku+json; version=3',
}
r = requests.post(
"https://api.heroku.com/apps/%s/collaborators" % self.stack.url_name,
headers=headers,
data=data,
auth=(settings.HEROKU_EMAIL, settings.HEROKU_API_KEY),
)
if r.status_code not in (200, 201):
if "message" in r.json() and "is already a collaborator" not in r.json()["message"]:
raise Exception("Unable to add %s as a collaborator. (%s)" % (c, r.json()["message"]))
def ensure_created(self):
self.save(self.stack.deploy_output_key, "")
creating = False
try:
# Get or create the app
try:
self.app = self.heroku.apps[self.stack.url_name]
self.add_to_saved_output("App exists: %s" % self.stack.url_name)
except KeyError:
# The app does not exist yet; fork or create it below.
creating = True
forked = False
if "fork" in self.stack.deploy_config["heroku"]:
self.run_heroku_cli_command("fork -a %s %s" %
(self.stack.deploy_config["heroku"]["fork"], self.stack.url_name)
)
self.add_to_saved_output("Forked to new app: %s" % self.stack.url_name)
self.app = self.heroku.apps[self.stack.url_name]
forked = True
else:
self.app = self.heroku.apps.add(self.stack.url_name)
self.add_to_saved_output("Created new app: %s" % self.stack.url_name)
self.addons = [k.name for k in self.app.addons]
cached_config = dict(self.app.config.data)
# Collaborators
self.ensure_collaborators()
# Labs
if "labs" in self.stack.deploy_config["heroku"]:
self.add_to_saved_output("Configuring Labs")
lab_config = self.run_heroku_cli_command("labs", stream_output=False)
for lab_feature in self.stack.deploy_config["heroku"]["labs"]:
self.add_to_saved_output(" - %s" % lab_feature)
enabled_str = "[+] %s" % lab_feature
if enabled_str not in lab_config:
self.run_heroku_cli_command("labs:enable %s" % (lab_feature,))
for lab_feature in lab_config.split("\n"):
if lab_feature[:3] == "[+]":
feature_name = lab_feature[4:lab_feature.find(" ", 5)]
print "enabled: %s" % feature_name
if feature_name not in self.stack.deploy_config["heroku"]["labs"]:
self.add_to_saved_output(" - %s (removed)" % feature_name)
self.run_heroku_cli_command("labs:disable %s --confirm %s" % (feature_name, self.stack.url_name))
# Addons
if "addons" in self.stack.deploy_config["heroku"]:
self.add_to_saved_output("Configuring addons:")
for addon in self.stack.deploy_config["heroku"]["addons"]:
self.add_to_saved_output(" - %s" % addon)
if addon not in self.addons:
print addon
if type(addon) == type(""):
addon_name = addon
addon_add_str = addon
else:
for addon_name, options_list in addon.items():
addon_add_str = addon_name
for option in options_list:
addon_add_str = "%s %s" % (addon_add_str, option)
try:
self.run_heroku_cli_command("addons:add %s --confirm %s" % (addon_add_str, self.stack.url_name))
self.addons.append(addon_name)
except Exception as e:
if "already installed" in str(e):
self.run_heroku_cli_command("addons:remove %s --confirm %s" % (addon_name, self.stack.url_name))
self.run_heroku_cli_command("addons:add %s --confirm %s" % (addon_add_str, self.stack.url_name))
self.addons.append(addon_name)
for addon_name in self.addons:
if addon_name.split(" ")[0] not in self.stack.deploy_config["heroku"]["addons"]:
self.run_heroku_cli_command("addons:remove %s --confirm %s" % (addon_name, self.stack.url_name))
self.add_to_saved_output(" - %s (removed)" % addon)
# Static Config
if "config" in self.stack.deploy_config["heroku"]:
self.add_to_saved_output("Configuring environment:")
for k, v in self.stack.deploy_config["heroku"]["config"].items():
self.add_to_saved_output(" - %s" % k)
if k not in cached_config or cached_config[k] != v:
if type(v) == type(""):
v = v.replace("$APP_NAME", self.stack.url_name)
self.app.config[k] = v
# Cloned config
if "cloned_config" in self.stack.deploy_config["heroku"]:
for app in self.stack.deploy_config["heroku"]["cloned_config"]:
other_app = self.heroku.apps[app]
other_cached_config = dict(other_app.config.data)
for k in self.stack.deploy_config["heroku"]["cloned_config"][app]:
self.add_to_saved_output(" - %s" % k)
if ((k not in cached_config and k in other_cached_config) or
(k in other_cached_config and cached_config[k] != other_cached_config[k])):
print "%s=%s" % (k, other_cached_config[k])
self.app.config[k] = other_cached_config[k]
except:
import traceback; traceback.print_exc()
if creating:
self.destroy(while_crashing=True)
raise
def destroy(self, while_crashing=False):
if not while_crashing:
self.save(self.stack.deploy_output_key, "")
try:
app = self.heroku.apps[self.stack.url_name]
code_dir = self.get_code_dir()
if os.path.exists(code_dir):
shutil.rmtree(code_dir)
app.destroy()
except KeyError:
# It's already been deleted.
pass
def get_monthly_cost(self):
print "get_monthly_cost"
@property
def url(self):
return "https://%s.herokuapp.com" % (self.stack.url_name)
class ServersMixin(object):
@property
def stacks(self):
return self.load(STACKS_KEY, {})
def save_stacks(self, stacks):
self.save(STACKS_KEY, stacks)
self._stacks = stacks
def new_stack(self, branch):
stacks = self.stacks
new_name = self.get_unused_stack_name()
new_stack = Stack(branch=branch, name=new_name)
stacks[new_stack.id] = new_stack
self.save_stacks(stacks)
return new_stack
def deploy(self, stack, branch=None, code_only=False, force=False):
if branch:
stack.branch = branch
config = stack.branch.deploy_config
stack.deploy(config, code_only=code_only, force=force)
def destroy_stack(self, stack):
stack.destroy()
stacks = self.stacks
del stacks[stack.id]
self.save_stacks(stacks)
self._stacks = stacks
def get_unused_stack_name(self):
have_a_good_name = False
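# NOTE: this loop assumes STACK_NAMES holds more names than there are
# stacks; if every name were taken it would never terminate.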
while not have_a_good_name:
new_name = random.choice(STACK_NAMES)
have_a_good_name = True
for stack_id, stack in self.stacks.items():
if stack.name == new_name:
have_a_good_name = False
return new_name
def get_stack_from_stack_name(self, stack_name):
# ID match
if stack_name in self.stacks:
return self.stacks[stack_name]
# Strict match
for stack_id, s in self.stacks.items():
if s.name == stack_name:
return s
# Looser match
for stack_id, s in self.stacks.items():
if s.id == stack_name or s.url_name == stack_name or s.name == stack_name:
return s
# Really loose match
stack_name = stack_name.lower()
for stack_id, s in self.stacks.items():
if s.id.lower() == stack_name or s.url_name.lower() == stack_name or s.name.lower() == stack_name:
return s
return None
def get_stack_from_branch_name(self, branch_name):
for s_id, s in self.stacks.items():
if s.branch.name == branch_name or s.branch.name == "feature/%s" % (branch_name,):
return Stack.restore_from_state(s)
return None
def prefixed_name(self, name):
return "%s%s" % (settings.DEPLOY_PREFIX, name)
|
|
"""Solvers for Ridge and LogisticRegression using SAG algorithm"""
# Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import make_dataset
from .sag_fast import sag
from ..exceptions import ConvergenceWarning
from ..utils import check_array
from ..utils.extmath import row_norms
def get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept,
n_samples=None,
is_saga=False):
"""Compute automatic step size for SAG solver
The step size is set to 1 / (alpha_scaled + L + fit_intercept) where L is
the max squared sum of X over all samples.
Parameters
----------
max_squared_sum : float
Maximum squared sum of X over samples.
alpha_scaled : float
Constant that multiplies the regularization term, scaled by
1. / n_samples, the number of samples.
loss : string, in {"log", "squared"}
The loss function used in SAG solver.
fit_intercept : bool
Specifies if a constant (a.k.a. bias or intercept) will be
added to the decision function.
n_samples : int, optional
Number of rows in X. Useful if is_saga=True.
is_saga : boolean, optional
Whether to return step size for the SAGA algorithm or the SAG
algorithm.
Returns
-------
step_size : float
Step size used in SAG solver.
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/document
Defazio, A., Bach F. & Lacoste-Julien S. (2014).
SAGA: A Fast Incremental Gradient Method With Support
for Non-Strongly Convex Composite Objectives
https://arxiv.org/abs/1407.0202
"""
if loss in ('log', 'multinomial'):
L = (0.25 * (max_squared_sum + int(fit_intercept)) + alpha_scaled)
elif loss == 'squared':
# inverse Lipschitz constant for squared loss
L = max_squared_sum + int(fit_intercept) + alpha_scaled
else:
raise ValueError("Unknown loss function for SAG solver, got %s "
"instead of 'log' or 'squared'" % loss)
if is_saga:
# SAGA theoretical step size is 1/3L or 1 / (2 * (L + mu n))
# See Defazio et al. 2014
mun = min(2 * n_samples * alpha_scaled, L)
step = 1. / (2 * L + mun)
else:
# SAG theoretical step size is 1/16L but it is recommended to use 1 / L
# see http://www.birs.ca//workshops//2014/14w5003/files/schmidt.pdf,
# slide 65
step = 1. / L
return step
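# A minimal usage sketch (illustration only; the data below is hypothetical):
#
# >>> import numpy as np
# >>> from sklearn.utils.extmath import row_norms
# >>> X = np.random.randn(100, 5)
# >>> mss = row_norms(X, squared=True).max()
# >>> get_auto_step_size(mss, alpha_scaled=0.01, loss='log', fit_intercept=True)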
def sag_solver(X, y, sample_weight=None, loss='log', alpha=1., beta=0.,
max_iter=1000, tol=0.001, verbose=0, random_state=None,
check_input=True, max_squared_sum=None,
warm_start_mem=None,
is_saga=False):
"""SAG solver for Ridge and LogisticRegression
SAG stands for Stochastic Average Gradient: the gradient of the loss is
estimated one sample at a time and the model is updated along the way with
a constant learning rate.
IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
same scale. You can normalize the data by using
sklearn.preprocessing.StandardScaler on your data before passing it to the
fit method.
This implementation works with data represented as dense numpy arrays or
sparse scipy arrays of floating point values for the features. It will
fit the data according to squared loss or log loss.
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using the squared euclidean norm L2.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values. With loss='multinomial', y must be label encoded
(see preprocessing.LabelEncoder).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
loss : 'log' | 'squared' | 'multinomial'
Loss function that will be optimized:
-'log' is the binary logistic loss, as used in LogisticRegression.
-'squared' is the squared loss, as used in Ridge.
-'multinomial' is the multinomial logistic loss, as used in
LogisticRegression.
.. versionadded:: 0.18
*loss='multinomial'*
alpha : float, optional
L2 regularization term in the objective function. Defaults to 1.
beta : float, optional
L1 regularization term in the objective function; only used when
is_saga=True. Defaults to 0.
max_iter : int, optional
The max number of passes over the training data if the stopping
criteria is not reached. Defaults to 1000.
tol : double, optional
The stopping criteria for the weights. The iterations will stop when
max(change in weights) / max(weights) < tol. Defaults to .001
verbose : integer, optional
The verbosity level.
random_state : int, RandomState instance or None, optional, default None
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. If None, it will be computed,
going through all the samples. The value should be precomputed
to speed up cross validation.
warm_start_mem : dict, optional
The initialization parameters used for warm starting. Warm starting is
currently used in LogisticRegression but not in Ridge.
It contains:
- 'coef': the weight vector, with the intercept in last line
if the intercept is fitted.
- 'gradient_memory': the scalar gradient for all seen samples.
- 'sum_gradient': the sum of gradient over all seen samples,
for each feature.
- 'intercept_sum_gradient': the sum of gradient over all seen
samples, for the intercept.
- 'seen': array of boolean describing the seen samples.
- 'num_seen': the number of seen samples.
is_saga : boolean, optional
Whether to use the SAGA algorithm or the SAG algorithm. SAGA behaves
better in the first epochs, and allows for l1 regularisation.
Returns
-------
coef_ : array, shape (n_features)
Weight vector.
n_iter_ : int
The number of full passes over all samples.
warm_start_mem : dict
Contains a 'coef' key with the fitted result, and possibly the
fitted intercept at the end of the array. Contains also other keys
used for warm starting.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> X = np.random.randn(n_samples, n_features)
>>> y = np.random.randn(n_samples)
>>> clf = linear_model.Ridge(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='sag', tol=0.001)
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> clf = linear_model.LogisticRegression(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
LogisticRegression(C=1.0, class_weight=None, dual=False,
fit_intercept=True, intercept_scaling=1, max_iter=100,
multi_class='ovr', n_jobs=1, penalty='l2', random_state=None,
solver='sag', tol=0.0001, verbose=0, warm_start=False)
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/document
Defazio, A., Bach F. & Lacoste-Julien S. (2014).
SAGA: A Fast Incremental Gradient Method With Support
for Non-Strongly Convex Composite Objectives
https://arxiv.org/abs/1407.0202
See also
--------
Ridge, SGDRegressor, ElasticNet, Lasso, SVR, and
LogisticRegression, SGDClassifier, LinearSVC, Perceptron
"""
if warm_start_mem is None:
warm_start_mem = {}
# Ridge default max_iter is None
if max_iter is None:
max_iter = 1000
if check_input:
X = check_array(X, dtype=np.float64, accept_sparse='csr', order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='C')
n_samples, n_features = X.shape[0], X.shape[1]
# As in SGD, the alpha is scaled by n_samples.
alpha_scaled = float(alpha) / n_samples
beta_scaled = float(beta) / n_samples
# if loss == 'multinomial', y should be label encoded.
n_classes = int(y.max()) + 1 if loss == 'multinomial' else 1
# initialization
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
if 'coef' in warm_start_mem.keys():
coef_init = warm_start_mem['coef']
else:
# assume fit_intercept is False
coef_init = np.zeros((n_features, n_classes), dtype=np.float64,
order='C')
# coef_init contains possibly the intercept_init at the end.
# Note that Ridge centers the data before fitting, so fit_intercept=False.
fit_intercept = coef_init.shape[0] == (n_features + 1)
if fit_intercept:
intercept_init = coef_init[-1, :]
coef_init = coef_init[:-1, :]
else:
intercept_init = np.zeros(n_classes, dtype=np.float64)
if 'intercept_sum_gradient' in warm_start_mem.keys():
intercept_sum_gradient = warm_start_mem['intercept_sum_gradient']
else:
intercept_sum_gradient = np.zeros(n_classes, dtype=np.float64)
if 'gradient_memory' in warm_start_mem.keys():
gradient_memory_init = warm_start_mem['gradient_memory']
else:
gradient_memory_init = np.zeros((n_samples, n_classes),
dtype=np.float64, order='C')
if 'sum_gradient' in warm_start_mem.keys():
sum_gradient_init = warm_start_mem['sum_gradient']
else:
sum_gradient_init = np.zeros((n_features, n_classes),
dtype=np.float64, order='C')
if 'seen' in warm_start_mem.keys():
seen_init = warm_start_mem['seen']
else:
seen_init = np.zeros(n_samples, dtype=np.int32, order='C')
if 'num_seen' in warm_start_mem.keys():
num_seen_init = warm_start_mem['num_seen']
else:
num_seen_init = 0
dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)
if max_squared_sum is None:
max_squared_sum = row_norms(X, squared=True).max()
step_size = get_auto_step_size(max_squared_sum, alpha_scaled, loss,
fit_intercept, n_samples=n_samples,
is_saga=is_saga)
if step_size * alpha_scaled == 1:
raise ZeroDivisionError("Current sag implementation does not handle "
"the case step_size * alpha_scaled == 1")
num_seen, n_iter_ = sag(dataset, coef_init,
intercept_init, n_samples,
n_features, n_classes, tol,
max_iter,
loss,
step_size, alpha_scaled,
beta_scaled,
sum_gradient_init,
gradient_memory_init,
seen_init,
num_seen_init,
fit_intercept,
intercept_sum_gradient,
intercept_decay,
is_saga,
verbose)
if n_iter_ == max_iter:
warnings.warn("The max_iter was reached which means "
"the coef_ did not converge", ConvergenceWarning)
if fit_intercept:
coef_init = np.vstack((coef_init, intercept_init))
warm_start_mem = {'coef': coef_init, 'sum_gradient': sum_gradient_init,
'intercept_sum_gradient': intercept_sum_gradient,
'gradient_memory': gradient_memory_init,
'seen': seen_init, 'num_seen': num_seen}
if loss == 'multinomial':
coef_ = coef_init.T
else:
coef_ = coef_init[:, 0]
return coef_, n_iter_, warm_start_mem
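# A minimal direct-call sketch (this is a private solver API; the values
# below are hypothetical and for illustration only):
#
# >>> import numpy as np
# >>> rng = np.random.RandomState(0)
# >>> X = rng.randn(50, 3); y = rng.randn(50)
# >>> coef, n_iter, mem = sag_solver(X, y, loss='squared', alpha=1., tol=1e-3)
# >>> # a second call can warm-start from the returned memory dict
# >>> coef2, n_iter2, mem2 = sag_solver(X, y, loss='squared', alpha=1., warm_start_mem=mem)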
|
|
# Copyright (c) 2015, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, absolute_import
import sys
import os
import json
import click
import hashlib
import cProfile
import StringIO
import pstats
import frappe
import frappe.utils
from frappe.utils import cint
from distutils.spawn import find_executable
from functools import wraps
click.disable_unicode_literals_warning = True
def pass_context(f):
@wraps(f)
def _func(ctx, *args, **kwargs):
profile = ctx.obj['profile']
if profile:
pr = cProfile.Profile()
pr.enable()
ret = f(frappe._dict(ctx.obj), *args, **kwargs)
if profile:
pr.disable()
s = StringIO.StringIO()
ps = pstats.Stats(pr, stream=s)\
.sort_stats('cumtime', 'tottime', 'ncalls')
ps.print_stats()
print s.getvalue()
return ret
return click.pass_context(_func)
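# Usage sketch (the command below is hypothetical): a click command
# decorated with @pass_context receives the shared context dict as its
# first argument and is wrapped in cProfile when invoked with --profile.
#
# @click.command('hello')
# @pass_context
# def hello(context):
# for site in context.sites:
# print site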
def get_single_site(context):
if len(context.sites) != 1:
print 'please select a site'
sys.exit(1)
site = context.sites[0]
return site
def call_command(cmd, context):
return click.Context(cmd, obj=context).forward(cmd)
@click.command('new-site')
@click.argument('site')
@click.option('--db-name', help='Database name')
@click.option('--mariadb-root-username', default='root', help='Root username for MariaDB')
@click.option('--mariadb-root-password', help='Root password for MariaDB')
@click.option('--admin-password', help='Administrator password for new site', default=None)
@click.option('--verbose', is_flag=True, default=False, help='Verbose')
@click.option('--force', help='Force restore if site/database already exists', is_flag=True, default=False)
@click.option('--source_sql', help='Initiate database with a SQL file')
@click.option('--install-app', multiple=True, help='Install app after site creation')
def new_site(site, mariadb_root_username=None, mariadb_root_password=None, admin_password=None, verbose=False, source_sql=None, force=None, install_app=None, db_name=None):
"Install a new site"
if not db_name:
db_name = hashlib.sha1(site).hexdigest()[:10]
frappe.init(site=site)
_new_site(db_name, site, mariadb_root_username=mariadb_root_username, mariadb_root_password=mariadb_root_password, admin_password=admin_password, verbose=verbose, install_apps=install_app, source_sql=source_sql, force=force)
if len(frappe.utils.get_sites()) == 1:
use(site)
def _new_site(db_name, site, mariadb_root_username=None, mariadb_root_password=None, admin_password=None, verbose=False, install_apps=None, source_sql=None,force=False, reinstall=False):
"Install a new Frappe site"
from frappe.installer import install_db, make_site_dirs
from frappe.installer import install_app as _install_app
import frappe.utils.scheduler
frappe.init(site=site)
try:
# enable scheduler post install?
enable_scheduler = _is_scheduler_enabled()
except:
enable_scheduler = False
install_db(root_login=mariadb_root_username, root_password=mariadb_root_password, db_name=db_name, admin_password=admin_password, verbose=verbose, source_sql=source_sql,force=force, reinstall=reinstall)
make_site_dirs()
_install_app("frappe", verbose=verbose, set_as_patched=not source_sql)
if frappe.conf.get("install_apps"):
for app in frappe.conf.install_apps:
_install_app(app, verbose=verbose, set_as_patched=not source_sql)
if install_apps:
for app in install_apps:
_install_app(app, verbose=verbose, set_as_patched=not source_sql)
frappe.utils.scheduler.toggle_scheduler(enable_scheduler)
scheduler_status = "disabled" if frappe.utils.scheduler.is_scheduler_disabled() else "enabled"
print "*** Scheduler is", scheduler_status, "***"
frappe.destroy()
def _is_scheduler_enabled():
enable_scheduler = False
try:
frappe.connect()
enable_scheduler = bool(cint(frappe.db.get_single_value("System Settings", "enable_scheduler")))
except:
pass
finally:
frappe.db.close()
return enable_scheduler
@click.command('restore')
@click.argument('sql-file-path')
@click.option('--mariadb-root-username', default='root', help='Root username for MariaDB')
@click.option('--mariadb-root-password', help='Root password for MariaDB')
@click.option('--db-name', help='Database name for site in case it is a new one')
@click.option('--admin-password', help='Administrator password for new site')
@click.option('--install-app', multiple=True, help='Install app after restore')
@pass_context
def restore(context, sql_file_path, mariadb_root_username=None, mariadb_root_password=None, db_name=None, verbose=None, install_app=None, admin_password=None, force=None):
"Restore site database from an sql file"
site = get_single_site(context)
frappe.init(site=site)
if not db_name:
db_name = frappe.conf.db_name
_new_site(db_name, site, mariadb_root_username=mariadb_root_username, mariadb_root_password=mariadb_root_password, admin_password=admin_password, verbose=context.verbose, install_apps=install_app, source_sql=sql_file_path, force=context.force)
@click.command('reinstall')
@pass_context
def reinstall(context):
"Reinstall site ie. wipe all data and start over"
site = get_single_site(context)
try:
frappe.init(site=site)
frappe.connect()
frappe.clear_cache()
installed = frappe.get_installed_apps()
frappe.clear_cache()
except Exception:
installed = []
finally:
if frappe.db:
frappe.db.close()
frappe.destroy()
frappe.init(site=site)
_new_site(frappe.conf.db_name, site, verbose=context.verbose, force=True, reinstall=True, install_apps=installed)
@click.command('install-app')
@click.argument('app')
@pass_context
def install_app(context, app):
"Install a new app to site"
from frappe.installer import install_app as _install_app
for site in context.sites:
frappe.init(site=site)
frappe.connect()
try:
_install_app(app, verbose=context.verbose)
finally:
frappe.destroy()
@click.command('list-apps')
@pass_context
def list_apps(context):
"Reinstall site ie. wipe all data and start over"
site = get_single_site(context)
frappe.init(site=site)
frappe.connect()
print "\n".join(frappe.get_installed_apps())
frappe.destroy()
@click.command('add-system-manager')
@click.argument('email')
@click.option('--first-name')
@click.option('--last-name')
@pass_context
def add_system_manager(context, email, first_name, last_name):
"Add a new system manager to a site"
import frappe.utils.user
for site in context.sites:
frappe.connect(site=site)
try:
frappe.utils.user.add_system_manager(email, first_name, last_name)
frappe.db.commit()
finally:
frappe.destroy()
@click.command('migrate')
@click.option('--rebuild-website', help="Rebuild webpages after migration")
@pass_context
def migrate(context, rebuild_website=False):
"Run patches, sync schema and rebuild files/translations"
import frappe.modules.patch_handler
import frappe.model.sync
from frappe.utils.fixtures import sync_fixtures
import frappe.translate
from frappe.desk.notifications import clear_notifications
for site in context.sites:
print 'Migrating', site
frappe.init(site=site)
frappe.connect()
try:
prepare_for_update()
# run patches
frappe.modules.patch_handler.run_all()
# sync
frappe.model.sync.sync_all(verbose=context.verbose)
frappe.translate.clear_cache()
sync_fixtures()
clear_notifications()
finally:
frappe.destroy()
if rebuild_website:
call_command(build_website, context)
else:
call_command(sync_www, context)
def prepare_for_update():
from frappe.sessions import clear_global_cache
clear_global_cache()
@click.command('run-patch')
@click.argument('module')
@pass_context
def run_patch(context, module):
"Run a particular patch"
import frappe.modules.patch_handler
for site in context.sites:
frappe.init(site=site)
try:
frappe.connect()
frappe.modules.patch_handler.run_single(module, force=context.force)
finally:
frappe.destroy()
@click.command('reload-doc')
@click.argument('module')
@click.argument('doctype')
@click.argument('docname')
@pass_context
def reload_doc(context, module, doctype, docname):
"Reload schema for a DocType"
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
frappe.reload_doc(module, doctype, docname, force=context.force)
frappe.db.commit()
finally:
frappe.destroy()
@click.command('build')
@click.option('--make-copy', is_flag=True, default=False, help='Copy the files instead of symlinking')
@click.option('--verbose', is_flag=True, default=False, help='Verbose')
def build(make_copy=False, verbose=False):
"Minify + concatenate JS and CSS files, build translations"
import frappe.build
import frappe
frappe.init('')
frappe.build.bundle(False, make_copy=make_copy, verbose=verbose)
@click.command('watch')
def watch():
"Watch and concatenate JS and CSS files as and when they change"
import frappe.build
frappe.init('')
frappe.build.watch(True)
@click.command('clear-cache')
@pass_context
def clear_cache(context):
"Clear cache, doctype cache and defaults"
import frappe.sessions
import frappe.website.render
from frappe.desk.notifications import clear_notifications
for site in context.sites:
try:
frappe.connect(site)
frappe.clear_cache()
clear_notifications()
frappe.website.render.clear_cache()
finally:
frappe.destroy()
@click.command('clear-website-cache')
@pass_context
def clear_website_cache(context):
"Clear website cache"
import frappe.website.render
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
frappe.website.render.clear_cache()
finally:
frappe.destroy()
@click.command('destroy-all-sessions')
@pass_context
def destroy_all_sessions(context):
"Clear sessions of all users (logs them out)"
import frappe.sessions
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
frappe.sessions.clear_all_sessions()
frappe.db.commit()
finally:
frappe.destroy()
@click.command('sync-www')
@click.option('--force', help='Rebuild all pages', is_flag=True, default=False)
@pass_context
def sync_www(context, force=False):
"Sync files from static pages from www directory to Web Pages"
from frappe.website import statics
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
statics.sync_statics(rebuild=force)
frappe.db.commit()
finally:
frappe.destroy()
@click.command('build-website')
@pass_context
def build_website(context):
"Sync statics and clear cache"
from frappe.website import render, statics
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
render.clear_cache()
statics.sync(verbose=context.verbose).start(True)
frappe.db.commit()
finally:
frappe.destroy()
@click.command('setup-docs')
@pass_context
def setup_docs(context):
"Setup docs in target folder of target app"
from frappe.utils.setup_docs import setup_docs
from frappe.website import statics
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
setup_docs()
statics.sync_statics(rebuild=True)
finally:
frappe.destroy()
@click.command('reset-perms')
@pass_context
def reset_perms(context):
"Reset permissions for all doctypes"
from frappe.permissions import reset_perms
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
for d in frappe.db.sql_list("""select name from `tabDocType`
where ifnull(istable, 0)=0 and ifnull(custom, 0)=0"""):
frappe.clear_cache(doctype=d)
reset_perms(d)
finally:
frappe.destroy()
@click.command('execute')
@click.argument('method')
@pass_context
def execute(context, method):
"execute a function"
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
print frappe.local.site
ret = frappe.get_attr(method)()
if frappe.db:
frappe.db.commit()
finally:
frappe.destroy()
if ret:
print ret
@click.command('celery')
@click.argument('args')
def celery(args):
"Run a celery command"
python = sys.executable
os.execv(python, [python, "-m", "frappe.celery_app"] + args.split())
@click.command('trigger-scheduler-event')
@click.argument('event')
@pass_context
def trigger_scheduler_event(context, event):
"Trigger a scheduler event"
import frappe.utils.scheduler
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
frappe.utils.scheduler.trigger(site, event, now=context.force)
finally:
frappe.destroy()
@click.command('enable-scheduler')
@pass_context
def enable_scheduler(context):
"Enable scheduler"
import frappe.utils.scheduler
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
frappe.utils.scheduler.enable_scheduler()
frappe.db.commit()
print "Enabled for", site
finally:
frappe.destroy()
@click.command('disable-scheduler')
@pass_context
def disable_scheduler(context):
"Disable scheduler"
import frappe.utils.scheduler
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
frappe.utils.scheduler.disable_scheduler()
frappe.db.commit()
print "Disabled for", site
finally:
frappe.destroy()
@click.command('export-doc')
@click.argument('doctype')
@click.argument('docname')
@pass_context
def export_doc(context, doctype, docname):
"Export a single document to csv"
import frappe.modules
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
frappe.modules.export_doc(doctype, docname)
finally:
frappe.destroy()
@click.command('export-json')
@click.argument('doctype')
@click.argument('name')
@click.argument('path')
@pass_context
def export_json(context, doctype, name, path):
"Export doclist as json to the given path, use '-' as name for Singles."
from frappe.core.page.data_import_tool import data_import_tool
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
data_import_tool.export_json(doctype, path, name=name)
finally:
frappe.destroy()
@click.command('export-csv')
@click.argument('doctype')
@click.argument('path')
@pass_context
def export_csv(context, doctype, path):
"Dump DocType as csv"
from frappe.core.page.data_import_tool import data_import_tool
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
data_import_tool.export_csv(doctype, path)
finally:
frappe.destroy()
@click.command('export-fixtures')
@pass_context
def export_fixtures(context):
"export fixtures"
from frappe.utils.fixtures import export_fixtures
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
export_fixtures()
finally:
frappe.destroy()
@click.command('import-doc')
@click.argument('path')
@pass_context
def import_doc(context, path, force=False):
"Import (insert/update) doclist. If the argument is a directory, all files ending with .json are imported"
from frappe.core.page.data_import_tool import data_import_tool
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
data_import_tool.import_doc(path, overwrite=context.force)
finally:
frappe.destroy()
@click.command('import-csv')
@click.argument('path')
@click.option('--only-insert', default=False, is_flag=True, help='Do not overwrite existing records')
@click.option('--submit-after-import', default=False, is_flag=True, help='Submit document after importing it')
@click.option('--ignore-encoding-errors', default=False, is_flag=True, help='Ignore encoding errors while converting to unicode')
@pass_context
def import_csv(context, path, only_insert=False, submit_after_import=False, ignore_encoding_errors=False):
"Import CSV using data import tool"
from frappe.core.page.data_import_tool import importer
from frappe.utils.csvutils import read_csv_content
site = get_single_site(context)
with open(path, 'r') as csvfile:
content = read_csv_content(csvfile.read())
frappe.init(site=site)
frappe.connect()
try:
importer.upload(content, submit_after_import=submit_after_import,
ignore_encoding_errors=ignore_encoding_errors, overwrite=not only_insert,
via_console=True)
frappe.db.commit()
except Exception:
print frappe.get_traceback()
frappe.destroy()
# translation
@click.command('build-message-files')
@pass_context
def build_message_files(context):
"Build message files for translation"
import frappe.translate
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
frappe.translate.rebuild_all_translation_files()
finally:
frappe.destroy()
@click.command('get-untranslated')
@click.argument('lang')
@click.argument('untranslated_file')
@click.option('--all', default=False, is_flag=True, help='Get all message strings')
@pass_context
def get_untranslated(context, lang, untranslated_file, all=None):
"Get untranslated strings for language"
import frappe.translate
site = get_single_site(context)
try:
frappe.init(site=site)
frappe.connect()
frappe.translate.get_untranslated(lang, untranslated_file, get_all=all)
finally:
frappe.destroy()
@click.command('update-translations')
@click.argument('lang')
@click.argument('untranslated_file')
@click.argument('translated-file')
@pass_context
def update_translations(context, lang, untranslated_file, translated_file):
"Update translated strings"
import frappe.translate
site = get_single_site(context)
try:
frappe.init(site=site)
frappe.connect()
frappe.translate.update_translations(lang, untranslated_file, translated_file)
finally:
frappe.destroy()
@click.command('set-admin-password')
@click.argument('admin-password')
@pass_context
def set_admin_password(context, admin_password):
"Set Administrator password for a site"
import getpass
for site in context.sites:
try:
frappe.init(site=site)
while not admin_password:
admin_password = getpass.getpass("Administrator's password for {0}: ".format(site))
frappe.connect()
frappe.db.sql("""update __Auth set `password`=password(%s)
where user='Administrator'""", (admin_password,))
frappe.db.commit()
admin_password = None
finally:
frappe.destroy()
@click.command('mysql')
@pass_context
def mysql(context):
"Start Mariadb console for a site"
site = get_single_site(context)
frappe.init(site=site)
msq = find_executable('mysql')
os.execv(msq, [msq, '-u', frappe.conf.db_name, '-p'+frappe.conf.db_password, frappe.conf.db_name, '-h', frappe.conf.db_host or "localhost", "-A"])
@click.command('console')
@pass_context
def console(context):
"Start ipython console for a site"
site = get_single_site(context)
frappe.init(site=site)
frappe.connect()
frappe.local.lang = frappe.db.get_default("lang")
import IPython
IPython.embed()
@click.command('run-tests')
@click.option('--app')
@click.option('--doctype')
@click.option('--test', multiple=True)
@click.option('--driver')
@click.option('--module')
@pass_context
def run_tests(context, app=None, module=None, doctype=None, test=(), driver=None):
"Run tests"
import frappe.test_runner
from frappe.utils import sel
tests = test
site = get_single_site(context)
frappe.init(site=site)
if frappe.conf.run_selenium_tests:
sel.start(context.verbose, driver)
ret = frappe.test_runner.main(app, module, doctype, context.verbose, tests=tests, force=context.force)
if len(ret.failures) == 0 and len(ret.errors) == 0:
ret = 0
if frappe.conf.run_selenium_tests:
sel.close()
sys.exit(ret)
@click.command('serve')
@click.option('--port', default=8000)
@click.option('--profile', is_flag=True, default=False)
@pass_context
def serve(context, port=None, profile=False, sites_path='.', site=None):
"Start development web server"
if not context.sites:
site = None
else:
site = context.sites[0]
import frappe.app
frappe.app.serve(port=port, profile=profile, site=site, sites_path='.')
@click.command('request')
@click.argument('args')
@pass_context
def request(context, args):
"Run a request as an admin"
import frappe.handler
import frappe.api
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
if "?" in args:
frappe.local.form_dict = frappe._dict([a.split("=") for a in args.split("?")[-1].split("&")])
else:
frappe.local.form_dict = frappe._dict()
if args.startswith("/api/method"):
frappe.local.form_dict.cmd = args.split("?")[0].split("/")[-1]
frappe.handler.execute_cmd(frappe.form_dict.cmd)
print frappe.response
finally:
frappe.destroy()
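# Example invocation (for illustration; 'frappe.ping' is assumed to be a
# whitelisted method): running
# request '/api/method/frappe.ping?arg=1'
# sets frappe.local.form_dict to {'arg': '1'} and, because the path starts
# with /api/method, executes cmd='frappe.ping' through frappe.handler.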
@click.command('doctor')
def doctor():
"Get diagnostic info about background workers"
from frappe.utils.doctor import doctor as _doctor
frappe.init('')
return _doctor()
@click.command('celery-doctor')
@click.option('--site', help='site name')
def celery_doctor(site=None):
"Get diagnostic info about background workers"
from frappe.utils.doctor import celery_doctor as _celery_doctor
frappe.init('')
return _celery_doctor(site=site)
@click.command('purge-all-tasks')
def purge_all_tasks():
"Purge any pending periodic tasks of 'all' event. Doesn't purge hourly, daily and weekly"
frappe.init('')
from frappe.utils.doctor import purge_pending_tasks
count = purge_pending_tasks()
print "Purged {} tasks".format(count)
@click.command('dump-queue-status')
def dump_queue_status():
"Dump detailed diagnostic infomation for task queues in JSON format"
frappe.init('')
from frappe.utils.doctor import dump_queue_status as _dump_queue_status
print json.dumps(_dump_queue_status(), indent=1)
@click.command('make-app')
@click.argument('destination')
@click.argument('app_name')
def make_app(destination, app_name):
from frappe.utils.boilerplate import make_boilerplate
make_boilerplate(destination, app_name)
@click.command('use')
@click.argument('site')
def _use(site, sites_path='.'):
use(site, sites_path=sites_path)
def use(site, sites_path='.'):
with open(os.path.join(sites_path, "currentsite.txt"), "w") as sitefile:
sitefile.write(site)
@click.command('backup')
@click.option('--with-files', default=False, is_flag=True, help="Take backup with files")
@pass_context
def backup(context, with_files=False, backup_path_db=None, backup_path_files=None, quiet=False):
"Backup"
from frappe.utils.backups import scheduled_backup
verbose = context.verbose
for site in context.sites:
frappe.init(site=site)
frappe.connect()
odb = scheduled_backup(ignore_files=not with_files, backup_path_db=backup_path_db, backup_path_files=backup_path_files, force=True)
if verbose:
from frappe.utils import now
print "database backup taken -", odb.backup_path_db, "- on", now()
if with_files:
print "files backup taken -", odb.backup_path_files, "- on", now()
frappe.destroy()
@click.command('remove-from-installed-apps')
@click.argument('app')
@pass_context
def remove_from_installed_apps(context, app):
from frappe.installer import remove_from_installed_apps
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
remove_from_installed_apps(app)
finally:
frappe.destroy()
@click.command('uninstall-app')
@click.argument('app')
@click.option('--dry-run', help='List all doctypes that will be deleted', is_flag=True, default=False)
@pass_context
def uninstall(context, app, dry_run=False):
from frappe.installer import remove_app
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
remove_app(app, dry_run)
finally:
frappe.destroy()
def move(dest_dir, site):
import os
if not os.path.isdir(dest_dir):
raise Exception, "destination is not a directory or does not exist"
frappe.init(site)
old_path = frappe.utils.get_site_path()
new_path = os.path.join(dest_dir, site)
# check if site dump of same name already exists
site_dump_exists = True
count = 0
while site_dump_exists:
final_new_path = new_path + (count and str(count) or "")
site_dump_exists = os.path.exists(final_new_path)
count = int(count or 0) + 1
os.rename(old_path, final_new_path)
frappe.destroy()
return final_new_path
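# Worked example (hypothetical paths): moving site "foo" into /backups when
# /backups/foo already exists probes /backups/foo1, then /backups/foo2, and
# so on; the counter loop above keeps going until an unused path is found.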
@click.command('set-config')
@click.argument('key')
@click.argument('value')
@pass_context
def set_config(context, key, value):
from frappe.installer import update_site_config
for site in context.sites:
frappe.init(site=site)
update_site_config(key, value)
frappe.destroy()
@click.command('drop-site')
@click.argument('site')
@click.option('--root-login', default='root')
@click.option('--root-password')
def drop_site(site, root_login='root', root_password=None):
from frappe.installer import get_current_host, make_connection
from frappe.model.db_schema import DbManager
from frappe.utils.backups import scheduled_backup
frappe.init(site=site)
frappe.connect()
scheduled_backup(ignore_files=False, force=True)
db_name = frappe.local.conf.db_name
frappe.local.db = make_connection(root_login, root_password)
dbman = DbManager(frappe.local.db)
dbman.delete_user(db_name, get_current_host())
dbman.drop_database(db_name)
archived_sites_dir = os.path.join(frappe.get_app_path('frappe'), '..', '..', '..', 'archived_sites')
if not os.path.exists(archived_sites_dir):
os.mkdir(archived_sites_dir)
move(archived_sites_dir, site)
@click.command('version')
@pass_context
def get_version(context):
frappe.init(site=context.sites[0])
for m in sorted(frappe.local.app_modules.keys()):
print "{0} {1}".format(m, frappe.get_module(m).__version__)
# commands = [
# new_site,
# restore,
# install_app,
# run_patch,
# migrate,
# add_system_manager,
# celery
# ]
commands = [
new_site,
restore,
reinstall,
install_app,
list_apps,
add_system_manager,
migrate,
run_patch,
reload_doc,
build,
watch,
clear_cache,
clear_website_cache,
destroy_all_sessions,
sync_www,
build_website,
setup_docs,
reset_perms,
execute,
celery,
trigger_scheduler_event,
enable_scheduler,
disable_scheduler,
export_doc,
export_json,
export_csv,
export_fixtures,
import_doc,
import_csv,
build_message_files,
get_untranslated,
update_translations,
set_admin_password,
mysql,
run_tests,
serve,
request,
doctor,
celery_doctor,
purge_all_tasks,
dump_queue_status,
console,
make_app,
_use,
backup,
remove_from_installed_apps,
uninstall,
drop_site,
set_config,
get_version,
]
|
|
from io import StringIO
import random
import string
import numpy as np
from pandas import Categorical, DataFrame, date_range, read_csv, to_datetime
from ..pandas_vb_common import BaseIO, tm
class ToCSV(BaseIO):
fname = "__test__.csv"
params = ["wide", "long", "mixed"]
param_names = ["kind"]
def setup(self, kind):
wide_frame = DataFrame(np.random.randn(3000, 30))
long_frame = DataFrame(
{
"A": np.arange(50000),
"B": np.arange(50000) + 1.0,
"C": np.arange(50000) + 2.0,
"D": np.arange(50000) + 3.0,
}
)
mixed_frame = DataFrame(
{
"float": np.random.randn(5000),
"int": np.random.randn(5000).astype(int),
"bool": (np.arange(5000) % 2) == 0,
"datetime": date_range("2001", freq="s", periods=5000),
"object": ["foo"] * 5000,
}
)
mixed_frame.loc[30:500, "float"] = np.nan
data = {"wide": wide_frame, "long": long_frame, "mixed": mixed_frame}
self.df = data[kind]
def time_frame(self, kind):
self.df.to_csv(self.fname)
class ToCSVDatetime(BaseIO):
fname = "__test__.csv"
def setup(self):
rng = date_range("1/1/2000", periods=1000)
self.data = DataFrame(rng, index=rng)
def time_frame_date_formatting(self):
self.data.to_csv(self.fname, date_format="%Y%m%d")
class ToCSVDatetimeBig(BaseIO):
fname = "__test__.csv"
timeout = 1500
params = [1000, 10000, 100000]
param_names = ["obs"]
def setup(self, obs):
d = "2018-11-29"
dt = "2018-11-26 11:18:27.0"
self.data = DataFrame(
{
"dt": [np.datetime64(dt)] * obs,
"d": [np.datetime64(d)] * obs,
"r": [np.random.uniform()] * obs,
}
)
def time_frame(self, obs):
self.data.to_csv(self.fname)
class StringIORewind:
def data(self, stringio_object):
stringio_object.seek(0)
return stringio_object
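# Why rewind: asv calls each time_* method repeatedly against the same
# setup state, so the shared StringIO must be seek(0)'d before every
# read_csv call or later runs would see an exhausted buffer. A minimal
# sketch (hypothetical benchmark class):
#
# class ReadSomething(StringIORewind):
# def setup(self):
# self.StringIO_input = StringIO("1\n2\n3\n")
# def time_read(self):
# read_csv(self.data(self.StringIO_input), header=None)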
class ReadCSVDInferDatetimeFormat(StringIORewind):
params = ([True, False], ["custom", "iso8601", "ymd"])
param_names = ["infer_datetime_format", "format"]
def setup(self, infer_datetime_format, format):
rng = date_range("1/1/2000", periods=1000)
formats = {
"custom": "%m/%d/%Y %H:%M:%S.%f",
"iso8601": "%Y-%m-%d %H:%M:%S",
"ymd": "%Y%m%d",
}
dt_format = formats[format]
self.StringIO_input = StringIO("\n".join(rng.strftime(dt_format).tolist()))
def time_read_csv(self, infer_datetime_format, format):
read_csv(
self.data(self.StringIO_input),
header=None,
names=["foo"],
parse_dates=["foo"],
infer_datetime_format=infer_datetime_format,
)
class ReadCSVConcatDatetime(StringIORewind):
iso8601 = "%Y-%m-%d %H:%M:%S"
def setup(self):
rng = date_range("1/1/2000", periods=50000, freq="S")
self.StringIO_input = StringIO("\n".join(rng.strftime(self.iso8601).tolist()))
def time_read_csv(self):
read_csv(
self.data(self.StringIO_input),
header=None,
names=["foo"],
parse_dates=["foo"],
infer_datetime_format=False,
)
class ReadCSVConcatDatetimeBadDateValue(StringIORewind):
params = (["nan", "0", ""],)
param_names = ["bad_date_value"]
def setup(self, bad_date_value):
self.StringIO_input = StringIO((f"{bad_date_value},\n") * 50000)
def time_read_csv(self, bad_date_value):
read_csv(
self.data(self.StringIO_input),
header=None,
names=["foo", "bar"],
parse_dates=["foo"],
infer_datetime_format=False,
)
class ReadCSVSkipRows(BaseIO):
fname = "__test__.csv"
params = [None, 10000]
param_names = ["skiprows"]
def setup(self, skiprows):
N = 20000
index = tm.makeStringIndex(N)
df = DataFrame(
{
"float1": np.random.randn(N),
"float2": np.random.randn(N),
"string1": ["foo"] * N,
"bool1": [True] * N,
"int1": np.random.randint(0, N, size=N),
},
index=index,
)
df.to_csv(self.fname)
def time_skiprows(self, skiprows):
read_csv(self.fname, skiprows=skiprows)
class ReadUint64Integers(StringIORewind):
def setup(self):
self.na_values = [2 ** 63 + 500]
arr = np.arange(10000).astype("uint64") + 2 ** 63
self.data1 = StringIO("\n".join(arr.astype(str).tolist()))
arr = arr.astype(object)
arr[500] = -1
self.data2 = StringIO("\n".join(arr.astype(str).tolist()))
def time_read_uint64(self):
read_csv(self.data(self.data1), header=None, names=["foo"])
def time_read_uint64_neg_values(self):
read_csv(self.data(self.data2), header=None, names=["foo"])
def time_read_uint64_na_values(self):
read_csv(
self.data(self.data1), header=None, names=["foo"], na_values=self.na_values
)
class ReadCSVThousands(BaseIO):
fname = "__test__.csv"
params = ([",", "|"], [None, ","])
param_names = ["sep", "thousands"]
def setup(self, sep, thousands):
N = 10000
K = 8
data = np.random.randn(N, K) * np.random.randint(100, 10000, (N, K))
df = DataFrame(data)
if thousands is not None:
fmt = f":{thousands}"
fmt = "{" + fmt + "}"
df = df.applymap(lambda x: fmt.format(x))
df.to_csv(self.fname, sep=sep)
def time_thousands(self, sep, thousands):
read_csv(self.fname, sep=sep, thousands=thousands)
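# For reference, the setup above renders each cell with Python's format
# spec, e.g. "{:,}".format(1234567.0) -> '1,234,567.0', so the generated
# CSV contains thousands-separated numbers whenever thousands is ",".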
class ReadCSVComment(StringIORewind):
def setup(self):
data = ["A,B,C"] + (["1,2,3 # comment"] * 100000)
self.StringIO_input = StringIO("\n".join(data))
def time_comment(self):
read_csv(
self.data(self.StringIO_input), comment="#", header=None, names=list("abc")
)
class ReadCSVFloatPrecision(StringIORewind):
params = ([",", ";"], [".", "_"], [None, "high", "round_trip"])
param_names = ["sep", "decimal", "float_precision"]
def setup(self, sep, decimal, float_precision):
floats = [
"".join(random.choice(string.digits) for _ in range(28)) for _ in range(15)
]
rows = sep.join([f"0{decimal}" + "{}"] * 3) + "\n"
data = rows * 5
data = data.format(*floats) * 200 # 1000 x 3 strings csv
self.StringIO_input = StringIO(data)
def time_read_csv(self, sep, decimal, float_precision):
read_csv(
self.data(self.StringIO_input),
sep=sep,
header=None,
names=list("abc"),
float_precision=float_precision,
)
def time_read_csv_python_engine(self, sep, decimal, float_precision):
read_csv(
self.data(self.StringIO_input),
sep=sep,
header=None,
engine="python",
float_precision=None,
names=list("abc"),
)
class ReadCSVCategorical(BaseIO):
fname = "__test__.csv"
def setup(self):
N = 100000
group1 = ["aaaaaaaa", "bbbbbbb", "cccccccc", "dddddddd", "eeeeeeee"]
df = DataFrame(np.random.choice(group1, (N, 3)), columns=list("abc"))
df.to_csv(self.fname, index=False)
def time_convert_post(self):
read_csv(self.fname).apply(Categorical)
def time_convert_direct(self):
read_csv(self.fname, dtype="category")
class ReadCSVParseDates(StringIORewind):
def setup(self):
data = """{},19:00:00,18:56:00,0.8100,2.8100,7.2000,0.0000,280.0000\n
{},20:00:00,19:56:00,0.0100,2.2100,7.2000,0.0000,260.0000\n
{},21:00:00,20:56:00,-0.5900,2.2100,5.7000,0.0000,280.0000\n
{},21:00:00,21:18:00,-0.9900,2.0100,3.6000,0.0000,270.0000\n
{},22:00:00,21:56:00,-0.5900,1.7100,5.1000,0.0000,290.0000\n
"""
two_cols = ["KORD,19990127"] * 5
data = data.format(*two_cols)
self.StringIO_input = StringIO(data)
def time_multiple_date(self):
read_csv(
self.data(self.StringIO_input),
sep=",",
header=None,
names=list(string.digits[:9]),
parse_dates=[[1, 2], [1, 3]],
)
def time_baseline(self):
read_csv(
self.data(self.StringIO_input),
sep=",",
header=None,
parse_dates=[1],
names=list(string.digits[:9]),
)
class ReadCSVCachedParseDates(StringIORewind):
params = ([True, False],)
param_names = ["do_cache"]
def setup(self, do_cache):
data = ("\n".join(f"10/{year}" for year in range(2000, 2100)) + "\n") * 10
self.StringIO_input = StringIO(data)
def time_read_csv_cached(self, do_cache):
try:
read_csv(
self.data(self.StringIO_input),
header=None,
parse_dates=[0],
cache_dates=do_cache,
)
except TypeError:
# cache_dates is a new keyword in 0.25
pass
class ReadCSVMemoryGrowth(BaseIO):
chunksize = 20
num_rows = 1000
fname = "__test__.csv"
def setup(self):
with open(self.fname, "w") as f:
for i in range(self.num_rows):
f.write(f"{i}\n")
def mem_parser_chunks(self):
# see gh-24805.
result = read_csv(self.fname, chunksize=self.chunksize)
for _ in result:
pass
class ReadCSVParseSpecialDate(StringIORewind):
params = (["mY", "mdY", "hm"],)
param_names = ["value"]
objects = {
"mY": "01-2019\n10-2019\n02/2000\n",
"mdY": "12/02/2010\n",
"hm": "21:34\n",
}
def setup(self, value):
count_elem = 10000
data = self.objects[value] * count_elem
self.StringIO_input = StringIO(data)
def time_read_special_date(self, value):
read_csv(
self.data(self.StringIO_input),
sep=",",
header=None,
names=["Date"],
parse_dates=["Date"],
)
class ParseDateComparison(StringIORewind):
params = ([False, True],)
param_names = ["cache_dates"]
def setup(self, cache_dates):
count_elem = 10000
data = "12-02-2010\n" * count_elem
self.StringIO_input = StringIO(data)
def time_read_csv_dayfirst(self, cache_dates):
try:
read_csv(
self.data(self.StringIO_input),
sep=",",
header=None,
names=["Date"],
parse_dates=["Date"],
cache_dates=cache_dates,
dayfirst=True,
)
except TypeError:
# cache_dates is a new keyword in 0.25
pass
def time_to_datetime_dayfirst(self, cache_dates):
df = read_csv(
self.data(self.StringIO_input), dtype={"date": str}, names=["date"]
)
to_datetime(df["date"], cache=cache_dates, dayfirst=True)
def time_to_datetime_format_DD_MM_YYYY(self, cache_dates):
df = read_csv(
self.data(self.StringIO_input), dtype={"date": str}, names=["date"]
)
to_datetime(df["date"], cache=cache_dates, format="%d-%m-%Y")
from ..pandas_vb_common import setup # noqa: F401 isort:skip
|
|
# ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
__author__ = 'Li Li'
import numpy as np
import six
import json
from collections import OrderedDict
import copy
import os
from atom.api import (Atom, Str, observe, Typed,
Int, Dict, List, Float, Enum, Bool)
from skxray.fitting.background import snip_method
from skxray.constants.api import XrfElement as Element
from skxray.fitting.xrf_model import (ModelSpectrum, ParamController,
trim, construct_linear_model, linear_spectrum_fitting)
#from pyxrf.model.fit_spectrum import fit_strategy_list
import logging
logger = logging.getLogger(__name__)
bound_options = ['none', 'lohi', 'fixed', 'lo', 'hi']
fit_strategy_list = ['fit_with_tail', 'free_more',
'e_calibration', 'linear',
'adjust_element1', 'adjust_element2', 'adjust_element3']
class Parameter(Atom):
# todo make sure that these are the only valid bound types
bound_type = Enum(*bound_options)
min = Float(-np.inf)
max = Float(np.inf)
value = Float()
default_value = Float()
fit_with_tail = Enum(*bound_options)
free_more = Enum(*bound_options)
adjust_element1 = Enum(*bound_options)
adjust_element2 = Enum(*bound_options)
adjust_element3 = Enum(*bound_options)
e_calibration = Enum(*bound_options)
linear = Enum(*bound_options)
name = Str()
description = Str()
tool_tip = Str()
@observe('name', 'bound_type', 'min', 'max', 'value', 'default_value')
def update_displayed_name(self, changed):
pass
# print(changed)
def __repr__(self):
return ("Parameter(bound_type={}, min={}, max={}, value={}, "
"default={}, free_more={}, adjust_element1={}, "
"adjust_element2={}, adjust_element3={}, "
"e_calibration={}, linear={}, description={}, "
"toop_tip={}".format(
self.bound_type, self.min, self.max, self.value, self.default_value,
self.free_more, self.adjust_element1, self.adjust_element2,
self.adjust_element3, self.e_calibration,
self.linear, self.description, self.tool_tip))
def to_dict(self):
return {
'bound_type': self.bound_type,
'min': self.min,
'max': self.max,
'value': self.value,
'default_value': self.default_value,
'fit_with_tail': self.fit_with_tail,
'free_more': self.free_more,
'adjust_element1': self.adjust_element1,
'adjust_element2': self.adjust_element2,
'adjust_element3': self.adjust_element3,
'e_calibration': self.e_calibration,
'linear': self.linear,
'name': self.name,
'description': self.description,
'tool_tip': self.tool_tip,
}
def format_dict(parameter_object_dict, element_list):
"""
Format the dictionary that scikit-xray expects.
Parameters
----------
parameter_object_dict : dict
element_list : list
List of element names; joined into a comma-separated string before
being saved into the returned dict.
"""
param_dict = {key: value.to_dict() for key, value
in six.iteritems(parameter_object_dict)}
elo = param_dict.pop('energy_bound_low')['value']
ehi = param_dict.pop('energy_bound_high')['value']
non_fitting_values = {'non_fitting_values': {
'energy_bound_low': elo,
'energy_bound_high': ehi,
'element_list': ', '.join(element_list)
}}
param_dict.update(non_fitting_values)
return param_dict
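# A minimal sketch (hypothetical values): only the two energy bounds are
# required; every other entry is carried through via Parameter.to_dict().
#
# >>> pdict = {'energy_bound_low': Parameter(value=1.5),
# ... 'energy_bound_high': Parameter(value=12.0)}
# >>> format_dict(pdict, ['Fe', 'Cu'])['non_fitting_values']['element_list']
# 'Fe, Cu'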
def dict_to_param(param_dict):
"""
Transfer param dict to parameter object.
Parameters
----------
param_dict : dict
Fitting parameters.
"""
temp_parameters = copy.deepcopy(param_dict)
non_fitting_values = temp_parameters.pop('non_fitting_values')
element_list = non_fitting_values.pop('element_list')
if not isinstance(element_list, list):
element_list = [e.strip(' ') for e in element_list.split(',')]
#self.element_list = element_list
elo = non_fitting_values.pop('energy_bound_low')
ehi = non_fitting_values.pop('energy_bound_high')
param = {
'energy_bound_low': Parameter(value=elo,
default_value=elo,
description='E low limit [keV]'),
'energy_bound_high': Parameter(value=ehi,
default_value=ehi,
description='E high limit [keV]')
}
for param_name, param_data in six.iteritems(temp_parameters):
if 'default_value' in param_data:
param.update({param_name: Parameter(**param_data)})
else:
param.update({
param_name: Parameter(default_value=param_data['value'],
**param_data)
})
return element_list, param
class PreFitStatus(Atom):
"""
Data structure for pre fit analysis.
Attributes
----------
z : str
z number of element
spectrum : array
spectrum of given element
status : bool
True if plot is visible
stat_copy : bool
copy of status
maxv : float
max value of a spectrum
norm : float
norm value with respect to the strongest peak
lbd_stat : bool
define plotting status under a threshold value
"""
z = Str()
energy = Str()
spectrum = Typed(np.ndarray)
status = Bool(False)
stat_copy = Bool(False)
maxv = Float()
norm = Float()
lbd_stat = Bool(False)
class ElementController(object):
"""
    This class provides basic operations to rank elements, show elements,
    calculate normed intensity, etc.
"""
def __init__(self):
self.element_dict = OrderedDict()
def delete_item(self, k):
try:
del self.element_dict[k]
self.update_norm()
logger.info('Item {} is deleted.'.format(k))
        except KeyError as e:
            logger.info(e)
def order(self, option='z'):
"""
Order dict in different ways.
"""
if option == 'z':
self.element_dict = OrderedDict(sorted(six.iteritems(self.element_dict),
key=lambda t: t[1].z))
elif option == 'energy':
self.element_dict = OrderedDict(sorted(six.iteritems(self.element_dict),
key=lambda t: t[1].energy))
elif option == 'name':
self.element_dict = OrderedDict(sorted(six.iteritems(self.element_dict),
key=lambda t: t[0]))
elif option == 'maxv':
self.element_dict = OrderedDict(sorted(six.iteritems(self.element_dict),
key=lambda t: t[1].maxv, reverse=True))
def add_to_dict(self, dictv):
self.element_dict.update(dictv)
self.update_norm()
def update_norm(self, threshv=0.1):
"""
Calculate the norm intensity for each element peak.
Parameters
----------
threshv : float
            No value is shown when smaller than the threshold value
"""
#max_dict = reduce(max, map(np.max, six.itervalues(self.element_dict)))
max_dict = np.max(np.array([v.maxv for v in six.itervalues(self.element_dict)]))
for v in six.itervalues(self.element_dict):
v.norm = v.maxv/max_dict*100
v.lbd_stat = bool(v.norm > threshv)
def delete_all(self):
self.element_dict.clear()
def get_element_list(self):
current_elements = [v for v in six.iterkeys(self.element_dict) if v.lower() != v]
logger.info('Current Elements for fitting are {}'.format(current_elements))
return current_elements
def update_peak_ratio(self):
"""
In case users change the max value.
"""
for v in six.itervalues(self.element_dict):
v.maxv = np.around(v.maxv, 1)
v.spectrum = v.spectrum*v.maxv/np.max(v.spectrum)
self.update_norm()
    def turn_on_all(self, option=True):
        """
        Set plotting status on or off for all lines.
        """
        _plot = bool(option)
        for v in six.itervalues(self.element_dict):
            v.status = _plot
class GuessParamModel(Atom):
"""
    This is the auto-fit model used to guess the initial parameters.
Attributes
----------
parameters : `atom.Dict`
        A dict of `Parameter` objects, where `Parameter` subclasses the
        `Atom` base class. These `Parameter` objects hold all relevant
        xrf information.
data : array
1D array of spectrum
prefit_x : array
        X axis with range defined by low and high limits.
result_dict : dict
Save all the auto fitting results for each element.
It is a dictionary of object PreFitStatus.
param_d : dict
Parameters can be transferred into this dictionary.
param_new : dict
        More information is saved, such as element position and width.
    total_y : dict
        Results from K lines
    total_y_l : dict
        Results from L lines
    total_y_m : dict
        Results from M lines
e_list : str
All elements used for fitting.
file_path : str
The path where file is saved.
element_list : list
"""
default_parameters = Dict()
#parameters = Dict() #Typed(OrderedDict) #OrderedDict()
data = Typed(object)
prefit_x = Typed(object)
result_dict = Typed(object) #Typed(OrderedDict)
result_dict_names = List()
#param_d = Dict()
param_new = Dict()
total_y = Dict()
total_y_l = Dict()
total_y_m = Dict()
e_name = Str()
add_element_intensity = Float()
#save_file = Str()
result_folder = Str()
#file_path = Str()
element_list = List()
data_sets = Typed(OrderedDict)
file_opt = Int()
data_all = Typed(np.ndarray)
EC = Typed(object)
def __init__(self, *args, **kwargs):
try:
self.default_parameters = kwargs['default_parameters']
#self.element_list, self.parameters = dict_to_param(self.default_parameters)
self.param_new = copy.deepcopy(self.default_parameters)
self.element_list = get_element(self.param_new)
#self.get_param(default_parameters)
except ValueError:
logger.info('No default parameter files are chosen.')
self.result_folder = kwargs['working_directory']
self.EC = ElementController()
def get_new_param(self, param_path):
"""
Update parameters if new param_path is given.
Parameters
----------
        param_path : str
            path of the parameter file to load
"""
with open(param_path, 'r') as json_data:
self.param_new = json.load(json_data)
#self.element_list, self.parameters = dict_to_param(self.param_new)
self.element_list = get_element(self.param_new)
self.EC.delete_all()
self.create_spectrum_from_file(self.param_new, self.element_list)
logger.info('Elements read from file are: {}'.format(self.element_list))
#self.element_list, self.parameters = self.get_param(new_param)
def create_spectrum_from_file(self, param_dict, elemental_lines):
"""
Create spectrum profile with given param dict from file.
Parameters
----------
param_dict : dict
dict obtained from file
elemental_lines : list
            e.g., ['Na_K', 'Mg_K', 'Pt_M'] refers to the
K lines of Sodium, the K lines of Magnesium, and the M
lines of Platinum
"""
self.prefit_x, pre_dict = calculate_profile(self.data,
param_dict, elemental_lines)
#factor_to_area = factor_height2area()
temp_dict = OrderedDict()
for e in six.iterkeys(pre_dict):
ename = e.split('_')[0]
for k, v in six.iteritems(param_dict):
if ename in k and 'area' in k:
energy = float(get_energy(e))
factor_to_area = factor_height2area(energy, self.param_new)
ratio = v['value']/factor_to_area
spectrum = pre_dict[e] #/ np.max(pre_dict[e]) * ratio
elif ename == 'compton' and k == 'compton_amplitude':
# the rest-mass energy of an electron (511 keV)
mc2 = 511
comp_denom = (1 + self.param_new['coherent_sct_energy']['value']
/ mc2 * (1 - np.cos(np.deg2rad(self.param_new['compton_angle']['value']))))
compton_energy = self.param_new['coherent_sct_energy']['value'] / comp_denom
factor_to_area = factor_height2area(compton_energy, self.param_new,
std_correction=self.param_new['compton_fwhm_corr']['value'])
spectrum = pre_dict[e] #/ np.max(pre_dict[e]) * ratio
elif ename == 'elastic' and k == 'coherent_sct_amplitude':
factor_to_area = factor_height2area(self.param_new['coherent_sct_energy']['value'],
self.param_new)
ratio = v['value']/factor_to_area
spectrum = pre_dict[e] #/ np.max(pre_dict[e]) * ratio
elif ename == 'background':
spectrum = pre_dict[e]
else:
continue
ps = PreFitStatus(z=get_Z(ename), energy=get_energy(e), spectrum=spectrum,
maxv=np.around(np.max(spectrum), 1),
norm=-1, lbd_stat=False)
temp_dict.update({e: ps})
self.EC.add_to_dict(temp_dict)
@observe('file_opt')
def choose_file(self, change):
if self.file_opt == 0:
return
        names = list(self.data_sets.keys())
self.data = self.data_sets[names[self.file_opt-1]].get_sum()
self.data_all = self.data_sets[names[self.file_opt-1]].raw_data
def manual_input(self):
default_area = 1e5
logger.info('Element {} is added'.format(self.e_name))
#param_dict = format_dict(self.parameters, self.element_list)
x, data_out = calculate_profile(self.data, self.param_new,
elemental_lines=[self.e_name], default_area=default_area)
ps = PreFitStatus(z=get_Z(self.e_name), energy=get_energy(self.e_name),
spectrum=data_out[self.e_name]/np.max(data_out[self.e_name])*self.add_element_intensity,
maxv=self.add_element_intensity, norm=-1,
lbd_stat=False)
self.EC.add_to_dict({self.e_name: ps})
def update_name_list(self):
"""
When result_dict_names change, the looper in enaml will update.
"""
# need to clean list first, in order to refresh the list in GUI
self.result_dict_names = []
        self.result_dict_names = list(self.EC.element_dict.keys())
logger.info('Current element names are {}'.format(self.result_dict_names))
def find_peak(self, threshv=0.1):
"""
        Run automatic peak finding, and save the results as a dict of objects.
"""
#param_dict = format_dict(self.parameters, self.element_list)
self.prefit_x, out_dict = linear_spectrum_fitting(self.data,
self.param_new)
logger.info('Energy range: {}, {}'.format(self.param_new['non_fitting_values']['energy_bound_low']['value'],
self.param_new['non_fitting_values']['energy_bound_high']['value']))
#max_dict = reduce(max, map(np.max, six.itervalues(out_dict)))
prefit_dict = OrderedDict()
for k, v in six.iteritems(out_dict):
ps = PreFitStatus(z=get_Z(k), energy=get_energy(k), spectrum=v,
maxv=np.around(np.max(v), 1), norm=-1,
lbd_stat=False)
prefit_dict.update({k: ps})
logger.info('The elements from parameter guess: {}'.format(
prefit_dict.keys()))
self.EC.add_to_dict(prefit_dict)
def create_full_param(self, peak_std=0.07):
"""
        Extend the param to a full param dict with detailed element
        information, and assign initial values from the pre fit.
        Parameters
        ----------
        peak_std : float
            approximate std of an element peak.
"""
self.element_list = self.EC.get_element_list()
self.param_new['non_fitting_values']['element_list'] = ', '.join(self.element_list)
#param_d = format_dict(self.parameters, self.element_list)
self.param_new = param_dict_cleaner(self.param_new, self.element_list)
        logger.info('element list before register: {}'.format(self.element_list))
# create full parameter list including elements
PC = ParamController(self.param_new, self.element_list)
#PC.create_full_param()
self.param_new = PC.params
# to create full param dict, for GUI only
create_full_dict(self.param_new, fit_strategy_list)
logger.info('full dict: {}'.format(self.param_new.keys()))
logger.info('incident energy: {}'.format(self.param_new['coherent_sct_energy']['value']))
# update according to pre fit results
if len(self.EC.element_dict):
for e in self.element_list:
zname = e.split('_')[0]
for k, v in six.iteritems(self.param_new):
if zname in k and 'area' in k:
factor_to_area = factor_height2area(float(self.EC.element_dict[e].energy),
self.param_new)
v['value'] = self.EC.element_dict[e].maxv * factor_to_area
if 'compton' in self.EC.element_dict:
gauss_factor = 1/(1 + self.param_new['compton_f_step']['value']
+ self.param_new['compton_f_tail']['value']
+ self.param_new['compton_hi_f_tail']['value'])
# the rest-mass energy of an electron (511 keV)
mc2 = 511
comp_denom = (1 + self.param_new['coherent_sct_energy']['value']
/ mc2 * (1 - np.cos(np.deg2rad(self.param_new['compton_angle']['value']))))
compton_energy = self.param_new['coherent_sct_energy']['value'] / comp_denom
factor_to_area = factor_height2area(compton_energy, self.param_new,
std_correction=self.param_new['compton_fwhm_corr']['value'])
self.param_new['compton_amplitude']['value'] = \
self.EC.element_dict['compton'].maxv * factor_to_area
if 'coherent_sct_amplitude' in self.EC.element_dict:
self.param_new['coherent_sct_amplitude']['value'] = np.sum(
self.EC.element_dict['elastic'].spectrum)
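    # Note on the Compton block above: the scattered photon energy follows the
    # standard Compton formula E' = E0 / (1 + (E0 / mc2) * (1 - cos(theta)))
    # with mc2 = 511 keV. For example, E0 = 12 keV at theta = 90 degrees gives
    # E' = 12 / (1 + 12/511) ~= 11.72 keV, the energy used for the
    # height-to-area factor.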
def data_for_plot(self):
"""
Save data in terms of K, L, M lines for plot.
"""
self.total_y = {}
self.total_y_l = {}
self.total_y_m = {}
new_dict = {k: v for (k, v) in six.iteritems(self.EC.element_dict) if v.status}
for k, v in six.iteritems(new_dict):
if 'K' in k:
self.total_y[k] = self.EC.element_dict[k].spectrum
elif 'L' in k:
self.total_y_l[k] = self.EC.element_dict[k].spectrum
elif 'M' in k:
self.total_y_m[k] = self.EC.element_dict[k].spectrum
else:
self.total_y[k] = self.EC.element_dict[k].spectrum
def save(self, fname='param_default1.json'):
"""
Save full param dict into a file at result directory.
The name of the file is predefined.
Parameters
----------
fname : str, optional
file name to save updated parameters
"""
fpath = os.path.join(self.result_folder, fname)
with open(fpath, 'w') as outfile:
json.dump(self.param_new, outfile,
sort_keys=True, indent=4)
def read_pre_saved(self, fname='param_default1.json'):
"""This is a bad idea."""
fpath = os.path.join(self.result_folder, fname)
with open(fpath, 'r') as infile:
data = json.load(infile)
return data
def save_as(file_path, data):
"""
Save full param dict into a file.
"""
with open(file_path, 'w') as outfile:
json.dump(data, outfile,
sort_keys=True, indent=4)
def calculate_profile(y0, param,
elemental_lines, default_area=1e5):
# Need to use deepcopy here to avoid unexpected change on parameter dict
fitting_parameters = copy.deepcopy(param)
x0 = np.arange(len(y0))
# ratio to transfer energy value back to channel value
approx_ratio = 100
lowv = fitting_parameters['non_fitting_values']['energy_bound_low']['value'] * approx_ratio
highv = fitting_parameters['non_fitting_values']['energy_bound_high']['value'] * approx_ratio
x, y = trim(x0, y0, lowv, highv)
e_select, matv = construct_linear_model(x, fitting_parameters,
elemental_lines,
default_area=default_area)
non_element = ['compton', 'elastic']
total_list = e_select + non_element
total_list = [str(v) for v in total_list]
temp_d = {k: v for (k, v) in zip(total_list, matv.transpose())}
# get background
bg = snip_method(y, fitting_parameters['e_offset']['value'],
fitting_parameters['e_linear']['value'],
fitting_parameters['e_quadratic']['value'])
temp_d.update(background=bg)
#for i in len(total_list):
# temp_d[total_list[i]] = matv[:, i]
x = (fitting_parameters['e_offset']['value']
+ fitting_parameters['e_linear']['value'] * x
+ fitting_parameters['e_quadratic']['value'] * x**2)
return x, temp_d
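# A small worked example of the energy calibration applied at the end of
# calculate_profile(): channel numbers map to energy (keV) through a quadratic
# polynomial. The coefficients here are illustrative, not fitted values.
def _energy_calibration_example():
    e_offset, e_linear, e_quadratic = 0.0, 0.01, 1e-7
    channel = np.arange(2048)
    energy = e_offset + e_linear * channel + e_quadratic * channel**2
    # e.g. channel 100 maps to 0.01*100 + 1e-7*100**2 = 1.001 keV
    return energy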
def create_full_dict(param, name_list):
"""
Create full param dict so each item has same nested dict.
This is for GUI purpose only.
    .. warning:: This function mutates the input values.
    Parameters
----------
param : dict
all parameters including element
name_list : list
strategy names
"""
for n in name_list:
for k, v in six.iteritems(param):
if k == 'non_fitting_values':
continue
if n not in v:
v.update({n: v['bound_type']})
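# For example, if name_list were ['fit_with_tail', 'free_more'] (strategy
# names here are illustrative) and a parameter entry carried only
# {'bound_type': 'none', ...}, create_full_dict would add
# {'fit_with_tail': 'none', 'free_more': 'none'} so that every strategy
# column exists for the GUI.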
def get_Z(ename):
"""
Return element's Z number.
Parameters
----------
ename : str
element name
Returns
-------
    str
        element Z number as a string; '-' for non-element lines
"""
strip_line = lambda ename: ename.split('_')[0]
non_element = ['compton', 'elastic', 'background']
if ename in non_element:
return '-'
else:
e = Element(strip_line(ename))
return str(e.Z)
def get_energy(ename):
strip_line = lambda ename: ename.split('_')[0]
non_element = ['compton', 'elastic', 'background']
if ename in non_element:
return '-'
else:
e = Element(strip_line(ename))
if '_K' in ename:
energy = e.emission_line['ka1']
elif '_L' in ename:
energy = e.emission_line['la1']
elif '_M' in ename:
energy = e.emission_line['ma1']
return str(np.around(energy, 4))
def get_element(param):
element_list = param['non_fitting_values']['element_list']
return [e.strip(' ') for e in element_list.split(',')]
def factor_height2area(energy, param, std_correction=1):
"""
Factor to transfer peak height to area.
"""
temp_val = 2 * np.sqrt(2 * np.log(2))
epsilon = param['non_fitting_values']['electron_hole_energy']
sigma = np.sqrt((param['fwhm_offset']['value'] / temp_val)**2
+ energy * epsilon * param['fwhm_fanoprime']['value'])
return sigma*std_correction
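# A hedged numeric sketch of factor_height2area(), assuming illustrative
# calibration values (electron_hole_energy ~ 3.85e-3 keV, as for silicon):
# with fwhm_offset = 0.1, fwhm_fanoprime = 1e-3 and energy = 10 keV,
# sigma = sqrt((0.1 / 2.3548)**2 + 10 * 3.85e-3 * 1e-3) ~= 0.0429,
# which is the returned factor when std_correction is 1.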
def param_dict_cleaner(param, element_list):
"""
    Make sure param only contains elements from element_list.
Parameters
----------
param : dict
fitting parameters
element_list : list
list of elemental lines
Returns
-------
    dict
        new param dict containing only the given elements
"""
param_new = {}
for k, v in six.iteritems(param):
if k == 'non_fitting_values' or k == k.lower():
param_new.update({k: v})
else:
if k[:2] in element_list:
param_new.update({k: v})
return param_new
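# A minimal sketch of param_dict_cleaner(): lower-case (global) keys and
# 'non_fitting_values' are always kept, while element entries survive only if
# their two-character prefix appears in element_list. Values are illustrative.
def _param_dict_cleaner_example():
    param = {
        'non_fitting_values': {},
        'e_offset': {'value': 0.0},
        'Fe_ka1_area': {'value': 1.0},
        'Cu_ka1_area': {'value': 2.0},
    }
    cleaned = param_dict_cleaner(param, ['Fe'])
    assert 'Fe_ka1_area' in cleaned and 'Cu_ka1_area' not in cleaned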
|
|
import logging
import ailment
from ..analysis import Analysis
from .structurer import SequenceNode, CodeNode, MultiNode, LoopNode, ConditionNode
l = logging.getLogger(name=__name__)
class RegionSimplifier(Analysis):
def __init__(self, region):
self.region = region
self.result = None
# Initialize handler map
self.GOTO_HANDLERS = {
SequenceNode: self._goto_handle_sequencenode,
CodeNode: self._goto_handle_codenode,
MultiNode: self._goto_handle_multinode,
LoopNode: self._goto_handle_loopnode,
ConditionNode: self._goto_handle_conditionnode,
ailment.Block: self._goto_handle_block,
}
self.IFS_HANDLERS = {
SequenceNode: self._ifs_handle_sequencenode,
CodeNode: self._ifs_handle_codenode,
MultiNode: self._ifs_handle_multinode,
LoopNode: self._ifs_handle_loopnode,
ConditionNode: self._ifs_handle_conditionnode,
ailment.Block: self._ifs_handle_block,
}
self._simplify()
def _simplify(self):
"""
RegionSimplifier performs the following simplifications:
- Remove redundant Gotos
- Remove redundant If/If-else statements
"""
r = self.region
r = self._simplify_gotos(r)
r = self._simplify_ifs(r)
self.result = r
#
# Simplifiers
#
# Goto simplifier
def _simplify_gotos(self, region):
self._goto_handle(region, None)
return region
def _goto_handle(self, node, successor):
handler = self.GOTO_HANDLERS.get(node.__class__, None)
if handler is not None:
handler(node, successor)
def _goto_handle_sequencenode(self, node, successor):
"""
:param SequenceNode node:
:return:
"""
for n0, n1 in zip(node.nodes, node.nodes[1:] + [successor]):
self._goto_handle(n0, n1)
def _goto_handle_codenode(self, node, successor):
"""
:param CodeNode node:
:return:
"""
self._goto_handle(node.node, successor)
def _goto_handle_conditionnode(self, node, successor):
"""
:param ConditionNode node:
:param successor:
:return:
"""
if node.true_node is not None:
self._goto_handle(node.true_node, successor)
if node.false_node is not None:
self._goto_handle(node.false_node, successor)
def _goto_handle_loopnode(self, node, successor):
"""
:param LoopNode node:
:param successor:
:return:
"""
self._goto_handle(node.sequence_node, successor)
def _goto_handle_multinode(self, node, successor):
"""
:param MultiNode node:
:return:
"""
for n0, n1 in zip(node.nodes, node.nodes[1:] + [successor]):
self._goto_handle(n0, n1)
def _goto_handle_block(self, block, successor): # pylint:disable=no-self-use
"""
:param ailment.Block block:
:return:
"""
if block.statements and isinstance(block.statements[-1], ailment.Stmt.Jump):
goto_stmt = block.statements[-1] # ailment.Stmt.Jump
if successor and isinstance(goto_stmt.target, ailment.Expr.Const) \
and goto_stmt.target.value == successor.addr:
# we can remove this statement
block.statements = block.statements[:-1]
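    # Illustration: in a sequence [block_a, block_b], if block_a ends with a
    # Jump whose constant target equals block_b.addr, control would fall
    # through to block_b anyway, so the trailing goto is redundant and is
    # removed.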
# Ifs simplifier
def _simplify_ifs(self, region):
self._ifs_handle(region, None)
return region
def _ifs_handle(self, node, successor):
handler = self.IFS_HANDLERS.get(node.__class__, None)
if handler is not None:
handler(node, successor)
def _ifs_handle_sequencenode(self, node, successor):
"""
:param SequenceNode node:
:return:
"""
for n0, n1 in zip(node.nodes, node.nodes[1:] + [successor]):
self._ifs_handle(n0, n1)
def _ifs_handle_codenode(self, node, successor):
"""
:param CodeNode node:
:return:
"""
self._ifs_handle(node.node, successor)
def _ifs_handle_conditionnode(self, node, successor):
"""
:param ConditionNode node:
:param successor:
:return:
"""
if node.true_node is not None:
self._ifs_handle(node.true_node, successor)
if node.false_node is not None:
self._ifs_handle(node.false_node, successor)
def _ifs_handle_loopnode(self, node, successor):
"""
:param LoopNode node:
:param successor:
:return:
"""
self._ifs_handle(node.sequence_node, successor)
def _ifs_handle_multinode(self, node, successor):
"""
:param MultiNode node:
:return:
"""
for n0, n1 in zip(node.nodes, node.nodes[1:] + [successor]):
self._ifs_handle(n0, n1)
def _ifs_handle_block(self, block, successor): # pylint:disable=no-self-use
"""
:param ailment.Block block:
:return:
"""
if block.statements and isinstance(block.statements[-1], ailment.Stmt.ConditionalJump):
cond_stmt = block.statements[-1] # ailment.Stmt.ConditionalJump
if isinstance(successor, ConditionNode):
true_cond = False
if cond_stmt.true_target is not None and successor.true_node is not None:
# True branch exists. Test if the true target is the address
if cond_stmt.true_target.value == successor.true_node.addr:
true_cond = True
                if cond_stmt.true_target is not None and successor.false_node is not None:
                    # True branch exists. Test if the true target is the false node's address
if cond_stmt.true_target.value == successor.false_node.addr:
true_cond = True
false_cond = False
if cond_stmt.false_target is not None and successor.false_node is not None:
# False branch exists. Test if the false target is the address
if cond_stmt.false_target.value == successor.false_node.addr:
false_cond = True
                if cond_stmt.false_target is not None and successor.true_node is not None:
                    # False branch exists. Test if the false target is the true node's address
if cond_stmt.false_target.value == successor.true_node.addr:
false_cond = True
if true_cond or false_cond:
# We can safely remove this statement
block.statements = block.statements[:-1]
else:
l.error("An unexpected successor %s follows the conditional statement %s.",
successor, cond_stmt
)
from ...analyses import AnalysesHub
AnalysesHub.register_default('RegionSimplifier', RegionSimplifier)
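# Hedged usage sketch: once registered above, the analysis is reachable
# through an angr project's analyses hub, e.g.
#   simplified_region = project.analyses.RegionSimplifier(region).result
# where `project` is an angr Project and `region` is a structured region.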
|
|
# Copyright 2008-2009 ITA Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RRDTool Graphing"""
import os
import stat
import tempfile
try:
import rrdtool
import twirrdy
except ImportError:
rrdtool = None
import coil
from nagcat import errors
def available():
"""Returns False if rrdtool is not available"""
return rrdtool is not None
class Colorator(object):
"""A helper for picking graph colors"""
COLORS = ('#002A8F','#DA4725','#008A6D','#00BD27','#CCBB00','#F24AC8')
def __init__(self):
self.color_idx = 0
def next(self):
self.color_idx += 1
return Colorator.COLORS[(self.color_idx - 1) % len(Colorator.COLORS)]
def rrd_esc(string):
"""Escape : so rrd arguments parse properly"""
return string.replace(':', r'\:')
class Graph(object):
def __init__(self, dir, host, rrd, period="day"):
api = twirrdy.RRDBasicAPI()
self.rrd = rrd
self.host = host
path = "%s/%s/%s" % (dir, host, rrd)
self.rrd_path = "%s.rrd" % path
self.info = api.info(self.rrd_path)
self.color = Colorator()
self.period = period
try:
coil_fd = open("%s.coil" % path)
try:
coil_stat = os.fstat(coil_fd.fileno())
self.private = not (coil_stat.st_mode & stat.S_IROTH)
self.conf = coil.parse(coil_fd.read())
finally:
coil_fd.close()
        except (IOError, OSError) as ex:
raise errors.InitError("Unable to read coil file: %s" % ex)
if period not in ('day', 'week', 'month', 'year'):
raise ValueError("Invalid period parameter")
self.args = []
self.ds = []
self._init_args()
self._init_ds()
self._init_ds_args()
def _init_ds(self):
"""Find the data sources referred to in the coil config."""
self.ds.append('_state')
if self.conf.get('trend.type', False):
self.ds.append('_result')
if self.conf['query.type'] == 'compound':
for name, sub in self.conf['query'].iteritems():
if (isinstance(sub, coil.struct.Struct) and
sub.get('trend.type', False)):
self.ds.append(name)
elif self.conf.get('query.trend.type', False):
self.ds.append('query')
def _init_args(self):
"""Build the initial part of the rrdgraph args
before the data source arguments
"""
title = self.conf.get('trend.title',
"%s - %s" % (self.host, self.rrd))
axis_min = self.conf.get('trend.axis_min', "0")
axis_max = self.conf.get('trend.axis_max', None)
axis_label = self.conf.get('trend.axis_label', None)
base = self.conf.get('trend.base', 1000)
self.args = ["-s", "-1%s" % self.period,
"--title", title,
"--alt-autoscale-max", "--alt-y-grid",
"--lower-limit", str(axis_min)]
if axis_max:
self.args += ["--upper-limit", str(axis_max)]
if axis_label:
self.args += ["--vertical-label", str(axis_label)]
if base:
self.args += ["--base", str(base)]
# Add the _state ds that all of them have
self.args += ["DEF:_state=%s:_state:MAX" % rrd_esc(self.rrd_path),
"CDEF:_state_ok=_state,0,EQ",
"CDEF:_state_warn=_state,1,EQ",
"CDEF:_state_crit=_state,2,EQ",
"CDEF:_state_unkn=_state,3,EQ",
"TICK:_state_ok#ddffcc:1.0:Ok",
"TICK:_state_warn#ffffcc:1.0:Warning",
"TICK:_state_crit#ffcccc:1.0:Critical",
"TICK:_state_unkn#ffcc55:1.0:Unknown\\n"]
def _init_ds_args(self):
"""Build the rrdgraph args for all the known data sources"""
extra = set(self.ds)
for ds in self.ds:
if ds not in self.info['ds']:
self.args.append("COMMENT:WARNING\: Missing DS %s\\n" % ds)
continue
extra.remove(ds)
if ds == '_state':
continue
self.args += self._one_ds_args(ds)
for ds in extra:
self.args.append("COMMENT:WARNING\: Unexpected DS %s\\n" % ds)
def _one_ds_args(self, ds):
"""Build the arguments for a single data source"""
args = []
if ds == "_result":
dsconf = self.conf
label = dsconf.get('trend.label', dsconf.get('label', 'Result'))
default_color = "#000000"
elif ds == 'query' and self.conf['query.type'] != 'compound':
dsconf = self.conf
label = dsconf.get('query.trend.label',
dsconf.get('query.label', dsconf.get('label', 'Result')))
default_color = "#000000"
else:
dsconf = self.conf['query'][ds]
label = dsconf.get('trend.label',
dsconf.get('label', ds.capitalize()))
default_color = self.color.next()
scale = float(dsconf.get('trend.scale', 0))
if scale:
args.append("DEF:_raw_%s=%s:%s:AVERAGE" %
(ds, rrd_esc(self.rrd_path), ds))
args.append("CDEF:%s=_raw_%s,%d,*" % (ds, ds, scale))
else:
args.append("DEF:%s=%s:%s:AVERAGE" %
(ds, rrd_esc(self.rrd_path), ds))
if dsconf.get('trend.stack', False):
stack = "STACK"
else:
stack = ""
color = dsconf.get('trend.color', default_color)
display = dsconf.get('trend.display', 'line').lower()
if display == 'area':
args.append("AREA:%s%s:%s:%s" % (ds, color, rrd_esc(label), stack))
elif display == 'line':
args.append("LINE2:%s%s:%s:%s" % (ds, color, rrd_esc(label), stack))
else:
raise ValueError("Invalid display value")
prefix = max(7 - len(label), 0) * " "
args.append("VDEF:_last_%s=%s,LAST" % (ds, ds))
args.append("VDEF:_avg_%s=%s,AVERAGE" % (ds, ds))
args.append("VDEF:_max_%s=%s,MAXIMUM" % (ds, ds))
args.append("GPRINT:_last_%s:%sCurrent\\:%%8.2lf%%s" % (ds, prefix))
args.append("GPRINT:_avg_%s:Average\\:%%8.2lf%%s" % ds)
args.append("GPRINT:_max_%s:Maximum\\:%%8.2lf%%s\\n" % ds)
return args
def graph(self, width=500, height=120):
fd, path = tempfile.mkstemp('.png')
try:
rrdtool.graph(path, "-a", "PNG", "--width", str(width),
"--height", str(height), *self.args)
except:
os.close(fd)
raise
finally:
os.unlink(path)
fd = os.fdopen(fd)
png = fd.read()
fd.close()
return png
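# Hedged usage sketch (directory, host and rrd names are illustrative): build
# a Graph for one host/rrd pair and render a PNG covering the past week.
#   graph = Graph('/var/lib/nagcat/trends', 'web01', 'http', period='week')
#   png_bytes = graph.graph(width=600, height=150)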
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_concurrency import processutils as putils
from cinder import context
from cinder import exception
from cinder.tests.unit.targets import targets_fixture as tf
from cinder import utils
from cinder.volume.targets import lio
class TestLioAdmDriver(tf.TargetDriverFixture):
def setUp(self):
super(TestLioAdmDriver, self).setUp()
with mock.patch.object(lio.LioAdm, '_verify_rtstool'):
self.target = lio.LioAdm(root_helper=utils.get_root_helper(),
configuration=self.configuration)
self.target.db = mock.MagicMock(
volume_get=lambda x, y: {'provider_auth': 'IncomingUser foo bar'})
@mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute)
@mock.patch.object(lio.LioAdm, '_persist_configuration')
@mock.patch('cinder.utils.execute')
def test_get_target(self, mexecute, mpersist_cfg, mlock_exec):
mexecute.return_value = (self.test_vol, None)
self.assertEqual(self.test_vol, self.target._get_target(self.test_vol))
self.assertFalse(mpersist_cfg.called)
expected_args = ('cinder-rtstool', 'get-targets')
mlock_exec.assert_called_once_with(*expected_args, run_as_root=True)
mexecute.assert_called_once_with(*expected_args, run_as_root=True)
def test_get_iscsi_target(self):
ctxt = context.get_admin_context()
expected = 0
self.assertEqual(expected,
self.target._get_iscsi_target(ctxt,
self.testvol['id']))
def test_get_target_and_lun(self):
lun = 0
iscsi_target = 0
ctxt = context.get_admin_context()
expected = (iscsi_target, lun)
self.assertEqual(expected,
self.target._get_target_and_lun(ctxt, self.testvol))
def test_get_target_chap_auth(self):
ctxt = context.get_admin_context()
self.assertEqual(('foo', 'bar'),
self.target._get_target_chap_auth(ctxt,
self.test_vol))
@mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute)
@mock.patch.object(lio.LioAdm, '_persist_configuration')
@mock.patch('cinder.utils.execute')
@mock.patch.object(lio.LioAdm, '_get_target')
def test_create_iscsi_target(self, mget_target, mexecute, mpersist_cfg,
mlock_exec):
mget_target.return_value = 1
# create_iscsi_target sends volume_name instead of volume_id on error
self.assertEqual(
1,
self.target.create_iscsi_target(
self.test_vol,
1,
0,
self.fake_volumes_dir))
mpersist_cfg.assert_called_once_with(self.volume_name)
mexecute.assert_called_once_with(
'cinder-rtstool',
'create',
self.fake_volumes_dir,
self.test_vol,
'',
'',
self.target.iscsi_protocol == 'iser',
run_as_root=True)
@mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute)
@mock.patch.object(lio.LioAdm, '_persist_configuration')
@mock.patch.object(utils, 'execute')
@mock.patch.object(lio.LioAdm, '_get_target', return_value=1)
def test_create_iscsi_target_port_ip(self, mget_target, mexecute,
mpersist_cfg, mlock_exec):
test_vol = 'iqn.2010-10.org.openstack:'\
'volume-83c2e877-feed-46be-8435-77884fe55b45'
ip = '10.0.0.15'
port = 3261
self.assertEqual(
1,
self.target.create_iscsi_target(
name=test_vol,
tid=1,
lun=0,
path=self.fake_volumes_dir,
**{'portals_port': port, 'portals_ips': [ip]}))
expected_args = (
'cinder-rtstool',
'create',
self.fake_volumes_dir,
test_vol,
'',
'',
self.target.iscsi_protocol == 'iser',
'-p%s' % port,
'-a' + ip)
mlock_exec.assert_any_call(*expected_args, run_as_root=True)
mexecute.assert_any_call(*expected_args, run_as_root=True)
mpersist_cfg.assert_called_once_with(self.volume_name)
@mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute)
@mock.patch.object(lio.LioAdm, '_persist_configuration')
@mock.patch.object(utils, 'execute')
@mock.patch.object(lio.LioAdm, '_get_target', return_value=1)
def test_create_iscsi_target_port_ips(self, mget_target, mexecute,
mpersist_cfg, mlock_exec):
test_vol = 'iqn.2010-10.org.openstack:'\
'volume-83c2e877-feed-46be-8435-77884fe55b45'
ips = ['10.0.0.15', '127.0.0.1']
port = 3261
self.assertEqual(
1,
self.target.create_iscsi_target(
name=test_vol,
tid=1,
lun=0,
path=self.fake_volumes_dir,
**{'portals_port': port, 'portals_ips': ips}))
expected_args = (
'cinder-rtstool',
'create',
self.fake_volumes_dir,
test_vol,
'',
'',
self.target.iscsi_protocol == 'iser',
'-p%s' % port,
'-a' + ','.join(ips))
mlock_exec.assert_any_call(*expected_args, run_as_root=True)
mexecute.assert_any_call(*expected_args, run_as_root=True)
mpersist_cfg.assert_called_once_with(self.volume_name)
@mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute)
@mock.patch.object(lio.LioAdm, '_persist_configuration')
@mock.patch('cinder.utils.execute',
side_effect=putils.ProcessExecutionError)
@mock.patch.object(lio.LioAdm, '_get_target')
def test_create_iscsi_target_already_exists(self, mget_target, mexecute,
mpersist_cfg, mlock_exec):
chap_auth = ('foo', 'bar')
self.assertRaises(exception.ISCSITargetCreateFailed,
self.target.create_iscsi_target,
self.test_vol,
1,
0,
self.fake_volumes_dir,
chap_auth)
self.assertFalse(mpersist_cfg.called)
expected_args = ('cinder-rtstool', 'create', self.fake_volumes_dir,
self.test_vol, chap_auth[0], chap_auth[1], False)
mlock_exec.assert_called_once_with(*expected_args, run_as_root=True)
mexecute.assert_called_once_with(*expected_args, run_as_root=True)
@mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute)
@mock.patch.object(lio.LioAdm, '_persist_configuration')
@mock.patch('cinder.utils.execute')
def test_remove_iscsi_target(self, mexecute, mpersist_cfg, mlock_exec):
# Test the normal case
self.target.remove_iscsi_target(0,
0,
self.testvol['id'],
self.testvol['name'])
expected_args = ('cinder-rtstool', 'delete',
self.iscsi_target_prefix + self.testvol['name'])
mlock_exec.assert_called_once_with(*expected_args, run_as_root=True)
mexecute.assert_called_once_with(*expected_args, run_as_root=True)
mpersist_cfg.assert_called_once_with(self.fake_volume_id)
# Test the failure case: putils.ProcessExecutionError
mlock_exec.reset_mock()
mpersist_cfg.reset_mock()
mexecute.side_effect = putils.ProcessExecutionError
self.assertRaises(exception.ISCSITargetRemoveFailed,
self.target.remove_iscsi_target,
0,
0,
self.testvol['id'],
self.testvol['name'])
mlock_exec.assert_called_once_with(*expected_args, run_as_root=True)
# Ensure there have been no calls to persist configuration
self.assertFalse(mpersist_cfg.called)
@mock.patch.object(lio.LioAdm, '_get_target_chap_auth')
@mock.patch.object(lio.LioAdm, 'create_iscsi_target')
def test_ensure_export(self, _mock_create, mock_get_chap):
ctxt = context.get_admin_context()
mock_get_chap.return_value = ('foo', 'bar')
self.target.ensure_export(ctxt,
self.testvol,
self.fake_volumes_dir)
_mock_create.assert_called_once_with(
self.iscsi_target_prefix + 'testvol',
0, 0, self.fake_volumes_dir, ('foo', 'bar'),
check_exit_code=False,
old_name=None,
portals_ips=[self.configuration.iscsi_ip_address],
portals_port=self.configuration.iscsi_port)
@mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute)
@mock.patch.object(lio.LioAdm, '_persist_configuration')
@mock.patch('cinder.utils.execute')
@mock.patch.object(lio.LioAdm, '_get_iscsi_properties')
def test_initialize_connection(self, mock_get_iscsi, mock_execute,
mpersist_cfg, mlock_exec):
target_id = self.iscsi_target_prefix + 'volume-' + self.fake_volume_id
connector = {'initiator': 'fake_init'}
# Test the normal case
mock_get_iscsi.return_value = 'foo bar'
expected_return = {'driver_volume_type': 'iscsi',
'data': 'foo bar'}
self.assertEqual(expected_return,
self.target.initialize_connection(self.testvol,
connector))
expected_args = ('cinder-rtstool', 'add-initiator', target_id,
self.expected_iscsi_properties['auth_username'],
'2FE0CQ8J196R', connector['initiator'])
mlock_exec.assert_called_once_with(*expected_args, run_as_root=True)
mock_execute.assert_called_once_with(*expected_args, run_as_root=True)
mpersist_cfg.assert_called_once_with(self.fake_volume_id)
# Test the failure case: putils.ProcessExecutionError
mlock_exec.reset_mock()
mpersist_cfg.reset_mock()
mock_execute.side_effect = putils.ProcessExecutionError
self.assertRaises(exception.ISCSITargetAttachFailed,
self.target.initialize_connection,
self.testvol,
connector)
mlock_exec.assert_called_once_with(*expected_args, run_as_root=True)
# Ensure there have been no calls to persist configuration
self.assertFalse(mpersist_cfg.called)
@mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute)
@mock.patch.object(lio.LioAdm, '_persist_configuration')
@mock.patch('cinder.utils.execute')
def test_terminate_connection(self, mock_execute, mpersist_cfg,
mlock_exec):
target_id = self.iscsi_target_prefix + 'volume-' + self.fake_volume_id
connector = {'initiator': 'fake_init'}
self.target.terminate_connection(self.testvol,
connector)
expected_args = ('cinder-rtstool', 'delete-initiator', target_id,
connector['initiator'])
mlock_exec.assert_called_once_with(*expected_args, run_as_root=True)
mock_execute.assert_called_once_with(*expected_args, run_as_root=True)
mpersist_cfg.assert_called_once_with(self.fake_volume_id)
@mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute)
@mock.patch.object(lio.LioAdm, '_persist_configuration')
@mock.patch('cinder.utils.execute')
def test_terminate_connection_fail(self, mock_execute, mpersist_cfg,
mlock_exec):
target_id = self.iscsi_target_prefix + 'volume-' + self.fake_volume_id
mock_execute.side_effect = putils.ProcessExecutionError
connector = {'initiator': 'fake_init'}
self.assertRaises(exception.ISCSITargetDetachFailed,
self.target.terminate_connection,
self.testvol,
connector)
mlock_exec.assert_called_once_with('cinder-rtstool',
'delete-initiator', target_id,
connector['initiator'],
run_as_root=True)
self.assertFalse(mpersist_cfg.called)
def test_iscsi_protocol(self):
        self.assertEqual('iscsi', self.target.iscsi_protocol)
@mock.patch.object(lio.LioAdm, '_get_target_and_lun', return_value=(1, 2))
@mock.patch.object(lio.LioAdm, 'create_iscsi_target', return_value=3)
@mock.patch.object(lio.LioAdm, '_get_target_chap_auth',
return_value=(mock.sentinel.user, mock.sentinel.pwd))
def test_create_export(self, mock_chap, mock_create, mock_get_target):
ctxt = context.get_admin_context()
result = self.target.create_export(ctxt, self.testvol_2,
self.fake_volumes_dir)
loc = (u'%(ip)s:%(port)d,3 %(prefix)s%(name)s 2' %
{'ip': self.configuration.iscsi_ip_address,
'port': self.configuration.iscsi_port,
'prefix': self.iscsi_target_prefix,
'name': self.testvol_2['name']})
expected_result = {
'location': loc,
'auth': 'CHAP %s %s' % (mock.sentinel.user, mock.sentinel.pwd),
}
self.assertEqual(expected_result, result)
mock_create.assert_called_once_with(
self.iscsi_target_prefix + self.testvol_2['name'],
1,
2,
self.fake_volumes_dir,
(mock.sentinel.user, mock.sentinel.pwd),
portals_ips=[self.configuration.iscsi_ip_address],
portals_port=self.configuration.iscsi_port)
|
|
#
#
# Copyright (C) 2009, 2010, 2012 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Ganeti confd client
Clients can use the confd client library to send requests to a group of master
candidates running confd. The expected usage is through the asyncore framework,
by sending queries, and asynchronously receiving replies through a callback.
This way the client library doesn't ever need to "wait" on a particular answer,
and can proceed even if some udp packets are lost. It's up to the user to
reschedule queries if they haven't received responses and they need them.
Example usage::
client = ConfdClient(...) # includes callback specification
req = confd_client.ConfdClientRequest(type=constants.CONFD_REQ_PING)
client.SendRequest(req)
# then make sure your client calls asyncore.loop() or daemon.Mainloop.Run()
# ... wait ...
# And your callback will be called by asyncore, when your query gets a
# response, or when it expires.
You can use the provided ConfdFilterCallback to act as a filter, only passing
"newer" answer to your callback, and filtering out outdated ones, or ones
confirming what you already got.
"""
# pylint: disable=E0203
# E0203: Access to member %r before its definition, since we use
# objects.py which doesn't explicitly initialise its members
import time
import random
from ganeti import utils
from ganeti import constants
from ganeti import objects
from ganeti import serializer
from ganeti import daemon # contains AsyncUDPSocket
from ganeti import errors
from ganeti import confd
from ganeti import ssconf
from ganeti import compat
from ganeti import netutils
from ganeti import pathutils
class ConfdAsyncUDPClient(daemon.AsyncUDPSocket):
"""Confd udp asyncore client
This is kept separate from the main ConfdClient to make sure it's easy to
implement a non-asyncore based client library.
"""
def __init__(self, client, family):
"""Constructor for ConfdAsyncUDPClient
@type client: L{ConfdClient}
@param client: client library, to pass the datagrams to
"""
daemon.AsyncUDPSocket.__init__(self, family)
self.client = client
# this method is overriding a daemon.AsyncUDPSocket method
def handle_datagram(self, payload, ip, port):
self.client.HandleResponse(payload, ip, port)
class _Request(object):
"""Request status structure.
@ivar request: the request data
@ivar args: any extra arguments for the callback
@ivar expiry: the expiry timestamp of the request
@ivar sent: the set of contacted peers
@ivar rcvd: the set of peers who replied
"""
def __init__(self, request, args, expiry, sent):
self.request = request
self.args = args
self.expiry = expiry
self.sent = frozenset(sent)
self.rcvd = set()
class ConfdClient(object):
"""Send queries to confd, and get back answers.
Since the confd model works by querying multiple master candidates, and
getting back answers, this is an asynchronous library. It can either work
through asyncore or with your own handling.
@type _requests: dict
@ivar _requests: dictionary indexes by salt, which contains data
about the outstanding requests; the values are objects of type
L{_Request}
"""
def __init__(self, hmac_key, peers, callback, port=None, logger=None):
"""Constructor for ConfdClient
@type hmac_key: string
@param hmac_key: hmac key to talk to confd
@type peers: list
@param peers: list of peer nodes
@type callback: f(L{ConfdUpcallPayload})
@param callback: function to call when getting answers
@type port: integer
@param port: confd port (default: use GetDaemonPort)
@type logger: logging.Logger
@param logger: optional logger for internal conditions
"""
if not callable(callback):
raise errors.ProgrammerError("callback must be callable")
self.UpdatePeerList(peers)
self._SetPeersAddressFamily()
self._hmac_key = hmac_key
self._socket = ConfdAsyncUDPClient(self, self._family)
self._callback = callback
self._confd_port = port
self._logger = logger
self._requests = {}
if self._confd_port is None:
self._confd_port = netutils.GetDaemonPort(constants.CONFD)
def UpdatePeerList(self, peers):
"""Update the list of peers
@type peers: list
@param peers: list of peer nodes
"""
# we are actually called from init, so:
# pylint: disable=W0201
if not isinstance(peers, list):
raise errors.ProgrammerError("peers must be a list")
    # make a copy of peers, since we're going to shuffle the list later
self._peers = list(peers)
def _PackRequest(self, request, now=None):
"""Prepare a request to be sent on the wire.
    This function puts a proper salt in a confd request and adds the
    correct magic number.
"""
if now is None:
now = time.time()
tstamp = "%d" % now
req = serializer.DumpSignedJson(request.ToDict(), self._hmac_key, tstamp)
return confd.PackMagic(req)
def _UnpackReply(self, payload):
in_payload = confd.UnpackMagic(payload)
(dict_answer, salt) = serializer.LoadSignedJson(in_payload, self._hmac_key)
answer = objects.ConfdReply.FromDict(dict_answer)
return answer, salt
def ExpireRequests(self):
"""Delete all the expired requests.
"""
now = time.time()
for rsalt, rq in self._requests.items():
if now >= rq.expiry:
del self._requests[rsalt]
client_reply = ConfdUpcallPayload(salt=rsalt,
type=UPCALL_EXPIRE,
orig_request=rq.request,
extra_args=rq.args,
client=self,
)
self._callback(client_reply)
def SendRequest(self, request, args=None, coverage=0, async=True):
"""Send a confd request to some MCs
@type request: L{objects.ConfdRequest}
@param request: the request to send
@type args: tuple
@param args: additional callback arguments
@type coverage: integer
@param coverage: number of remote nodes to contact; if default
(0), it will use a reasonable default
(L{ganeti.constants.CONFD_DEFAULT_REQ_COVERAGE}), if -1 is
passed, it will use the maximum number of peers, otherwise the
number passed in will be used
@type async: boolean
@param async: handle the write asynchronously
"""
if coverage == 0:
coverage = min(len(self._peers), constants.CONFD_DEFAULT_REQ_COVERAGE)
elif coverage == -1:
coverage = len(self._peers)
if coverage > len(self._peers):
raise errors.ConfdClientError("Not enough MCs known to provide the"
" desired coverage")
if not request.rsalt:
raise errors.ConfdClientError("Missing request rsalt")
self.ExpireRequests()
if request.rsalt in self._requests:
raise errors.ConfdClientError("Duplicate request rsalt")
if request.type not in constants.CONFD_REQS:
raise errors.ConfdClientError("Invalid request type")
random.shuffle(self._peers)
targets = self._peers[:coverage]
now = time.time()
payload = self._PackRequest(request, now=now)
for target in targets:
try:
self._socket.enqueue_send(target, self._confd_port, payload)
except errors.UdpDataSizeError:
raise errors.ConfdClientError("Request too big")
expire_time = now + constants.CONFD_CLIENT_EXPIRE_TIMEOUT
self._requests[request.rsalt] = _Request(request, args, expire_time,
targets)
if not async:
self.FlushSendQueue()
def HandleResponse(self, payload, ip, port):
"""Asynchronous handler for a confd reply
Call the relevant callback associated to the current request.
"""
try:
try:
answer, salt = self._UnpackReply(payload)
      except (errors.SignatureError, errors.ConfdMagicError) as err:
if self._logger:
self._logger.debug("Discarding broken package: %s" % err)
return
try:
rq = self._requests[salt]
except KeyError:
if self._logger:
self._logger.debug("Discarding unknown (expired?) reply: %s" % err)
return
rq.rcvd.add(ip)
client_reply = ConfdUpcallPayload(salt=salt,
type=UPCALL_REPLY,
server_reply=answer,
orig_request=rq.request,
server_ip=ip,
server_port=port,
extra_args=rq.args,
client=self,
)
self._callback(client_reply)
finally:
self.ExpireRequests()
def FlushSendQueue(self):
"""Send out all pending requests.
Can be used for synchronous client use.
"""
while self._socket.writable():
self._socket.handle_write()
def ReceiveReply(self, timeout=1):
"""Receive one reply.
@type timeout: float
@param timeout: how long to wait for the reply
@rtype: boolean
@return: True if some data has been handled, False otherwise
"""
return self._socket.process_next_packet(timeout=timeout)
@staticmethod
def _NeededReplies(peer_cnt):
"""Compute the minimum safe number of replies for a query.
The algorithm is designed to work well for both small and big
number of peers:
- for less than three, we require all responses
- for less than five, we allow one miss
- otherwise, half the number plus one
    This guarantees that we progress monotonically: 1->1, 2->2, 3->2,
    4->3, 5->3, 6->4, 7->4, etc.
@type peer_cnt: int
@param peer_cnt: the number of peers contacted
@rtype: int
@return: the number of replies which should give a safe coverage
"""
if peer_cnt < 3:
return peer_cnt
elif peer_cnt < 5:
return peer_cnt - 1
else:
return int(peer_cnt / 2) + 1
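  # For example: _NeededReplies(2) == 2 (all replies required),
  # _NeededReplies(4) == 3 (one miss allowed) and _NeededReplies(10) == 6
  # (half plus one).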
def WaitForReply(self, salt, timeout=constants.CONFD_CLIENT_EXPIRE_TIMEOUT):
"""Wait for replies to a given request.
This method will wait until either the timeout expires or a
minimum number (computed using L{_NeededReplies}) of replies are
received for the given salt. It is useful when doing synchronous
calls to this library.
@param salt: the salt of the request we want responses for
@param timeout: the maximum timeout (should be less or equal to
L{ganeti.constants.CONFD_CLIENT_EXPIRE_TIMEOUT}
@rtype: tuple
@return: a tuple of (timed_out, sent_cnt, recv_cnt); if the
request is unknown, timed_out will be true and the counters
will be zero
"""
def _CheckResponse():
if salt not in self._requests:
# expired?
if self._logger:
self._logger.debug("Discarding unknown/expired request: %s" % salt)
return MISSING
rq = self._requests[salt]
if len(rq.rcvd) >= expected:
# already got all replies
return (False, len(rq.sent), len(rq.rcvd))
# else wait, using default timeout
self.ReceiveReply()
raise utils.RetryAgain()
MISSING = (True, 0, 0)
if salt not in self._requests:
return MISSING
# extend the expire time with the current timeout, so that we
# don't get the request expired from under us
rq = self._requests[salt]
rq.expiry += timeout
sent = len(rq.sent)
expected = self._NeededReplies(sent)
try:
return utils.Retry(_CheckResponse, 0, timeout)
except utils.RetryTimeout:
if salt in self._requests:
rq = self._requests[salt]
return (True, len(rq.sent), len(rq.rcvd))
else:
return MISSING
def _SetPeersAddressFamily(self):
if not self._peers:
raise errors.ConfdClientError("Peer list empty")
try:
peer = self._peers[0]
self._family = netutils.IPAddress.GetAddressFamily(peer)
for peer in self._peers[1:]:
if netutils.IPAddress.GetAddressFamily(peer) != self._family:
raise errors.ConfdClientError("Peers must be of same address family")
except errors.IPAddressError:
raise errors.ConfdClientError("Peer address %s invalid" % peer)
# UPCALL_REPLY: server reply upcall
# has all ConfdUpcallPayload fields populated
UPCALL_REPLY = 1
# UPCALL_EXPIRE: internal library request expire
# has only salt, type, orig_request and extra_args
UPCALL_EXPIRE = 2
CONFD_UPCALL_TYPES = compat.UniqueFrozenset([
UPCALL_REPLY,
UPCALL_EXPIRE,
])
class ConfdUpcallPayload(objects.ConfigObject):
"""Callback argument for confd replies
@type salt: string
@ivar salt: salt associated with the query
@type type: one of confd.client.CONFD_UPCALL_TYPES
@ivar type: upcall type (server reply, expired request, ...)
@type orig_request: L{objects.ConfdRequest}
@ivar orig_request: original request
@type server_reply: L{objects.ConfdReply}
@ivar server_reply: server reply
@type server_ip: string
@ivar server_ip: answering server ip address
@type server_port: int
@ivar server_port: answering server port
@type extra_args: any
@ivar extra_args: 'args' argument of the SendRequest function
@type client: L{ConfdClient}
@ivar client: current confd client instance
"""
__slots__ = [
"salt",
"type",
"orig_request",
"server_reply",
"server_ip",
"server_port",
"extra_args",
"client",
]
class ConfdClientRequest(objects.ConfdRequest):
"""This is the client-side version of ConfdRequest.
This version of the class helps creating requests, on the client side, by
filling in some default values.
"""
def __init__(self, **kwargs):
objects.ConfdRequest.__init__(self, **kwargs)
if not self.rsalt:
self.rsalt = utils.NewUUID()
if not self.protocol:
self.protocol = constants.CONFD_PROTOCOL_VERSION
if self.type not in constants.CONFD_REQS:
raise errors.ConfdClientError("Invalid request type")
class ConfdFilterCallback(object):
"""Callback that calls another callback, but filters duplicate results.
@ivar consistent: a dictionary indexed by salt; for each salt, if
    all responses were identical, this will be True; this is the
expected state on a healthy cluster; on inconsistent or
partitioned clusters, this might be False, if we see answers
with the same serial but different contents
"""
def __init__(self, callback, logger=None):
"""Constructor for ConfdFilterCallback
@type callback: f(L{ConfdUpcallPayload})
@param callback: function to call when getting answers
@type logger: logging.Logger
@param logger: optional logger for internal conditions
"""
if not callable(callback):
raise errors.ProgrammerError("callback must be callable")
self._callback = callback
self._logger = logger
# answers contains a dict of salt -> answer
self._answers = {}
self.consistent = {}
def _LogFilter(self, salt, new_reply, old_reply):
if not self._logger:
return
if new_reply.serial > old_reply.serial:
self._logger.debug("Filtering confirming answer, with newer"
" serial for query %s" % salt)
elif new_reply.serial == old_reply.serial:
if new_reply.answer != old_reply.answer:
self._logger.warning("Got incoherent answers for query %s"
" (serial: %s)" % (salt, new_reply.serial))
else:
self._logger.debug("Filtering confirming answer, with same"
" serial for query %s" % salt)
else:
self._logger.debug("Filtering outdated answer for query %s"
" serial: (%d < %d)" % (salt, old_reply.serial,
new_reply.serial))
def _HandleExpire(self, up):
    # if the salt is missing, we received no answer before the expiration
if up.salt in self._answers:
del self._answers[up.salt]
if up.salt in self.consistent:
del self.consistent[up.salt]
def _HandleReply(self, up):
"""Handle a single confd reply, and decide whether to filter it.
@rtype: boolean
@return: True if the reply should be filtered, False if it should be passed
on to the up-callback
"""
filter_upcall = False
salt = up.salt
if salt not in self.consistent:
self.consistent[salt] = True
if salt not in self._answers:
# first answer for a query (don't filter, and record)
self._answers[salt] = up.server_reply
elif up.server_reply.serial > self._answers[salt].serial:
# newer answer (record, and compare contents)
old_answer = self._answers[salt]
self._answers[salt] = up.server_reply
if up.server_reply.answer == old_answer.answer:
# same content (filter) (version upgrade was unrelated)
filter_upcall = True
self._LogFilter(salt, up.server_reply, old_answer)
# else: different content, pass up a second answer
else:
# older or same-version answer (duplicate or outdated, filter)
if (up.server_reply.serial == self._answers[salt].serial and
up.server_reply.answer != self._answers[salt].answer):
self.consistent[salt] = False
filter_upcall = True
self._LogFilter(salt, up.server_reply, self._answers[salt])
return filter_upcall
def __call__(self, up):
"""Filtering callback
@type up: L{ConfdUpcallPayload}
    @param up: upcall payload
"""
filter_upcall = False
if up.type == UPCALL_REPLY:
filter_upcall = self._HandleReply(up)
elif up.type == UPCALL_EXPIRE:
self._HandleExpire(up)
if not filter_upcall:
self._callback(up)
class ConfdCountingCallback(object):
"""Callback that calls another callback, and counts the answers
"""
def __init__(self, callback, logger=None):
"""Constructor for ConfdCountingCallback
@type callback: f(L{ConfdUpcallPayload})
@param callback: function to call when getting answers
@type logger: logging.Logger
@param logger: optional logger for internal conditions
"""
if not callable(callback):
raise errors.ProgrammerError("callback must be callable")
self._callback = callback
self._logger = logger
# answers contains a dict of salt -> count
self._answers = {}
def RegisterQuery(self, salt):
if salt in self._answers:
raise errors.ProgrammerError("query already registered")
self._answers[salt] = 0
def AllAnswered(self):
"""Have all the registered queries received at least an answer?
"""
return compat.all(self._answers.values())
def _HandleExpire(self, up):
    # if the salt is missing, we received no answer before the expiration
if up.salt in self._answers:
del self._answers[up.salt]
def _HandleReply(self, up):
"""Handle a single confd reply, and decide whether to filter it.
@rtype: boolean
@return: True if the reply should be filtered, False if it should be passed
on to the up-callback
"""
if up.salt in self._answers:
self._answers[up.salt] += 1
def __call__(self, up):
"""Filtering callback
@type up: L{ConfdUpcallPayload}
    @param up: upcall payload
"""
if up.type == UPCALL_REPLY:
self._HandleReply(up)
elif up.type == UPCALL_EXPIRE:
self._HandleExpire(up)
self._callback(up)
class StoreResultCallback(object):
"""Callback that simply stores the most recent answer.
@ivar _answers: dict of salt to (have_answer, reply)
"""
_NO_KEY = (False, None)
def __init__(self):
"""Constructor for StoreResultCallback
"""
# answers contains a dict of salt -> best result
self._answers = {}
def GetResponse(self, salt):
"""Return the best match for a salt
"""
return self._answers.get(salt, self._NO_KEY)
def _HandleExpire(self, up):
"""Expiration handler.
"""
if up.salt in self._answers and self._answers[up.salt] == self._NO_KEY:
del self._answers[up.salt]
def _HandleReply(self, up):
"""Handle a single confd reply, and decide whether to filter it.
"""
self._answers[up.salt] = (True, up)
def __call__(self, up):
"""Filtering callback
@type up: L{ConfdUpcallPayload}
    @param up: upcall payload
"""
if up.type == UPCALL_REPLY:
self._HandleReply(up)
elif up.type == UPCALL_EXPIRE:
self._HandleExpire(up)
def GetConfdClient(callback):
"""Return a client configured using the given callback.
This is handy to abstract the MC list and HMAC key reading.
@attention: This should only be called on nodes which are part of a
cluster, since it depends on a valid (ganeti) data directory;
for code running outside of a cluster, you need to create the
client manually
"""
ss = ssconf.SimpleStore()
mc_file = ss.KeyToFilename(constants.SS_MASTER_CANDIDATES_IPS)
mc_list = utils.ReadFile(mc_file).splitlines()
hmac_key = utils.ReadFile(pathutils.CONFD_HMAC_KEY)
return ConfdClient(hmac_key, mc_list, callback)
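# A minimal usage sketch of the callback stack above (hedged: it assumes the
# ConfdClientRequest class, ConfdFilterCallback's constructor and the client's
# SendRequest() method defined earlier in this module, and a node that is part
# of a cluster, as GetConfdClient requires):
#
#     store = StoreResultCallback()
#     client = GetConfdClient(ConfdFilterCallback(store))
#     client.SendRequest(req)  # req: a ConfdClientRequest
#     (have_answer, reply) = store.GetResponse(req.rsalt)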
|
|
# Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test the methods related to query."""
import datetime
import fixtures
import mock
from oslo_utils import timeutils
from oslotest import base
from oslotest import mockpatch
import wsme
from ceilometer.alarm.storage import base as alarm_storage_base
from ceilometer.api.controllers.v2 import base as v2_base
from ceilometer.api.controllers.v2 import meters
from ceilometer.api.controllers.v2 import utils
from ceilometer import storage
from ceilometer.storage import base as storage_base
from ceilometer.tests import base as tests_base
class TestQuery(base.BaseTestCase):
def setUp(self):
super(TestQuery, self).setUp()
self.useFixture(fixtures.MonkeyPatch(
'pecan.response', mock.MagicMock()))
def test_get_value_as_type_with_integer(self):
query = v2_base.Query(field='metadata.size',
op='eq',
value='123',
type='integer')
expected = 123
self.assertEqual(expected, query._get_value_as_type())
def test_get_value_as_type_with_float(self):
query = v2_base.Query(field='metadata.size',
op='eq',
value='123.456',
type='float')
expected = 123.456
self.assertEqual(expected, query._get_value_as_type())
def test_get_value_as_type_with_boolean(self):
query = v2_base.Query(field='metadata.is_public',
op='eq',
value='True',
type='boolean')
expected = True
self.assertEqual(expected, query._get_value_as_type())
def test_get_value_as_type_with_string(self):
query = v2_base.Query(field='metadata.name',
op='eq',
value='linux',
type='string')
expected = 'linux'
self.assertEqual(expected, query._get_value_as_type())
def test_get_value_as_type_with_datetime(self):
query = v2_base.Query(field='metadata.date',
op='eq',
value='2014-01-01T05:00:00',
type='datetime')
self.assertIsInstance(query._get_value_as_type(), datetime.datetime)
self.assertIsNone(query._get_value_as_type().tzinfo)
def test_get_value_as_type_with_integer_without_type(self):
query = v2_base.Query(field='metadata.size',
op='eq',
value='123')
expected = 123
self.assertEqual(expected, query._get_value_as_type())
def test_get_value_as_type_with_float_without_type(self):
query = v2_base.Query(field='metadata.size',
op='eq',
value='123.456')
expected = 123.456
self.assertEqual(expected, query._get_value_as_type())
def test_get_value_as_type_with_boolean_without_type(self):
query = v2_base.Query(field='metadata.is_public',
op='eq',
value='True')
expected = True
self.assertEqual(expected, query._get_value_as_type())
def test_get_value_as_type_with_string_without_type(self):
query = v2_base.Query(field='metadata.name',
op='eq',
value='linux')
expected = 'linux'
self.assertEqual(expected, query._get_value_as_type())
def test_get_value_as_type_with_bad_type(self):
query = v2_base.Query(field='metadata.size',
op='eq',
value='123.456',
type='blob')
self.assertRaises(wsme.exc.ClientSideError, query._get_value_as_type)
def test_get_value_as_type_with_bad_value(self):
query = v2_base.Query(field='metadata.size',
op='eq',
value='fake',
type='integer')
self.assertRaises(wsme.exc.ClientSideError, query._get_value_as_type)
def test_get_value_as_type_integer_expression_without_type(self):
# bug 1221736
query = v2_base.Query(field='should_be_a_string',
op='eq',
value='WWW-Layer-4a80714f')
expected = 'WWW-Layer-4a80714f'
self.assertEqual(expected, query._get_value_as_type())
def test_get_value_as_type_boolean_expression_without_type(self):
# bug 1221736
query = v2_base.Query(field='should_be_a_string',
op='eq',
value='True or False')
expected = 'True or False'
self.assertEqual(expected, query._get_value_as_type())
def test_get_value_as_type_with_syntax_error(self):
# bug 1221736
value = 'WWW-Layer-4a80714f-0232-4580-aa5e-81494d1a4147-uolhh25p5xxm'
query = v2_base.Query(field='group_id',
op='eq',
value=value)
expected = value
self.assertEqual(expected, query._get_value_as_type())
def test_get_value_as_type_with_syntax_error_colons(self):
# bug 1221736
value = 'Ref::StackId'
query = v2_base.Query(field='field_name',
op='eq',
value=value)
expected = value
self.assertEqual(expected, query._get_value_as_type())
class TestValidateGroupByFields(base.BaseTestCase):
def test_valid_field(self):
result = meters._validate_groupby_fields(['user_id'])
self.assertEqual(['user_id'], result)
def test_valid_fields_multiple(self):
result = set(meters._validate_groupby_fields(
['user_id', 'project_id', 'source']))
self.assertEqual(set(['user_id', 'project_id', 'source']), result)
def test_invalid_field(self):
self.assertRaises(wsme.exc.UnknownArgument,
meters._validate_groupby_fields,
['wtf'])
def test_invalid_field_multiple(self):
self.assertRaises(wsme.exc.UnknownArgument,
meters._validate_groupby_fields,
['user_id', 'wtf', 'project_id', 'source'])
def test_duplicate_fields(self):
result = set(
meters._validate_groupby_fields(['user_id', 'source', 'user_id'])
)
self.assertEqual(set(['user_id', 'source']), result)
class TestQueryToKwArgs(tests_base.BaseTestCase):
def setUp(self):
super(TestQueryToKwArgs, self).setUp()
self.useFixture(mockpatch.PatchObject(
utils, 'sanitize_query', side_effect=lambda x, y, **z: x))
self.useFixture(mockpatch.PatchObject(
utils, '_verify_query_segregation', side_effect=lambda x, **z: x))
def test_sample_filter_single(self):
q = [v2_base.Query(field='user_id',
op='eq',
value='uid')]
kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__)
self.assertIn('user', kwargs)
self.assertEqual(1, len(kwargs))
self.assertEqual('uid', kwargs['user'])
def test_sample_filter_multi(self):
q = [v2_base.Query(field='user_id',
op='eq',
value='uid'),
v2_base.Query(field='project_id',
op='eq',
value='pid'),
v2_base.Query(field='resource_id',
op='eq',
value='rid'),
v2_base.Query(field='source',
op='eq',
value='source_name'),
v2_base.Query(field='meter',
op='eq',
value='meter_name')]
kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__)
self.assertEqual(5, len(kwargs))
self.assertEqual('uid', kwargs['user'])
self.assertEqual('pid', kwargs['project'])
self.assertEqual('rid', kwargs['resource'])
self.assertEqual('source_name', kwargs['source'])
self.assertEqual('meter_name', kwargs['meter'])
def test_sample_filter_timestamp(self):
ts_start = timeutils.utcnow()
ts_end = ts_start + datetime.timedelta(minutes=5)
q = [v2_base.Query(field='timestamp',
op='lt',
value=str(ts_end)),
v2_base.Query(field='timestamp',
op='gt',
value=str(ts_start))]
kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__)
self.assertEqual(4, len(kwargs))
self.assertTimestampEqual(kwargs['start_timestamp'], ts_start)
self.assertTimestampEqual(kwargs['end_timestamp'], ts_end)
self.assertEqual('gt', kwargs['start_timestamp_op'])
self.assertEqual('lt', kwargs['end_timestamp_op'])
def test_sample_filter_meta(self):
q = [v2_base.Query(field='metadata.size',
op='eq',
value='20'),
v2_base.Query(field='resource_metadata.id',
op='eq',
value='meta_id')]
kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__)
self.assertEqual(1, len(kwargs))
self.assertEqual(2, len(kwargs['metaquery']))
self.assertEqual(20, kwargs['metaquery']['metadata.size'])
self.assertEqual('meta_id', kwargs['metaquery']['metadata.id'])
def test_sample_filter_non_equality_on_metadata(self):
queries = [v2_base.Query(field='resource_metadata.image_id',
op='gt',
value='image',
type='string'),
v2_base.Query(field='metadata.ramdisk_id',
op='le',
value='ramdisk',
type='string')]
with mock.patch('pecan.request') as request:
request.headers.return_value = {'X-ProjectId': 'foobar'}
self.assertRaises(
wsme.exc.InvalidInput,
utils.query_to_kwargs,
queries,
storage.SampleFilter.__init__)
def test_sample_filter_invalid_field(self):
q = [v2_base.Query(field='invalid',
op='eq',
value='20')]
self.assertRaises(
wsme.exc.UnknownArgument,
utils.query_to_kwargs, q, storage.SampleFilter.__init__)
def test_sample_filter_invalid_op(self):
q = [v2_base.Query(field='user_id',
op='lt',
value='20')]
self.assertRaises(
wsme.exc.InvalidInput,
utils.query_to_kwargs, q, storage.SampleFilter.__init__)
def test_sample_filter_timestamp_invalid_op(self):
ts_start = timeutils.utcnow()
q = [v2_base.Query(field='timestamp',
op='eq',
value=str(ts_start))]
self.assertRaises(
wsme.exc.InvalidInput,
utils.query_to_kwargs, q, storage.SampleFilter.__init__)
def test_sample_filter_exclude_internal(self):
queries = [v2_base.Query(field=f,
op='eq',
value='fake',
type='string')
for f in ['y', 'on_behalf_of', 'x']]
with mock.patch('pecan.request') as request:
request.headers.return_value = {'X-ProjectId': 'foobar'}
self.assertRaises(wsme.exc.ClientSideError,
utils.query_to_kwargs,
queries,
storage.SampleFilter.__init__,
internal_keys=['on_behalf_of'])
def test_sample_filter_self_always_excluded(self):
queries = [v2_base.Query(field='user_id',
op='eq',
value='20')]
with mock.patch('pecan.request') as request:
request.headers.return_value = {'X-ProjectId': 'foobar'}
kwargs = utils.query_to_kwargs(queries,
storage.SampleFilter.__init__)
self.assertFalse('self' in kwargs)
def test_sample_filter_translation(self):
queries = [v2_base.Query(field=f,
op='eq',
value='fake_%s' % f,
type='string') for f in ['user_id',
'project_id',
'resource_id']]
with mock.patch('pecan.request') as request:
request.headers.return_value = {'X-ProjectId': 'foobar'}
kwargs = utils.query_to_kwargs(queries,
storage.SampleFilter.__init__)
for o in ['user', 'project', 'resource']:
self.assertEqual('fake_%s_id' % o, kwargs.get(o))
def test_timestamp_validation(self):
q = [v2_base.Query(field='timestamp',
op='le',
value='123')]
exc = self.assertRaises(
wsme.exc.InvalidInput,
utils.query_to_kwargs, q, storage.SampleFilter.__init__)
expected_exc = wsme.exc.InvalidInput('timestamp', '123',
'invalid timestamp format')
self.assertEqual(str(expected_exc), str(exc))
def test_get_alarm_changes_filter_valid_fields(self):
q = [v2_base.Query(field='abc',
op='eq',
value='abc')]
exc = self.assertRaises(
wsme.exc.UnknownArgument,
utils.query_to_kwargs, q,
alarm_storage_base.Connection.get_alarm_changes)
valid_keys = ['alarm_id', 'on_behalf_of', 'project', 'search_offset',
'severity', 'timestamp', 'type', 'user']
msg = ("unrecognized field in query: %s, "
"valid keys: %s") % (q, valid_keys)
expected_exc = wsme.exc.UnknownArgument('abc', msg)
self.assertEqual(str(expected_exc), str(exc))
def test_sample_filter_valid_fields(self):
q = [v2_base.Query(field='abc',
op='eq',
value='abc')]
exc = self.assertRaises(
wsme.exc.UnknownArgument,
utils.query_to_kwargs, q, storage.SampleFilter.__init__)
valid_keys = ['message_id', 'meter', 'project', 'resource',
'search_offset', 'source', 'timestamp', 'user']
msg = ("unrecognized field in query: %s, "
"valid keys: %s") % (q, valid_keys)
expected_exc = wsme.exc.UnknownArgument('abc', msg)
self.assertEqual(str(expected_exc), str(exc))
def test_get_meters_filter_valid_fields(self):
q = [v2_base.Query(field='abc',
op='eq',
value='abc')]
exc = self.assertRaises(
wsme.exc.UnknownArgument,
utils.query_to_kwargs, q, storage_base.Connection.get_meters)
valid_keys = ['project', 'resource', 'source', 'user']
msg = ("unrecognized field in query: %s, "
"valid keys: %s") % (q, valid_keys)
expected_exc = wsme.exc.UnknownArgument('abc', msg)
self.assertEqual(str(expected_exc), str(exc))
def test_get_resources_filter_valid_fields(self):
q = [v2_base.Query(field='abc',
op='eq',
value='abc')]
exc = self.assertRaises(
wsme.exc.UnknownArgument,
utils.query_to_kwargs, q, storage_base.Connection.get_resources)
valid_keys = ['project', 'resource',
'search_offset', 'source', 'timestamp', 'user']
msg = ("unrecognized field in query: %s, "
"valid keys: %s") % (q, valid_keys)
expected_exc = wsme.exc.UnknownArgument('abc', msg)
self.assertEqual(str(expected_exc), str(exc))
def test_get_alarms_filter_valid_fields(self):
q = [v2_base.Query(field='abc',
op='eq',
value='abc')]
exc = self.assertRaises(
wsme.exc.UnknownArgument,
utils.query_to_kwargs, q,
alarm_storage_base.Connection.get_alarms)
valid_keys = ['alarm_id', 'enabled', 'meter', 'name',
'project', 'severity', 'state', 'type', 'user']
msg = ("unrecognized field in query: %s, "
"valid keys: %s") % (q, valid_keys)
expected_exc = wsme.exc.UnknownArgument('abc', msg)
self.assertEqual(str(expected_exc), str(exc))
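# A compact restatement of the translation exercised above (a hedged sketch,
# not new behaviour: the '<x>_id' field -> '<x>' kwarg mapping is exactly what
# test_sample_filter_translation asserts):
#
#     q = [v2_base.Query(field='user_id', op='eq', value='uid')]
#     utils.query_to_kwargs(q, storage.SampleFilter.__init__)
#     # -> {'user': 'uid'}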
|
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""XML reporting for coverage.py"""
import os
import os.path
import sys
import time
import xml.dom.minidom
from coverage import __url__, __version__, files
from coverage.misc import isolate_module, human_sorted, human_sorted_items
from coverage.report import get_analysis_to_report
os = isolate_module(os)
DTD_URL = 'https://raw.githubusercontent.com/cobertura/web/master/htdocs/xml/coverage-04.dtd'
def rate(hit, num):
"""Return the fraction of `hit`/`num`, as a string."""
if num == 0:
return "1"
else:
return "%.4g" % (float(hit) / num)
class XmlReporter:
"""A reporter for writing Cobertura-style XML coverage results."""
report_type = "XML report"
def __init__(self, coverage):
self.coverage = coverage
self.config = self.coverage.config
self.source_paths = set()
if self.config.source:
for src in self.config.source:
if os.path.exists(src):
if not self.config.relative_files:
src = files.canonical_filename(src)
self.source_paths.add(src)
self.packages = {}
self.xml_out = None
def report(self, morfs, outfile=None):
"""Generate a Cobertura-compatible XML report for `morfs`.
`morfs` is a list of modules or file names.
`outfile` is a file object to write the XML to.
"""
# Initial setup.
outfile = outfile or sys.stdout
has_arcs = self.coverage.get_data().has_arcs()
# Create the DOM that will store the data.
impl = xml.dom.minidom.getDOMImplementation()
self.xml_out = impl.createDocument(None, "coverage", None)
# Write header stuff.
xcoverage = self.xml_out.documentElement
xcoverage.setAttribute("version", __version__)
xcoverage.setAttribute("timestamp", str(int(time.time()*1000)))
xcoverage.appendChild(self.xml_out.createComment(
" Generated by coverage.py: %s " % __url__
))
xcoverage.appendChild(self.xml_out.createComment(" Based on %s " % DTD_URL))
# Call xml_file for each file in the data.
for fr, analysis in get_analysis_to_report(self.coverage, morfs):
self.xml_file(fr, analysis, has_arcs)
xsources = self.xml_out.createElement("sources")
xcoverage.appendChild(xsources)
# Populate the XML DOM with the source info.
for path in human_sorted(self.source_paths):
xsource = self.xml_out.createElement("source")
xsources.appendChild(xsource)
txt = self.xml_out.createTextNode(path)
xsource.appendChild(txt)
lnum_tot, lhits_tot = 0, 0
bnum_tot, bhits_tot = 0, 0
xpackages = self.xml_out.createElement("packages")
xcoverage.appendChild(xpackages)
# Populate the XML DOM with the package info.
for pkg_name, pkg_data in human_sorted_items(self.packages.items()):
class_elts, lhits, lnum, bhits, bnum = pkg_data
xpackage = self.xml_out.createElement("package")
xpackages.appendChild(xpackage)
xclasses = self.xml_out.createElement("classes")
xpackage.appendChild(xclasses)
for _, class_elt in human_sorted_items(class_elts.items()):
xclasses.appendChild(class_elt)
xpackage.setAttribute("name", pkg_name.replace(os.sep, '.'))
xpackage.setAttribute("line-rate", rate(lhits, lnum))
if has_arcs:
branch_rate = rate(bhits, bnum)
else:
branch_rate = "0"
xpackage.setAttribute("branch-rate", branch_rate)
xpackage.setAttribute("complexity", "0")
lnum_tot += lnum
lhits_tot += lhits
bnum_tot += bnum
bhits_tot += bhits
xcoverage.setAttribute("lines-valid", str(lnum_tot))
xcoverage.setAttribute("lines-covered", str(lhits_tot))
xcoverage.setAttribute("line-rate", rate(lhits_tot, lnum_tot))
if has_arcs:
xcoverage.setAttribute("branches-valid", str(bnum_tot))
xcoverage.setAttribute("branches-covered", str(bhits_tot))
xcoverage.setAttribute("branch-rate", rate(bhits_tot, bnum_tot))
else:
xcoverage.setAttribute("branches-covered", "0")
xcoverage.setAttribute("branches-valid", "0")
xcoverage.setAttribute("branch-rate", "0")
xcoverage.setAttribute("complexity", "0")
# Write the output file.
outfile.write(serialize_xml(self.xml_out))
# Return the total percentage.
denom = lnum_tot + bnum_tot
if denom == 0:
pct = 0.0
else:
pct = 100.0 * (lhits_tot + bhits_tot) / denom
return pct
def xml_file(self, fr, analysis, has_arcs):
"""Add to the XML report for a single file."""
if self.config.skip_empty:
if analysis.numbers.n_statements == 0:
return
# Create the 'lines' and 'package' XML elements, which
# are populated later. Note that a package == a directory.
filename = fr.filename.replace("\\", "/")
for source_path in self.source_paths:
source_path = files.canonical_filename(source_path)
if filename.startswith(source_path.replace("\\", "/") + "/"):
rel_name = filename[len(source_path)+1:]
break
else:
rel_name = fr.relative_filename()
self.source_paths.add(fr.filename[:-len(rel_name)].rstrip(r"\/"))
dirname = os.path.dirname(rel_name) or "."
dirname = "/".join(dirname.split("/")[:self.config.xml_package_depth])
package_name = dirname.replace("/", ".")
package = self.packages.setdefault(package_name, [{}, 0, 0, 0, 0])
xclass = self.xml_out.createElement("class")
xclass.appendChild(self.xml_out.createElement("methods"))
xlines = self.xml_out.createElement("lines")
xclass.appendChild(xlines)
xclass.setAttribute("name", os.path.relpath(rel_name, dirname))
xclass.setAttribute("filename", rel_name.replace("\\", "/"))
xclass.setAttribute("complexity", "0")
branch_stats = analysis.branch_stats()
missing_branch_arcs = analysis.missing_branch_arcs()
# For each statement, create an XML 'line' element.
for line in sorted(analysis.statements):
xline = self.xml_out.createElement("line")
xline.setAttribute("number", str(line))
# Q: can we get info about the number of times a statement is
# executed? If so, that should be recorded here.
xline.setAttribute("hits", str(int(line not in analysis.missing)))
if has_arcs:
if line in branch_stats:
total, taken = branch_stats[line]
xline.setAttribute("branch", "true")
xline.setAttribute(
"condition-coverage",
"%d%% (%d/%d)" % (100*taken//total, taken, total)
)
if line in missing_branch_arcs:
annlines = ["exit" if b < 0 else str(b) for b in missing_branch_arcs[line]]
xline.setAttribute("missing-branches", ",".join(annlines))
xlines.appendChild(xline)
class_lines = len(analysis.statements)
class_hits = class_lines - len(analysis.missing)
if has_arcs:
class_branches = sum(t for t, k in branch_stats.values())
missing_branches = sum(t - k for t, k in branch_stats.values())
class_br_hits = class_branches - missing_branches
else:
class_branches = 0.0
class_br_hits = 0.0
# Finalize the statistics that are collected in the XML DOM.
xclass.setAttribute("line-rate", rate(class_hits, class_lines))
if has_arcs:
branch_rate = rate(class_br_hits, class_branches)
else:
branch_rate = "0"
xclass.setAttribute("branch-rate", branch_rate)
package[0][rel_name] = xclass
package[1] += class_hits
package[2] += class_lines
package[3] += class_br_hits
package[4] += class_branches
def serialize_xml(dom):
"""Serialize a minidom node to XML."""
return dom.toprettyxml()
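# A minimal usage sketch (hedged: this goes through coverage.py's public API;
# callers normally reach XmlReporter via Coverage.xml_report() rather than by
# instantiating it directly):
#
#     import coverage
#     cov = coverage.Coverage()
#     cov.start()
#     ...  # run the code under measurement
#     cov.stop()
#     cov.xml_report(outfile="coverage.xml")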
|
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib import exceptions as lib_exc
import testtools
from neutron.tests.api import base
from neutron.tests.api import clients
from neutron.tests.tempest import config
from neutron.tests.tempest import test
from tempest_lib.common.utils import data_utils
CONF = config.CONF
class SharedNetworksTest(base.BaseAdminNetworkTest):
@classmethod
def resource_setup(cls):
super(SharedNetworksTest, cls).resource_setup()
cls.shared_network = cls.create_shared_network()
@test.idempotent_id('6661d219-b96d-4597-ad10-55766123421a')
def test_filtering_shared_networks(self):
# this test is necessary because the 'shared' column does not actually
# exist on networks so the filter function has to translate it into
# queries against the RBAC table
self.create_network()
self._check_shared_correct(
self.client.list_networks(shared=True)['networks'], True)
self._check_shared_correct(
self.admin_client.list_networks(shared=True)['networks'], True)
self._check_shared_correct(
self.client.list_networks(shared=False)['networks'], False)
self._check_shared_correct(
self.admin_client.list_networks(shared=False)['networks'], False)
def _check_shared_correct(self, items, shared):
self.assertNotEmpty(items)
self.assertTrue(all(n['shared'] == shared for n in items))
@test.idempotent_id('6661d219-b96d-4597-ad10-51672353421a')
def test_filtering_shared_subnets(self):
# shared subnets need to be tested because their shared status isn't
# visible as a regular API attribute and it's solely dependent on the
# parent network
reg = self.create_network()
priv = self.create_subnet(reg, client=self.client)
shared = self.create_subnet(self.shared_network,
client=self.admin_client)
self.assertIn(shared, self.client.list_subnets(shared=True)['subnets'])
self.assertIn(shared,
self.admin_client.list_subnets(shared=True)['subnets'])
self.assertNotIn(priv,
self.client.list_subnets(shared=True)['subnets'])
self.assertNotIn(priv,
self.admin_client.list_subnets(shared=True)['subnets'])
self.assertIn(priv, self.client.list_subnets(shared=False)['subnets'])
self.assertIn(priv,
self.admin_client.list_subnets(shared=False)['subnets'])
self.assertNotIn(shared,
self.client.list_subnets(shared=False)['subnets'])
self.assertNotIn(shared,
self.admin_client.list_subnets(shared=False)['subnets'])
@test.idempotent_id('6661d219-b96d-4597-ad10-55766ce4abf7')
def test_create_update_shared_network(self):
shared_network = self.create_shared_network()
net_id = shared_network['id']
self.assertEqual('ACTIVE', shared_network['status'])
self.assertIsNotNone(shared_network['id'])
self.assertTrue(self.shared_network['shared'])
new_name = "New_shared_network"
body = self.admin_client.update_network(net_id, name=new_name,
admin_state_up=False,
shared=False)
updated_net = body['network']
self.assertEqual(new_name, updated_net['name'])
self.assertFalse(updated_net['shared'])
self.assertFalse(updated_net['admin_state_up'])
@test.idempotent_id('9c31fabb-0181-464f-9ace-95144fe9ca77')
def test_create_port_shared_network_as_non_admin_tenant(self):
# create a port as a non-admin tenant
body = self.client.create_port(network_id=self.shared_network['id'])
port = body['port']
self.addCleanup(self.admin_client.delete_port, port['id'])
# verify the tenant ids of the admin network and the non-admin port differ
self.assertNotEqual(self.shared_network['tenant_id'],
port['tenant_id'])
@test.idempotent_id('3e39c4a6-9caf-4710-88f1-d20073c6dd76')
def test_create_bulk_shared_network(self):
# Creates 2 networks in one request
net_nm = [data_utils.rand_name('network'),
data_utils.rand_name('network')]
body = self.admin_client.create_bulk_network(net_nm, shared=True)
created_networks = body['networks']
for net in created_networks:
self.addCleanup(self.admin_client.delete_network, net['id'])
self.assertIsNotNone(net['id'])
self.assertTrue(net['shared'])
def _list_shared_networks(self, user):
body = user.list_networks(shared=True)
networks_list = [net['id'] for net in body['networks']]
self.assertIn(self.shared_network['id'], networks_list)
self.assertTrue(self.shared_network['shared'])
@test.idempotent_id('a064a9fd-e02f-474a-8159-f828cd636a28')
def test_list_shared_networks(self):
# List the shared networks and confirm that the shared network
# extension attribute is returned for networks created as shared
self._list_shared_networks(self.admin_client)
self._list_shared_networks(self.client)
def _show_shared_network(self, user):
body = user.show_network(self.shared_network['id'])
show_shared_net = body['network']
self.assertEqual(self.shared_network['name'], show_shared_net['name'])
self.assertEqual(self.shared_network['id'], show_shared_net['id'])
self.assertTrue(show_shared_net['shared'])
@test.idempotent_id('e03c92a2-638d-4bfa-b50a-b1f66f087e58')
def test_show_shared_networks_attribute(self):
# Show a shared network and confirm that the shared network
# extension attribute is returned.
self._show_shared_network(self.admin_client)
self._show_shared_network(self.client)
class AllowedAddressPairSharedNetworkTest(base.BaseAdminNetworkTest):
allowed_address_pairs = [{'ip_address': '1.1.1.1'}]
@classmethod
def skip_checks(cls):
super(AllowedAddressPairSharedNetworkTest, cls).skip_checks()
if not test.is_extension_enabled('allowed-address-pairs', 'network'):
msg = "Allowed Address Pairs extension not enabled."
raise cls.skipException(msg)
@classmethod
def resource_setup(cls):
super(AllowedAddressPairSharedNetworkTest, cls).resource_setup()
cls.network = cls.create_shared_network()
cls.create_subnet(cls.network, client=cls.admin_client)
@test.attr(type='smoke')
@test.idempotent_id('86c3529b-1231-40de-803c-ffffffff1fff')
def test_create_with_address_pair_blocked_on_other_network(self):
with testtools.ExpectedException(lib_exc.Forbidden):
self.create_port(self.network,
allowed_address_pairs=self.allowed_address_pairs)
@test.attr(type='smoke')
@test.idempotent_id('86c3529b-1231-40de-803c-ffffffff2fff')
def test_update_with_address_pair_blocked_on_other_network(self):
port = self.create_port(self.network)
with testtools.ExpectedException(lib_exc.Forbidden):
self.update_port(
port, allowed_address_pairs=self.allowed_address_pairs)
class RBACSharedNetworksTest(base.BaseAdminNetworkTest):
force_tenant_isolation = True
@classmethod
def resource_setup(cls):
super(RBACSharedNetworksTest, cls).resource_setup()
if not test.is_extension_enabled('rbac_policies', 'network'):
msg = "rbac extension not enabled."
raise cls.skipException(msg)
creds = cls.isolated_creds.get_alt_creds()
cls.client2 = clients.Manager(credentials=creds).network_client
def _make_admin_net_and_subnet_shared_to_tenant_id(self, tenant_id):
net = self.admin_client.create_network(
name=data_utils.rand_name('test-network-'))['network']
self.addCleanup(self.admin_client.delete_network, net['id'])
subnet = self.create_subnet(net, client=self.admin_client)
# the network is shared with the given tenant via an RBAC policy
pol = self.admin_client.create_rbac_policy(
object_type='network', object_id=net['id'],
action='access_as_shared', target_tenant=tenant_id
)['rbac_policy']
return {'network': net, 'subnet': subnet, 'policy': pol}
@test.attr(type='smoke')
@test.idempotent_id('86c3529b-1231-40de-803c-afffffff1fff')
def test_network_only_visible_to_policy_target(self):
net = self._make_admin_net_and_subnet_shared_to_tenant_id(
self.client.tenant_id)['network']
self.client.show_network(net['id'])
with testtools.ExpectedException(lib_exc.NotFound):
# client2 has not been granted access
self.client2.show_network(net['id'])
@test.attr(type='smoke')
@test.idempotent_id('86c3529b-1231-40de-803c-afffffff2fff')
def test_subnet_on_network_only_visible_to_policy_target(self):
sub = self._make_admin_net_and_subnet_shared_to_tenant_id(
self.client.tenant_id)['subnet']
self.client.show_subnet(sub['id'])
with testtools.ExpectedException(lib_exc.NotFound):
# client2 has not been granted access
self.client2.show_subnet(sub['id'])
@test.attr(type='smoke')
@test.idempotent_id('86c3529b-1231-40de-803c-afffffff2eee')
def test_policy_target_update(self):
res = self._make_admin_net_and_subnet_shared_to_tenant_id(
self.client.tenant_id)
# change to client2
update_res = self.admin_client.update_rbac_policy(
res['policy']['id'], target_tenant=self.client2.tenant_id)
self.assertEqual(self.client2.tenant_id,
update_res['rbac_policy']['target_tenant'])
# make sure everything else stayed the same
res['policy'].pop('target_tenant')
update_res['rbac_policy'].pop('target_tenant')
self.assertEqual(res['policy'], update_res['rbac_policy'])
@test.attr(type='smoke')
@test.idempotent_id('86c3529b-1231-40de-803c-afffffff3fff')
def test_port_presence_prevents_network_rbac_policy_deletion(self):
res = self._make_admin_net_and_subnet_shared_to_tenant_id(
self.client.tenant_id)
port = self.client.create_port(network_id=res['network']['id'])['port']
# a port on the network should prevent the deletion of a policy
# required for it to exist
with testtools.ExpectedException(lib_exc.Conflict):
self.admin_client.delete_rbac_policy(res['policy']['id'])
# a wildcard policy should allow the specific policy to be deleted
# since it allows the remaining port
wild = self.admin_client.create_rbac_policy(
object_type='network', object_id=res['network']['id'],
action='access_as_shared', target_tenant='*')['rbac_policy']
self.admin_client.delete_rbac_policy(res['policy']['id'])
# now that the wildcard policy is the only one remaining, it should be
# subject to the same restriction
with testtools.ExpectedException(lib_exc.Conflict):
self.admin_client.delete_rbac_policy(wild['id'])
# similarly, we can't update the policy to a different tenant
with testtools.ExpectedException(lib_exc.Conflict):
self.admin_client.update_rbac_policy(
wild['id'], target_tenant=self.client2.tenant_id)
self.client.delete_port(port['id'])
# anchor is gone, delete should pass
self.admin_client.delete_rbac_policy(wild['id'])
@test.attr(type='smoke')
@test.idempotent_id('86c3529b-1231-40de-803c-beefbeefbeef')
def test_tenant_can_delete_port_on_own_network(self):
# TODO(kevinbenton): make adjustments to the db lookup to
# make this work.
msg = "Non-admin cannot currently delete other's ports."
raise self.skipException(msg)
# pylint: disable=unreachable
net = self.create_network() # owned by self.client
self.client.create_rbac_policy(
object_type='network', object_id=net['id'],
action='access_as_shared', target_tenant=self.client2.tenant_id)
port = self.client2.create_port(network_id=net['id'])['port']
self.client.delete_port(port['id'])
@test.attr(type='smoke')
@test.idempotent_id('86c3529b-1231-40de-803c-afffffff4fff')
def test_regular_client_shares_to_another_regular_client(self):
net = self.create_network() # owned by self.client
with testtools.ExpectedException(lib_exc.NotFound):
self.client2.show_network(net['id'])
pol = self.client.create_rbac_policy(
object_type='network', object_id=net['id'],
action='access_as_shared', target_tenant=self.client2.tenant_id)
self.client2.show_network(net['id'])
self.assertIn(pol['rbac_policy'],
self.client.list_rbac_policies()['rbac_policies'])
# ensure that 'client2' can't see the policy sharing the network to it
# because the policy belongs to 'client'
self.assertNotIn(pol['rbac_policy']['id'],
[p['id']
for p in self.client2.list_rbac_policies()['rbac_policies']])
@test.attr(type='smoke')
@test.idempotent_id('86c3529b-1231-40de-803c-afffffff5fff')
def test_policy_show(self):
res = self._make_admin_net_and_subnet_shared_to_tenant_id(
self.client.tenant_id)
p1 = res['policy']
p2 = self.admin_client.create_rbac_policy(
object_type='network', object_id=res['network']['id'],
action='access_as_shared',
target_tenant='*')['rbac_policy']
self.assertEqual(
p1, self.admin_client.show_rbac_policy(p1['id'])['rbac_policy'])
self.assertEqual(
p2, self.admin_client.show_rbac_policy(p2['id'])['rbac_policy'])
@test.attr(type='smoke')
@test.idempotent_id('86c3529b-1231-40de-803c-afffffff6fff')
def test_regular_client_blocked_from_sharing_anothers_network(self):
net = self._make_admin_net_and_subnet_shared_to_tenant_id(
self.client.tenant_id)['network']
with testtools.ExpectedException(lib_exc.BadRequest):
self.client.create_rbac_policy(
object_type='network', object_id=net['id'],
action='access_as_shared', target_tenant=self.client.tenant_id)
@test.attr(type='smoke')
@test.idempotent_id('86c3529b-1231-40de-803c-afffffff7fff')
def test_regular_client_blocked_from_sharing_with_wildcard(self):
net = self.create_network()
with testtools.ExpectedException(lib_exc.Forbidden):
self.client.create_rbac_policy(
object_type='network', object_id=net['id'],
action='access_as_shared', target_tenant='*')
# ensure it works on update as well
pol = self.client.create_rbac_policy(
object_type='network', object_id=net['id'],
action='access_as_shared', target_tenant=self.client2.tenant_id)
with testtools.ExpectedException(lib_exc.Forbidden):
self.client.update_rbac_policy(pol['rbac_policy']['id'],
target_tenant='*')
@test.attr(type='smoke')
@test.idempotent_id('86c3529b-1231-40de-803c-aeeeeeee7fff')
def test_filtering_works_with_rbac_records_present(self):
resp = self._make_admin_net_and_subnet_shared_to_tenant_id(
self.client.tenant_id)
net = resp['network']['id']
sub = resp['subnet']['id']
self.admin_client.create_rbac_policy(
object_type='network', object_id=net,
action='access_as_shared', target_tenant='*')
self._assert_shared_object_id_listing_presence('subnets', False, sub)
self._assert_shared_object_id_listing_presence('subnets', True, sub)
self._assert_shared_object_id_listing_presence('networks', False, net)
self._assert_shared_object_id_listing_presence('networks', True, net)
def _assert_shared_object_id_listing_presence(self, resource, shared, oid):
lister = getattr(self.admin_client, 'list_%s' % resource)
objects = [o['id'] for o in lister(shared=shared)[resource]]
if shared:
self.assertIn(oid, objects)
else:
self.assertNotIn(oid, objects)
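# The RBAC primitive all of the tests above revolve around, in one call
# (a hedged sketch; the argument shape is exactly what these tests exercise):
#
#     client.create_rbac_policy(
#         object_type='network', object_id=net['id'],
#         action='access_as_shared',
#         target_tenant=tenant_id)  # or '*' for a wildcard share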
|
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-lines
import ast
import unittest
from copy import deepcopy
import httplib2
from googleapiclient.errors import HttpError
from airflow import AirflowException
from airflow.gcp.operators.compute import (
GceInstanceGroupManagerUpdateTemplateOperator, GceInstanceStartOperator, GceInstanceStopOperator,
GceInstanceTemplateCopyOperator, GceSetMachineTypeOperator,
)
from airflow.models import DAG, TaskInstance
from airflow.utils import timezone
from tests.compat import mock
EMPTY_CONTENT = b''
GCP_PROJECT_ID = 'project-id'
GCE_ZONE = 'zone'
RESOURCE_ID = 'resource-id'
GCE_SHORT_MACHINE_TYPE_NAME = 'n1-machine-type'
SET_MACHINE_TYPE_BODY = {
'machineType': 'zones/{}/machineTypes/{}'.format(GCE_ZONE, GCE_SHORT_MACHINE_TYPE_NAME)
}
DEFAULT_DATE = timezone.datetime(2017, 1, 1)
class TestGceInstanceStart(unittest.TestCase):
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_instance_start(self, mock_hook):
mock_hook.return_value.start_instance.return_value = True
op = GceInstanceStartOperator(
project_id=GCP_PROJECT_ID,
zone=GCE_ZONE,
resource_id=RESOURCE_ID,
task_id='id'
)
result = op.execute(None)
mock_hook.assert_called_once_with(api_version='v1',
gcp_conn_id='google_cloud_default')
mock_hook.return_value.start_instance.assert_called_once_with(
zone=GCE_ZONE, resource_id=RESOURCE_ID, project_id=GCP_PROJECT_ID
)
self.assertTrue(result)
# Setting all of the operator's input parameters as templated dag_ids
# (could be anything else) just to test if the templating works for all fields
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_instance_start_with_templates(self, _):
dag_id = 'test_dag_id'
args = {
'start_date': DEFAULT_DATE
}
self.dag = DAG(dag_id, default_args=args) # pylint: disable=attribute-defined-outside-init
op = GceInstanceStartOperator(
project_id='{{ dag.dag_id }}',
zone='{{ dag.dag_id }}',
resource_id='{{ dag.dag_id }}',
gcp_conn_id='{{ dag.dag_id }}',
api_version='{{ dag.dag_id }}',
task_id='id',
dag=self.dag
)
ti = TaskInstance(op, DEFAULT_DATE)
ti.render_templates()
self.assertEqual(dag_id, getattr(op, 'project_id'))
self.assertEqual(dag_id, getattr(op, 'zone'))
self.assertEqual(dag_id, getattr(op, 'resource_id'))
self.assertEqual(dag_id, getattr(op, 'gcp_conn_id'))
self.assertEqual(dag_id, getattr(op, 'api_version'))
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_start_should_throw_ex_when_missing_project_id(self, mock_hook):
with self.assertRaises(AirflowException) as cm:
op = GceInstanceStartOperator(
project_id="",
zone=GCE_ZONE,
resource_id=RESOURCE_ID,
task_id='id'
)
op.execute(None)
err = cm.exception
self.assertIn("The required parameter 'project_id' is missing", str(err))
mock_hook.assert_not_called()
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_start_should_not_throw_ex_when_project_id_none(self, _):
op = GceInstanceStartOperator(
zone=GCE_ZONE,
resource_id=RESOURCE_ID,
task_id='id'
)
op.execute(None)
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_start_should_throw_ex_when_missing_zone(self, mock_hook):
with self.assertRaises(AirflowException) as cm:
op = GceInstanceStartOperator(
project_id=GCP_PROJECT_ID,
zone="",
resource_id=RESOURCE_ID,
task_id='id'
)
op.execute(None)
err = cm.exception
self.assertIn("The required parameter 'zone' is missing", str(err))
mock_hook.assert_not_called()
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_start_should_throw_ex_when_missing_resource_id(self, mock_hook):
with self.assertRaises(AirflowException) as cm:
op = GceInstanceStartOperator(
project_id=GCP_PROJECT_ID,
zone=GCE_ZONE,
resource_id="",
task_id='id'
)
op.execute(None)
err = cm.exception
self.assertIn("The required parameter 'resource_id' is missing", str(err))
mock_hook.assert_not_called()
class TestGceInstanceStop(unittest.TestCase):
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_instance_stop(self, mock_hook):
op = GceInstanceStopOperator(
project_id=GCP_PROJECT_ID,
zone=GCE_ZONE,
resource_id=RESOURCE_ID,
task_id='id'
)
op.execute(None)
mock_hook.assert_called_once_with(api_version='v1',
gcp_conn_id='google_cloud_default')
mock_hook.return_value.stop_instance.assert_called_once_with(
zone=GCE_ZONE, resource_id=RESOURCE_ID, project_id=GCP_PROJECT_ID
)
# Setting all of the operator's input parameters as templated dag_ids
# (could be anything else) just to test if the templating works for all fields
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_instance_stop_with_templates(self, _):
dag_id = 'test_dag_id'
args = {
'start_date': DEFAULT_DATE
}
self.dag = DAG(dag_id, default_args=args) # pylint: disable=attribute-defined-outside-init
op = GceInstanceStopOperator(
project_id='{{ dag.dag_id }}',
zone='{{ dag.dag_id }}',
resource_id='{{ dag.dag_id }}',
gcp_conn_id='{{ dag.dag_id }}',
api_version='{{ dag.dag_id }}',
task_id='id',
dag=self.dag
)
ti = TaskInstance(op, DEFAULT_DATE)
ti.render_templates()
self.assertEqual(dag_id, getattr(op, 'project_id'))
self.assertEqual(dag_id, getattr(op, 'zone'))
self.assertEqual(dag_id, getattr(op, 'resource_id'))
self.assertEqual(dag_id, getattr(op, 'gcp_conn_id'))
self.assertEqual(dag_id, getattr(op, 'api_version'))
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_stop_should_throw_ex_when_missing_project_id(self, mock_hook):
with self.assertRaises(AirflowException) as cm:
op = GceInstanceStopOperator(
project_id="",
zone=GCE_ZONE,
resource_id=RESOURCE_ID,
task_id='id'
)
op.execute(None)
err = cm.exception
self.assertIn("The required parameter 'project_id' is missing", str(err))
mock_hook.assert_not_called()
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_stop_should_not_throw_ex_when_project_id_none(self, mock_hook):
op = GceInstanceStopOperator(
zone=GCE_ZONE,
resource_id=RESOURCE_ID,
task_id='id'
)
op.execute(None)
mock_hook.assert_called_once_with(api_version='v1',
gcp_conn_id='google_cloud_default')
mock_hook.return_value.stop_instance.assert_called_once_with(
zone=GCE_ZONE, resource_id=RESOURCE_ID, project_id=None
)
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_stop_should_throw_ex_when_missing_zone(self, mock_hook):
with self.assertRaises(AirflowException) as cm:
op = GceInstanceStopOperator(
project_id=GCP_PROJECT_ID,
zone="",
resource_id=RESOURCE_ID,
task_id='id'
)
op.execute(None)
err = cm.exception
self.assertIn("The required parameter 'zone' is missing", str(err))
mock_hook.assert_not_called()
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_stop_should_throw_ex_when_missing_resource_id(self, mock_hook):
with self.assertRaises(AirflowException) as cm:
op = GceInstanceStopOperator(
project_id=GCP_PROJECT_ID,
zone=GCE_ZONE,
resource_id="",
task_id='id'
)
op.execute(None)
err = cm.exception
self.assertIn("The required parameter 'resource_id' is missing", str(err))
mock_hook.assert_not_called()
class TestGceInstanceSetMachineType(unittest.TestCase):
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_set_machine_type(self, mock_hook):
mock_hook.return_value.set_machine_type.return_value = True
op = GceSetMachineTypeOperator(
project_id=GCP_PROJECT_ID,
zone=GCE_ZONE,
resource_id=RESOURCE_ID,
body=SET_MACHINE_TYPE_BODY,
task_id='id'
)
op.execute(None)
mock_hook.assert_called_once_with(api_version='v1',
gcp_conn_id='google_cloud_default')
mock_hook.return_value.set_machine_type.assert_called_once_with(
zone=GCE_ZONE,
resource_id=RESOURCE_ID,
body=SET_MACHINE_TYPE_BODY,
project_id=GCP_PROJECT_ID
)
# Setting all of the operator's input parameters as templated dag_ids
# (could be anything else) just to test if the templating works for all fields
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_set_machine_type_with_templates(self, _):
dag_id = 'test_dag_id'
args = {
'start_date': DEFAULT_DATE
}
self.dag = DAG(dag_id, default_args=args) # pylint: disable=attribute-defined-outside-init
op = GceSetMachineTypeOperator(
project_id='{{ dag.dag_id }}',
zone='{{ dag.dag_id }}',
resource_id='{{ dag.dag_id }}',
body={},
gcp_conn_id='{{ dag.dag_id }}',
api_version='{{ dag.dag_id }}',
task_id='id',
dag=self.dag
)
ti = TaskInstance(op, DEFAULT_DATE)
ti.render_templates()
self.assertEqual(dag_id, getattr(op, 'project_id'))
self.assertEqual(dag_id, getattr(op, 'zone'))
self.assertEqual(dag_id, getattr(op, 'resource_id'))
self.assertEqual(dag_id, getattr(op, 'gcp_conn_id'))
self.assertEqual(dag_id, getattr(op, 'api_version'))
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_set_machine_type_should_throw_ex_when_missing_project_id(self, mock_hook):
with self.assertRaises(AirflowException) as cm:
op = GceSetMachineTypeOperator(
project_id="",
zone=GCE_ZONE,
resource_id=RESOURCE_ID,
body=SET_MACHINE_TYPE_BODY,
task_id='id'
)
op.execute(None)
err = cm.exception
self.assertIn("The required parameter 'project_id' is missing", str(err))
mock_hook.assert_not_called()
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_set_machine_type_should_not_throw_ex_when_project_id_none(self, mock_hook):
op = GceSetMachineTypeOperator(
zone=GCE_ZONE,
resource_id=RESOURCE_ID,
body=SET_MACHINE_TYPE_BODY,
task_id='id'
)
op.execute(None)
mock_hook.assert_called_once_with(api_version='v1',
gcp_conn_id='google_cloud_default')
mock_hook.return_value.set_machine_type.assert_called_once_with(
zone=GCE_ZONE,
resource_id=RESOURCE_ID,
body=SET_MACHINE_TYPE_BODY,
project_id=None
)
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_set_machine_type_should_throw_ex_when_missing_zone(self, mock_hook):
with self.assertRaises(AirflowException) as cm:
op = GceSetMachineTypeOperator(
project_id=GCP_PROJECT_ID,
zone="",
resource_id=RESOURCE_ID,
body=SET_MACHINE_TYPE_BODY,
task_id='id'
)
op.execute(None)
err = cm.exception
self.assertIn("The required parameter 'zone' is missing", str(err))
mock_hook.assert_not_called()
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_set_machine_type_should_throw_ex_when_missing_resource_id(self, mock_hook):
with self.assertRaises(AirflowException) as cm:
op = GceSetMachineTypeOperator(
project_id=GCP_PROJECT_ID,
zone=GCE_ZONE,
resource_id="",
body=SET_MACHINE_TYPE_BODY,
task_id='id'
)
op.execute(None)
err = cm.exception
self.assertIn("The required parameter 'resource_id' is missing", str(err))
mock_hook.assert_not_called()
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_set_machine_type_should_throw_ex_when_missing_machine_type(self, mock_hook):
with self.assertRaises(AirflowException) as cm:
op = GceSetMachineTypeOperator(
project_id=GCP_PROJECT_ID,
zone=GCE_ZONE,
resource_id=RESOURCE_ID,
body={},
task_id='id'
)
op.execute(None)
err = cm.exception
self.assertIn(
"The required body field 'machineType' is missing. Please add it.", str(err))
mock_hook.assert_called_once_with(api_version='v1',
gcp_conn_id='google_cloud_default')
MOCK_OP_RESPONSE = "{'kind': 'compute#operation', 'id': '8529919847974922736', " \
"'name': " \
"'operation-1538578207537-577542784f769-7999ab71-94f9ec1d', " \
"'zone': 'https://www.googleapis.com/compute/v1/projects/example" \
"-project/zones/europe-west3-b', 'operationType': " \
"'setMachineType', 'targetLink': " \
"'https://www.googleapis.com/compute/v1/projects/example-project" \
"/zones/europe-west3-b/instances/pa-1', 'targetId': " \
"'2480086944131075860', 'status': 'DONE', 'user': " \
"'service-account@example-project.iam.gserviceaccount.com', " \
"'progress': 100, 'insertTime': '2018-10-03T07:50:07.951-07:00', "\
"'startTime': '2018-10-03T07:50:08.324-07:00', 'endTime': " \
"'2018-10-03T07:50:08.484-07:00', 'error': {'errors': [{'code': " \
"'UNSUPPORTED_OPERATION', 'message': \"Machine type with name " \
"'machine-type-1' does not exist in zone 'europe-west3-b'.\"}]}, "\
"'httpErrorStatusCode': 400, 'httpErrorMessage': 'BAD REQUEST', " \
"'selfLink': " \
"'https://www.googleapis.com/compute/v1/projects/example-project" \
"/zones/europe-west3-b/operations/operation-1538578207537" \
"-577542784f769-7999ab71-94f9ec1d'} "
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook'
'._check_zone_operation_status')
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook'
'._execute_set_machine_type')
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook.get_conn')
def test_set_machine_type_should_handle_and_trim_gce_error(
self, get_conn, _execute_set_machine_type, _check_zone_operation_status):
get_conn.return_value = {}
_execute_set_machine_type.return_value = {"name": "test-operation"}
_check_zone_operation_status.return_value = ast.literal_eval(
self.MOCK_OP_RESPONSE)
with self.assertRaises(AirflowException) as cm:
op = GceSetMachineTypeOperator(
project_id=GCP_PROJECT_ID,
zone=GCE_ZONE,
resource_id=RESOURCE_ID,
body=SET_MACHINE_TYPE_BODY,
task_id='id'
)
op.execute(None)
err = cm.exception
_check_zone_operation_status.assert_called_once_with(
{}, "test-operation", GCP_PROJECT_ID, GCE_ZONE, mock.ANY)
_execute_set_machine_type.assert_called_once_with(
GCE_ZONE, RESOURCE_ID, SET_MACHINE_TYPE_BODY, GCP_PROJECT_ID)
# Checking the full message was sometimes failing due to different order
# of keys in the serialized JSON
self.assertIn("400 BAD REQUEST: {", str(err)) # checking the square bracket trim
self.assertIn("UNSUPPORTED_OPERATION", str(err))
GCE_INSTANCE_TEMPLATE_NAME = "instance-template-test"
GCE_INSTANCE_TEMPLATE_NEW_NAME = "instance-template-test-new"
GCE_INSTANCE_TEMPLATE_REQUEST_ID = "e12d5b48-4826-4ba9-ada6-0cff1e0b36a6"
GCE_INSTANCE_TEMPLATE_BODY_GET = {
"kind": "compute#instanceTemplate",
"id": "6950321349997439715",
"creationTimestamp": "2018-10-15T06:20:12.777-07:00",
"name": GCE_INSTANCE_TEMPLATE_NAME,
"description": "",
"properties": {
"machineType": "n1-standard-1",
"networkInterfaces": [
{
"kind": "compute#networkInterface",
"network": "https://www.googleapis.com/compute/v1/"
"projects/project/global/networks/default",
"accessConfigs": [
{
"kind": "compute#accessConfig",
"type": "ONE_TO_ONE_NAT",
}
]
},
{
"network": "https://www.googleapis.com/compute/v1/"
"projects/project/global/networks/default",
"accessConfigs": [
{
"kind": "compute#accessConfig",
"networkTier": "PREMIUM"
}
]
}
],
"disks": [
{
"kind": "compute#attachedDisk",
"type": "PERSISTENT",
"licenses": [
"A String",
]
}
],
"metadata": {
"kind": "compute#metadata",
"fingerprint": "GDPUYxlwHe4="
},
},
"selfLink": "https://www.googleapis.com/compute/v1/projects/project"
"/global/instanceTemplates/instance-template-test"
}
GCE_INSTANCE_TEMPLATE_BODY_INSERT = {
"name": GCE_INSTANCE_TEMPLATE_NEW_NAME,
"description": "",
"properties": {
"machineType": "n1-standard-1",
"networkInterfaces": [
{
"network": "https://www.googleapis.com/compute/v1/"
"projects/project/global/networks/default",
"accessConfigs": [
{
"type": "ONE_TO_ONE_NAT",
}
]
},
{
"network": "https://www.googleapis.com/compute/v1/"
"projects/project/global/networks/default",
"accessConfigs": [
{
"networkTier": "PREMIUM"
}
]
}
],
"disks": [
{
"type": "PERSISTENT",
}
],
"metadata": {
"fingerprint": "GDPUYxlwHe4="
},
},
}
GCE_INSTANCE_TEMPLATE_BODY_GET_NEW = deepcopy(GCE_INSTANCE_TEMPLATE_BODY_GET)
GCE_INSTANCE_TEMPLATE_BODY_GET_NEW['name'] = GCE_INSTANCE_TEMPLATE_NEW_NAME
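# Note: GCE_INSTANCE_TEMPLATE_BODY_INSERT mirrors GCE_INSTANCE_TEMPLATE_BODY_GET
# with the server-generated, read-only fields stripped ('kind' markers, 'id',
# 'creationTimestamp', 'selfLink' and the disks' 'licenses') and 'name' swapped
# for the new template name: this is the body the tests below expect the copy
# operator to pass to insert_instance_template.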
class TestGceInstanceTemplateCopy(unittest.TestCase):
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_successful_copy_template(self, mock_hook):
mock_hook.return_value.get_instance_template.side_effect = [
HttpError(resp=httplib2.Response({'status': 404}), content=EMPTY_CONTENT),
GCE_INSTANCE_TEMPLATE_BODY_GET,
GCE_INSTANCE_TEMPLATE_BODY_GET_NEW
]
op = GceInstanceTemplateCopyOperator(
project_id=GCP_PROJECT_ID,
resource_id=GCE_INSTANCE_TEMPLATE_NAME,
task_id='id',
body_patch={"name": GCE_INSTANCE_TEMPLATE_NEW_NAME}
)
result = op.execute(None)
mock_hook.assert_called_once_with(api_version='v1',
gcp_conn_id='google_cloud_default')
mock_hook.return_value.insert_instance_template.assert_called_once_with(
project_id=GCP_PROJECT_ID,
body=GCE_INSTANCE_TEMPLATE_BODY_INSERT,
request_id=None
)
self.assertEqual(GCE_INSTANCE_TEMPLATE_BODY_GET_NEW, result)
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_successful_copy_template_missing_project_id(self, mock_hook):
mock_hook.return_value.get_instance_template.side_effect = [
HttpError(resp=httplib2.Response({'status': 404}), content=EMPTY_CONTENT),
GCE_INSTANCE_TEMPLATE_BODY_GET,
GCE_INSTANCE_TEMPLATE_BODY_GET_NEW
]
op = GceInstanceTemplateCopyOperator(
resource_id=GCE_INSTANCE_TEMPLATE_NAME,
task_id='id',
body_patch={"name": GCE_INSTANCE_TEMPLATE_NEW_NAME}
)
result = op.execute(None)
mock_hook.assert_called_once_with(api_version='v1',
gcp_conn_id='google_cloud_default')
mock_hook.return_value.insert_instance_template.assert_called_once_with(
project_id=None,
body=GCE_INSTANCE_TEMPLATE_BODY_INSERT,
request_id=None
)
self.assertEqual(GCE_INSTANCE_TEMPLATE_BODY_GET_NEW, result)
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_idempotent_copy_template_when_already_copied(self, mock_hook):
mock_hook.return_value.get_instance_template.side_effect = [
GCE_INSTANCE_TEMPLATE_BODY_GET_NEW
]
op = GceInstanceTemplateCopyOperator(
project_id=GCP_PROJECT_ID,
resource_id=GCE_INSTANCE_TEMPLATE_NAME,
task_id='id',
body_patch={"name": GCE_INSTANCE_TEMPLATE_NEW_NAME}
)
result = op.execute(None)
mock_hook.assert_called_once_with(api_version='v1',
gcp_conn_id='google_cloud_default')
mock_hook.return_value.insert_instance_template.assert_not_called()
self.assertEqual(GCE_INSTANCE_TEMPLATE_BODY_GET_NEW, result)
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_successful_copy_template_with_request_id(self, mock_hook):
mock_hook.return_value.get_instance_template.side_effect = [
HttpError(resp=httplib2.Response({'status': 404}), content=EMPTY_CONTENT),
GCE_INSTANCE_TEMPLATE_BODY_GET,
GCE_INSTANCE_TEMPLATE_BODY_GET_NEW
]
op = GceInstanceTemplateCopyOperator(
project_id=GCP_PROJECT_ID,
resource_id=GCE_INSTANCE_TEMPLATE_NAME,
request_id=GCE_INSTANCE_TEMPLATE_REQUEST_ID,
task_id='id',
body_patch={"name": GCE_INSTANCE_TEMPLATE_NEW_NAME}
)
result = op.execute(None)
mock_hook.assert_called_once_with(api_version='v1',
gcp_conn_id='google_cloud_default')
mock_hook.return_value.insert_instance_template.assert_called_once_with(
project_id=GCP_PROJECT_ID,
body=GCE_INSTANCE_TEMPLATE_BODY_INSERT,
request_id=GCE_INSTANCE_TEMPLATE_REQUEST_ID,
)
self.assertEqual(GCE_INSTANCE_TEMPLATE_BODY_GET_NEW, result)
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_successful_copy_template_with_description_fields(self, mock_hook):
mock_hook.return_value.get_instance_template.side_effect = [
HttpError(resp=httplib2.Response({'status': 404}), content=EMPTY_CONTENT),
GCE_INSTANCE_TEMPLATE_BODY_GET,
GCE_INSTANCE_TEMPLATE_BODY_GET_NEW
]
op = GceInstanceTemplateCopyOperator(
project_id=GCP_PROJECT_ID,
resource_id=GCE_INSTANCE_TEMPLATE_NAME,
request_id=GCE_INSTANCE_TEMPLATE_REQUEST_ID,
task_id='id',
body_patch={"name": GCE_INSTANCE_TEMPLATE_NEW_NAME,
"description": "New description"}
)
result = op.execute(None)
mock_hook.assert_called_once_with(api_version='v1',
gcp_conn_id='google_cloud_default')
body_insert = deepcopy(GCE_INSTANCE_TEMPLATE_BODY_INSERT)
body_insert["description"] = "New description"
mock_hook.return_value.insert_instance_template.assert_called_once_with(
project_id=GCP_PROJECT_ID,
body=body_insert,
request_id=GCE_INSTANCE_TEMPLATE_REQUEST_ID,
)
self.assertEqual(GCE_INSTANCE_TEMPLATE_BODY_GET_NEW, result)
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_copy_with_some_validation_warnings(self, mock_hook):
mock_hook.return_value.get_instance_template.side_effect = [
HttpError(resp=httplib2.Response({'status': 404}), content=EMPTY_CONTENT),
GCE_INSTANCE_TEMPLATE_BODY_GET,
GCE_INSTANCE_TEMPLATE_BODY_GET_NEW
]
op = GceInstanceTemplateCopyOperator(
project_id=GCP_PROJECT_ID,
resource_id=GCE_INSTANCE_TEMPLATE_NAME,
task_id='id',
body_patch={"name": GCE_INSTANCE_TEMPLATE_NEW_NAME,
"some_wrong_field": "test",
"properties": {
"some_other_wrong_field": "test"
}}
)
result = op.execute(None)
mock_hook.assert_called_once_with(api_version='v1',
gcp_conn_id='google_cloud_default')
body_insert = deepcopy(GCE_INSTANCE_TEMPLATE_BODY_INSERT)
body_insert["some_wrong_field"] = "test"
body_insert["properties"]["some_other_wrong_field"] = "test"
mock_hook.return_value.insert_instance_template.assert_called_once_with(
project_id=GCP_PROJECT_ID,
body=body_insert,
request_id=None,
)
self.assertEqual(GCE_INSTANCE_TEMPLATE_BODY_GET_NEW, result)
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_successful_copy_template_with_updated_nested_fields(self, mock_hook):
mock_hook.return_value.get_instance_template.side_effect = [
HttpError(resp=httplib2.Response({'status': 404}), content=EMPTY_CONTENT),
GCE_INSTANCE_TEMPLATE_BODY_GET,
GCE_INSTANCE_TEMPLATE_BODY_GET_NEW
]
op = GceInstanceTemplateCopyOperator(
project_id=GCP_PROJECT_ID,
resource_id=GCE_INSTANCE_TEMPLATE_NAME,
task_id='id',
body_patch={
"name": GCE_INSTANCE_TEMPLATE_NEW_NAME,
"properties": {
"machineType": "n1-standard-2",
}
}
)
result = op.execute(None)
mock_hook.assert_called_once_with(api_version='v1',
gcp_conn_id='google_cloud_default')
body_insert = deepcopy(GCE_INSTANCE_TEMPLATE_BODY_INSERT)
body_insert["properties"]["machineType"] = "n1-standard-2"
mock_hook.return_value.insert_instance_template.assert_called_once_with(
project_id=GCP_PROJECT_ID,
body=body_insert,
request_id=None
)
self.assertEqual(GCE_INSTANCE_TEMPLATE_BODY_GET_NEW, result)
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_successful_copy_template_with_smaller_array_fields(self, mock_hook):
mock_hook.return_value.get_instance_template.side_effect = [
HttpError(resp=httplib2.Response({'status': 404}), content=EMPTY_CONTENT),
GCE_INSTANCE_TEMPLATE_BODY_GET,
GCE_INSTANCE_TEMPLATE_BODY_GET_NEW
]
op = GceInstanceTemplateCopyOperator(
project_id=GCP_PROJECT_ID,
resource_id=GCE_INSTANCE_TEMPLATE_NAME,
task_id='id',
body_patch={
"name": GCE_INSTANCE_TEMPLATE_NEW_NAME,
"properties": {
"machineType": "n1-standard-1",
"networkInterfaces": [
{
"network": "https://www.googleapis.com/compute/v1/"
"projects/project/global/networks/default",
"accessConfigs": [
{
"type": "ONE_TO_ONE_NAT",
"natIP": "8.8.8.8"
}
]
}
]
}
}
)
result = op.execute(None)
mock_hook.assert_called_once_with(api_version='v1',
gcp_conn_id='google_cloud_default')
body_insert = deepcopy(GCE_INSTANCE_TEMPLATE_BODY_INSERT)
body_insert["properties"]["networkInterfaces"] = [
{
"network": "https://www.googleapis.com/compute/v1/"
"projects/project/global/networks/default",
"accessConfigs": [
{
"type": "ONE_TO_ONE_NAT",
"natIP": "8.8.8.8"
}
]
}
]
mock_hook.return_value.insert_instance_template.assert_called_once_with(
project_id=GCP_PROJECT_ID,
body=body_insert,
request_id=None
)
self.assertEqual(GCE_INSTANCE_TEMPLATE_BODY_GET_NEW, result)
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_successful_copy_template_with_bigger_array_fields(self, mock_hook):
mock_hook.return_value.get_instance_template.side_effect = [
HttpError(resp=httplib2.Response({'status': 404}), content=EMPTY_CONTENT),
GCE_INSTANCE_TEMPLATE_BODY_GET,
GCE_INSTANCE_TEMPLATE_BODY_GET_NEW
]
op = GceInstanceTemplateCopyOperator(
project_id=GCP_PROJECT_ID,
resource_id=GCE_INSTANCE_TEMPLATE_NAME,
task_id='id',
body_patch={
"name": GCE_INSTANCE_TEMPLATE_NEW_NAME,
"properties": {
"disks": [
{
"kind": "compute#attachedDisk",
"type": "SCRATCH",
"licenses": [
"Updated String",
]
},
{
"kind": "compute#attachedDisk",
"type": "PERSISTENT",
"licenses": [
"Another String",
]
}
],
}
}
)
result = op.execute(None)
mock_hook.assert_called_once_with(api_version='v1',
gcp_conn_id='google_cloud_default')
body_insert = deepcopy(GCE_INSTANCE_TEMPLATE_BODY_INSERT)
body_insert["properties"]["disks"] = [
{
"kind": "compute#attachedDisk",
"type": "SCRATCH",
"licenses": [
"Updated String",
]
},
{
"kind": "compute#attachedDisk",
"type": "PERSISTENT",
"licenses": [
"Another String",
]
}
]
mock_hook.return_value.insert_instance_template.assert_called_once_with(
project_id=GCP_PROJECT_ID,
body=body_insert,
request_id=None,
)
self.assertEqual(GCE_INSTANCE_TEMPLATE_BODY_GET_NEW, result)
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_missing_name(self, mock_hook):
mock_hook.return_value.get_instance_template.side_effect = [
HttpError(resp=httplib2.Response({'status': 404}), content=EMPTY_CONTENT),
GCE_INSTANCE_TEMPLATE_BODY_GET,
GCE_INSTANCE_TEMPLATE_BODY_GET_NEW
]
with self.assertRaises(AirflowException) as cm:
op = GceInstanceTemplateCopyOperator(
project_id=GCP_PROJECT_ID,
resource_id=GCE_INSTANCE_TEMPLATE_NAME,
request_id=GCE_INSTANCE_TEMPLATE_REQUEST_ID,
task_id='id',
body_patch={"description": "New description"}
)
op.execute(None)
err = cm.exception
self.assertIn("should contain at least name for the new operator "
"in the 'name' field", str(err))
mock_hook.assert_not_called()
GCE_INSTANCE_GROUP_MANAGER_NAME = "instance-group-test"
GCE_INSTANCE_TEMPLATE_SOURCE_URL = \
"https://www.googleapis.com/compute/beta/projects/project" \
"/global/instanceTemplates/instance-template-test"
GCE_INSTANCE_TEMPLATE_OTHER_URL = \
"https://www.googleapis.com/compute/beta/projects/project" \
"/global/instanceTemplates/instance-template-other"
GCE_INSTANCE_TEMPLATE_NON_EXISTING_URL = \
"https://www.googleapis.com/compute/beta/projects/project" \
"/global/instanceTemplates/instance-template-non-existing"
GCE_INSTANCE_TEMPLATE_DESTINATION_URL = \
"https://www.googleapis.com/compute/beta/projects/project" \
"/global/instanceTemplates/instance-template-new"
GCE_INSTANCE_GROUP_MANAGER_GET = {
"kind": "compute#instanceGroupManager",
"id": "2822359583810032488",
"creationTimestamp": "2018-10-17T05:39:35.793-07:00",
"name": GCE_INSTANCE_GROUP_MANAGER_NAME,
"zone": "https://www.googleapis.com/compute/beta/projects/project/zones/zone",
"instanceTemplate": GCE_INSTANCE_TEMPLATE_SOURCE_URL,
"versions": [
{
"name": "v1",
"instanceTemplate": GCE_INSTANCE_TEMPLATE_SOURCE_URL,
"targetSize": {
"calculated": 1
}
},
{
"name": "v2",
"instanceTemplate": GCE_INSTANCE_TEMPLATE_OTHER_URL,
}
],
"instanceGroup": GCE_INSTANCE_TEMPLATE_SOURCE_URL,
"baseInstanceName": GCE_INSTANCE_GROUP_MANAGER_NAME,
"fingerprint": "BKWB_igCNbQ=",
"currentActions": {
"none": 1,
"creating": 0,
"creatingWithoutRetries": 0,
"verifying": 0,
"recreating": 0,
"deleting": 0,
"abandoning": 0,
"restarting": 0,
"refreshing": 0
},
"pendingActions": {
"creating": 0,
"deleting": 0,
"recreating": 0,
"restarting": 0
},
"targetSize": 1,
"selfLink": "https://www.googleapis.com/compute/beta/projects/project/zones/"
"zone/instanceGroupManagers/" + GCE_INSTANCE_GROUP_MANAGER_NAME,
"autoHealingPolicies": [
{
"initialDelaySec": 300
}
],
"serviceAccount": "198907790164@cloudservices.gserviceaccount.com"
}
GCE_INSTANCE_GROUP_MANAGER_EXPECTED_PATCH = {
"instanceTemplate": GCE_INSTANCE_TEMPLATE_DESTINATION_URL,
"versions": [
{
"name": "v1",
"instanceTemplate": GCE_INSTANCE_TEMPLATE_DESTINATION_URL,
"targetSize": {
"calculated": 1
}
},
{
"name": "v2",
"instanceTemplate": GCE_INSTANCE_TEMPLATE_OTHER_URL,
}
],
}
GCE_INSTANCE_GROUP_MANAGER_REQUEST_ID = "e12d5b48-4826-4ba9-ada6-0cff1e0b36a6"
GCE_INSTANCE_GROUP_MANAGER_UPDATE_POLICY = {
"type": "OPPORTUNISTIC",
"minimalAction": "RESTART",
"maxSurge": {
"fixed": 1
},
"maxUnavailable": {
"percent": 10
},
"minReadySec": 1800
}
class TestGceInstanceGroupManagerUpdate(unittest.TestCase):
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_successful_instance_group_update(self, mock_hook):
mock_hook.return_value.get_instance_group_manager.return_value = \
deepcopy(GCE_INSTANCE_GROUP_MANAGER_GET)
op = GceInstanceGroupManagerUpdateTemplateOperator(
project_id=GCP_PROJECT_ID,
zone=GCE_ZONE,
resource_id=GCE_INSTANCE_GROUP_MANAGER_NAME,
task_id='id',
source_template=GCE_INSTANCE_TEMPLATE_SOURCE_URL,
destination_template=GCE_INSTANCE_TEMPLATE_DESTINATION_URL
)
result = op.execute(None)
mock_hook.assert_called_once_with(api_version='beta',
gcp_conn_id='google_cloud_default')
mock_hook.return_value.patch_instance_group_manager.assert_called_once_with(
project_id=GCP_PROJECT_ID,
zone=GCE_ZONE,
resource_id=GCE_INSTANCE_GROUP_MANAGER_NAME,
body=GCE_INSTANCE_GROUP_MANAGER_EXPECTED_PATCH,
request_id=None
)
self.assertTrue(result)
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_successful_instance_group_update_missing_project_id(self, mock_hook):
mock_hook.return_value.get_instance_group_manager.return_value = \
deepcopy(GCE_INSTANCE_GROUP_MANAGER_GET)
op = GceInstanceGroupManagerUpdateTemplateOperator(
zone=GCE_ZONE,
resource_id=GCE_INSTANCE_GROUP_MANAGER_NAME,
task_id='id',
source_template=GCE_INSTANCE_TEMPLATE_SOURCE_URL,
destination_template=GCE_INSTANCE_TEMPLATE_DESTINATION_URL
)
result = op.execute(None)
mock_hook.assert_called_once_with(api_version='beta',
gcp_conn_id='google_cloud_default')
mock_hook.return_value.patch_instance_group_manager.assert_called_once_with(
project_id=None,
zone=GCE_ZONE,
resource_id=GCE_INSTANCE_GROUP_MANAGER_NAME,
body=GCE_INSTANCE_GROUP_MANAGER_EXPECTED_PATCH,
request_id=None
)
self.assertTrue(result)
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_successful_instance_group_update_no_instance_template_field(self, mock_hook):
instance_group_manager_no_template = deepcopy(GCE_INSTANCE_GROUP_MANAGER_GET)
del instance_group_manager_no_template['instanceTemplate']
mock_hook.return_value.get_instance_group_manager.return_value = \
instance_group_manager_no_template
op = GceInstanceGroupManagerUpdateTemplateOperator(
project_id=GCP_PROJECT_ID,
zone=GCE_ZONE,
resource_id=GCE_INSTANCE_GROUP_MANAGER_NAME,
task_id='id',
source_template=GCE_INSTANCE_TEMPLATE_SOURCE_URL,
destination_template=GCE_INSTANCE_TEMPLATE_DESTINATION_URL
)
result = op.execute(None)
mock_hook.assert_called_once_with(api_version='beta',
gcp_conn_id='google_cloud_default')
expected_patch_no_instance_template = \
deepcopy(GCE_INSTANCE_GROUP_MANAGER_EXPECTED_PATCH)
del expected_patch_no_instance_template['instanceTemplate']
mock_hook.return_value.patch_instance_group_manager.assert_called_once_with(
project_id=GCP_PROJECT_ID,
zone=GCE_ZONE,
resource_id=GCE_INSTANCE_GROUP_MANAGER_NAME,
body=expected_patch_no_instance_template,
request_id=None
)
self.assertTrue(result)
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_successful_instance_group_update_no_versions_field(self, mock_hook):
instance_group_manager_no_versions = deepcopy(GCE_INSTANCE_GROUP_MANAGER_GET)
del instance_group_manager_no_versions['versions']
mock_hook.return_value.get_instance_group_manager.return_value = \
instance_group_manager_no_versions
op = GceInstanceGroupManagerUpdateTemplateOperator(
project_id=GCP_PROJECT_ID,
zone=GCE_ZONE,
resource_id=GCE_INSTANCE_GROUP_MANAGER_NAME,
task_id='id',
source_template=GCE_INSTANCE_TEMPLATE_SOURCE_URL,
destination_template=GCE_INSTANCE_TEMPLATE_DESTINATION_URL
)
result = op.execute(None)
mock_hook.assert_called_once_with(api_version='beta',
gcp_conn_id='google_cloud_default')
expected_patch_no_versions = \
deepcopy(GCE_INSTANCE_GROUP_MANAGER_EXPECTED_PATCH)
del expected_patch_no_versions['versions']
mock_hook.return_value.patch_instance_group_manager.assert_called_once_with(
project_id=GCP_PROJECT_ID,
zone=GCE_ZONE,
resource_id=GCE_INSTANCE_GROUP_MANAGER_NAME,
body=expected_patch_no_versions,
request_id=None
)
self.assertTrue(result)
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_successful_instance_group_update_with_update_policy(self, mock_hook):
mock_hook.return_value.get_instance_group_manager.return_value = \
deepcopy(GCE_INSTANCE_GROUP_MANAGER_GET)
op = GceInstanceGroupManagerUpdateTemplateOperator(
project_id=GCP_PROJECT_ID,
zone=GCE_ZONE,
resource_id=GCE_INSTANCE_GROUP_MANAGER_NAME,
task_id='id',
update_policy=GCE_INSTANCE_GROUP_MANAGER_UPDATE_POLICY,
source_template=GCE_INSTANCE_TEMPLATE_SOURCE_URL,
destination_template=GCE_INSTANCE_TEMPLATE_DESTINATION_URL
)
result = op.execute(None)
mock_hook.assert_called_once_with(api_version='beta',
gcp_conn_id='google_cloud_default')
expected_patch_with_update_policy = \
deepcopy(GCE_INSTANCE_GROUP_MANAGER_EXPECTED_PATCH)
expected_patch_with_update_policy['updatePolicy'] = GCE_INSTANCE_GROUP_MANAGER_UPDATE_POLICY
mock_hook.return_value.patch_instance_group_manager.assert_called_once_with(
project_id=GCP_PROJECT_ID,
zone=GCE_ZONE,
resource_id=GCE_INSTANCE_GROUP_MANAGER_NAME,
body=expected_patch_with_update_policy,
request_id=None
)
self.assertTrue(result)
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_successful_instance_group_update_with_request_id(self, mock_hook):
mock_hook.return_value.get_instance_group_manager.return_value = \
deepcopy(GCE_INSTANCE_GROUP_MANAGER_GET)
op = GceInstanceGroupManagerUpdateTemplateOperator(
project_id=GCP_PROJECT_ID,
zone=GCE_ZONE,
resource_id=GCE_INSTANCE_GROUP_MANAGER_NAME,
task_id='id',
source_template=GCE_INSTANCE_TEMPLATE_SOURCE_URL,
request_id=GCE_INSTANCE_GROUP_MANAGER_REQUEST_ID,
destination_template=GCE_INSTANCE_TEMPLATE_DESTINATION_URL
)
result = op.execute(None)
mock_hook.assert_called_once_with(api_version='beta',
gcp_conn_id='google_cloud_default')
mock_hook.return_value.patch_instance_group_manager.assert_called_once_with(
project_id=GCP_PROJECT_ID,
zone=GCE_ZONE,
resource_id=GCE_INSTANCE_GROUP_MANAGER_NAME,
body=GCE_INSTANCE_GROUP_MANAGER_EXPECTED_PATCH,
request_id=GCE_INSTANCE_GROUP_MANAGER_REQUEST_ID
)
self.assertTrue(result)
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_try_to_use_api_v1(self, _):
with self.assertRaises(AirflowException) as cm:
GceInstanceGroupManagerUpdateTemplateOperator(
project_id=GCP_PROJECT_ID,
zone=GCE_ZONE,
resource_id=GCE_INSTANCE_GROUP_MANAGER_NAME,
task_id='id',
api_version='v1',
source_template=GCE_INSTANCE_TEMPLATE_SOURCE_URL,
destination_template=GCE_INSTANCE_TEMPLATE_DESTINATION_URL
)
err = cm.exception
self.assertIn("Use beta api version or above", str(err))
@mock.patch('airflow.gcp.operators.compute.ComputeEngineHook')
def test_try_to_use_non_existing_template(self, mock_hook):
mock_hook.return_value.get_instance_group_manager.return_value = \
deepcopy(GCE_INSTANCE_GROUP_MANAGER_GET)
op = GceInstanceGroupManagerUpdateTemplateOperator(
project_id=GCP_PROJECT_ID,
zone=GCE_ZONE,
resource_id=GCE_INSTANCE_GROUP_MANAGER_NAME,
task_id='id',
source_template=GCE_INSTANCE_TEMPLATE_NON_EXISTING_URL,
destination_template=GCE_INSTANCE_TEMPLATE_DESTINATION_URL
)
result = op.execute(None)
mock_hook.assert_called_once_with(api_version='beta',
gcp_conn_id='google_cloud_default')
mock_hook.return_value.patch_instance_group_manager.assert_not_called()
self.assertTrue(result)
|
|
from base64 import urlsafe_b64decode, urlsafe_b64encode
import config
from datetime import datetime
import flask
import GeoIP
import geoip2.errors
import geoip2.database
import glob
import hashlib
import json
import logging
from netaddr import IPNetwork
import os
import random
import re
import requests
import string
from werkzeug import secure_filename
# local imports
from centinel import constants
from centinel.models import Client, Role
import centinel
app = centinel.app
db = centinel.db
auth = centinel.auth
try:
reader = geoip2.database.Reader(config.maxmind_db)
except (geoip2.database.maxminddb.InvalidDatabaseError, IOError):
logging.warning("You appear to have an error in your geolocation "
"database. Your database is either corrupt or does not "
"exist until you download a new copy, geolocation "
"functionality will be disabled.")
reader = None
try:
logging.info("Loading AS info database...")
as_lookup = GeoIP.open("/opt/centinel-server/asn-db.dat",
GeoIP.GEOIP_STANDARD)
logging.info("Done loading AS info database.")
except Exception as exp:
    logging.warning("Error loading ASN lookup information (%s). You need "
                    "a copy of the ASN database file to enable this "
                    "feature." % exp)
as_lookup = None
def normalize_ip(ip):
"""Take in an IP as a string in CIDR format or without subnet
and normalize to a single IP for lookups
"""
net = IPNetwork(ip)
ip = str(net[0])
return ip
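# A quick illustrative sketch of normalize_ip (addresses are examples,
# not taken from the code): IPNetwork(...)[0] is the network address, so
#   normalize_ip("1.2.3.4/24")  ->  "1.2.3.0"
#   normalize_ip("8.8.8.8")     ->  "8.8.8.8"   (no subnet: /32 assumed)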
def get_country_from_ip(ip):
"""Return the country for the given ip"""
ip = normalize_ip(ip)
try:
return reader.country(ip).country.iso_code
# if we have disabled geoip support, reader should be None, so the
# exception should be triggered
except (geoip2.errors.AddressNotFoundError,
geoip2.errors.GeoIP2Error, AttributeError):
return '--'
def get_asn_from_ip(ip, asn_reg=re.compile("AS(?P<asn>[0-9]+)")):
"""Get the owner and ASN for the IP"""
ip = normalize_ip(ip)
if as_lookup is None:
return None, None
owner = as_lookup.org_by_addr(ip)
asn = None
if owner is not None:
asn = asn_reg.match(owner).group('asn')
return asn, owner
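# Hedged example of the parsing above: GeoIP's org_by_addr typically
# returns a string such as "AS15169 Google Inc." (value hypothetical),
# in which case asn_reg yields asn "15169" and owner is the full string.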
def generate_typeable_handle(length=8):
"""Generate a random typeable (a-z, 1-9) string for consent URL."""
return "".join([random.choice(string.digits +
string.ascii_lowercase) for _ in range(length)])
def update_client_info(username, ip, country=None):
"""Update client's information upon contact.
This information includes their IP address,
time when last seen, and country.
    Params:
        username- username of the client who contacted us.
        ip- IP address of the client
        country- optional two-letter country code to set explicitly
                 (used by VPN clients)
    """
client = Client.query.filter_by(username=username).first()
if client is None:
# this should never happen
return
# aggregate the ip to /24
client.last_ip = ".".join(ip.split(".")[:3]) + ".0/24"
client.last_seen = datetime.now()
    # if the client explicitly sets their country,
# update the value based on that (used by VPN).
if country is not None:
client.country = country
else:
# don't update country for VPN clients unless
# it was manually set.
# this way we avoid changing country value when
# uploading results without VPN connection.
if not client.is_vpn:
client.country = get_country_from_ip(ip)
db.session.commit()
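# Worked example of the /24 aggregation above (address hypothetical):
#   ip "198.51.100.42"  ->  client.last_ip stored as "198.51.100.0/24"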
@app.errorhandler(404)
def not_found(error):
return flask.make_response(flask.jsonify({'error': 'Not found'}), 404)
@app.errorhandler(400)
def bad_request(error):
return flask.make_response(flask.jsonify({'error': 'Bad request'}), 400)
@app.errorhandler(418)
def no_consent(error):
return flask.make_response(flask.jsonify({'error': 'Consent not given'}),
418)
@auth.error_handler
def unauthorized():
json_resp = flask.jsonify({'error': 'Unauthorized access'})
return flask.make_response(json_resp, 401)
@app.route("/version")
def get_recommended_version():
return flask.jsonify({"version": config.recommended_version})
@app.route("/results", methods=['POST'])
@auth.login_required
def submit_result():
update_client_info(flask.request.authorization.username,
flask.request.remote_addr)
# abort if there is no result file
if not flask.request.files:
flask.abort(400)
# make sure the informed consent has been given before we proceed
username = flask.request.authorization.username
client = Client.query.filter_by(username=username).first()
if not client.has_given_consent:
flask.abort(418)
# TODO: overwrite file if exists?
result_file = flask.request.files['result']
client_dir = username
# we assume that the directory was created when the user
# registered
file_name = secure_filename(result_file.filename)
file_path = os.path.join(config.results_dir, client_dir, file_name)
result_file.save(file_path)
return flask.jsonify({"status": "success"}), 201
@app.route("/results")
@auth.login_required
def get_results():
update_client_info(flask.request.authorization.username,
flask.request.remote_addr)
results = {}
# TODO: cache the list of results?
# TODO: let the admin query any results file here?
# look in results directory for the user's results (we assume this
# directory was created when the user registered)
username = flask.request.authorization.username
user_dir = os.path.join(config.results_dir, username, '[!_]*.json')
for path in glob.glob(user_dir):
file_name, ext = os.path.splitext(os.path.basename(path))
with open(path) as result_file:
try:
results[file_name] = json.load(result_file)
            except Exception as e:
logging.error("Results: Couldn't open results file - %s - %s"
% (path, str(e)))
return flask.jsonify({"results": results})
def get_user_specific_content(folder, filename=None, json_var=None):
"""Perform the functionality of get_experiments and get_inputs_files
Params:
filename- the name of the file to retrieve or None to fetch the
hashes of all the files
folder- the directory that the user's directory is contained in
json_var- the name of the json variable to return containing the
list of hashes
"""
username = flask.request.authorization.username
# make sure the informed consent has been given before we proceed
client = Client.query.filter_by(username=username).first()
if not client.has_given_consent:
flask.abort(418)
# all of the scheduler files are combined together here.
# this is run every time the experiment list or "scheduler.info"
# is requested.
    # initialized here so the scheduler branch below cannot raise NameError
    freqs = {}
    if (json_var == "experiments" and
(filename is None or filename == "scheduler.info")):
global_scheduler_filename = os.path.join(config.experiments_dir,
"global", "scheduler.info")
country_scheduler_filename = os.path.join(config.experiments_dir,
client.country,
"scheduler.info")
client_scheduler_filename = os.path.join(config.experiments_dir,
username, "scheduler.info")
if os.path.exists(global_scheduler_filename):
with open(global_scheduler_filename, 'r') as file_p:
freqs.update(json.load(file_p))
if os.path.exists(country_scheduler_filename):
with open(country_scheduler_filename, 'r') as file_p:
freqs.update(json.load(file_p))
if os.path.exists(client_scheduler_filename):
with open(client_scheduler_filename, 'r') as file_p:
freqs.update(json.load(file_p))
files = {}
# include global baseline content
global_dir = os.path.join(folder, "global")
if os.path.exists(global_dir):
for path in glob.glob(os.path.join(global_dir, "*")):
file_name = os.path.basename(path)
files[file_name] = path
else:
logging.warning("Global baseline folder \"%s\" "
"doesn't exist!" % (global_dir))
# include country-specific baseline content
country_specific_dir = os.path.join(folder, client.country)
if os.path.exists(country_specific_dir):
        # if baseline experiments exist for this country (i.e. the folder
        # exists), sync up all of the files in that dir.
for path in glob.glob(os.path.join(country_specific_dir, "*")):
file_name = os.path.basename(path)
files[file_name] = path
else:
logging.warning("Country baseline folder %s "
"doesn't exist!" % (country_specific_dir))
user_dir = os.path.join(folder, username, '*')
for path in glob.glob(user_dir):
file_name = os.path.basename(path)
files[file_name] = path
if filename is None:
for filename in files:
with open(files[filename], 'r') as file_p:
hash_val = hashlib.md5(file_p.read()).digest()
files[filename] = urlsafe_b64encode(hash_val)
return flask.jsonify({json_var: files})
# this should never happen, but better be safe
if '..' in filename or filename.startswith('/'):
flask.abort(404)
# we have to make a special case for scheduler.info
# and send the composition of all 3 files as one file
if filename == "scheduler.info":
scheduler = json.dumps(freqs)
response = flask.make_response(scheduler)
response.headers["Content-Disposition"] = ("attachment; "
"filename=scheduler.info")
return response
if filename in files:
# send requested experiment file
return flask.send_file(files[filename])
else:
# not found
flask.abort(404)
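# Hedged sketch of the response shapes above (file names hypothetical):
#   GET /experiments          -> {"experiments": {"exp1.py": "<b64 md5>"}}
#   GET /experiments/exp1.py  -> the raw file contents via send_file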
# in case the client wants to specify the country explicitly (VPN).
@app.route("/set_country/<country>")
@auth.login_required
def set_country(country):
if country is None:
flask.abort(404)
try:
update_client_info(flask.request.authorization.username,
flask.request.remote_addr, country)
except Exception as exp:
logging.error("Error setting country"
" %s: %s" % (country, exp))
return flask.jsonify({"status": "failure"}), 400
return flask.jsonify({"status": "success"}), 200
# in case the client wants to specify the IP address explicitly (VPN).
@app.route("/set_ip/<ip_address>")
@auth.login_required
def set_ip(ip_address):
if ip_address is None:
flask.abort(404)
try:
update_client_info(flask.request.authorization.username,
ip=ip_address)
except Exception as exp:
logging.error("Error setting IP address"
" %s: %s" % (ip_address, exp))
return flask.jsonify({"status": "failure"}), 400
return flask.jsonify({"status": "success"}), 200
@app.route("/experiments")
@app.route("/experiments/<name>")
@auth.login_required
def get_experiments(name=None):
update_client_info(flask.request.authorization.username,
flask.request.remote_addr)
return get_user_specific_content(config.experiments_dir, filename=name,
json_var="experiments")
@app.route("/input_files")
@app.route("/input_files/<name>")
@auth.login_required
def get_inputs(name=None):
update_client_info(flask.request.authorization.username,
flask.request.remote_addr)
return get_user_specific_content(config.inputs_dir, filename=name,
json_var="inputs")
@app.route("/clients")
def get_system_status():
"""This is a list of clients and the countries from which they last
connected and when. This does not require authentication as it
doesn't reveal anything important (e.g. IP address, username, etc.).
The list is shuffled each time so that numbers are randomly assigned.
Note: we don't display clients who have the dont_display column
set to 1/True
"""
clients = Client.query.all()
random.shuffle(clients)
results = []
number = 0
for client in clients:
# dont add clients who have asked not to be displayed
if client.dont_display:
continue
info = {}
info['num'] = number
info['country'] = client.country
if client.last_seen is not None:
info['last_seen'] = str(client.last_seen.date())
else:
continue
info['is_vpn'] = client.is_vpn
info['as_number'] = 0
info['as_owner'] = ""
try:
asn, owner = get_asn_from_ip(client.last_ip)
info['as_number'] = asn
info['as_owner'] = owner.decode('utf-8', 'ignore')
except Exception as exp:
logging.error("Error looking up AS info for "
"%s: %s" % (client.last_ip, exp))
results.append(info)
number += 1
return flask.jsonify({"clients": results})
@app.route("/client_details")
@auth.login_required
def get_clients():
"""This is a list of clients that is fully detailed.
This requires both authentication and admin-level access.
"""
update_client_info(flask.request.authorization.username,
flask.request.remote_addr)
# ensure that the client has the admin role
username = flask.request.authorization.username
user = Client.query.filter_by(username=username).first()
admin = Role.query.filter_by(name='admin').first()
if user not in admin.users:
return unauthorized()
results = []
clients = Client.query.all()
for client in clients:
info = {}
info['username'] = client.username
info['handle'] = client.typeable_handle
info['country'] = client.country
info['registered_date'] = client.registered_date
info['last_seen'] = client.last_seen
info['last_ip'] = client.last_ip
info['is_vpn'] = client.is_vpn
info['has_given_consent'] = client.has_given_consent
info['date_given_consent'] = client.date_given_consent
results.append(info)
return flask.jsonify({"clients": results})
@app.route("/register", methods=["POST"])
def register():
# TODO: use a captcha to prevent spam?
if not flask.request.json:
flask.abort(404)
ip = flask.request.remote_addr
# parse the info we need out of the json
client_json = flask.request.get_json()
username = client_json.get('username')
password = client_json.get('password')
# if the user didn't specify which country they were coming from,
# pull it from geolocation on their ip
country = client_json.get('country')
if country is None or (len(country) != 2):
client_json['country'] = get_country_from_ip(ip)
client_json['ip'] = ip
client_json['last_seen'] = datetime.now()
client_json['registered_date'] = datetime.now()
client_json['has_given_consent'] = False
# a VPN client does not need to give consent
if client_json.get('is_vpn'):
client_json['is_vpn'] = True
client_json['has_given_consent'] = True
client_json['date_given_consent'] = datetime.now()
else:
client_json['is_vpn'] = False
client_json['roles'] = ['client']
if not username or not password:
flask.abort(400)
client = Client.query.filter_by(username=username).first()
if client is not None:
flask.abort(400)
# create a typeable handle to put in the consent form URL
typeable_handle = generate_typeable_handle(length=8)
client = Client.query.filter_by(typeable_handle=typeable_handle).first()
# if there is a collision, generate another one
while client is not None:
        typeable_handle = generate_typeable_handle(length=8)
        client = Client.query.filter_by(
            typeable_handle=typeable_handle).first()
client_json['typeable_handle'] = typeable_handle
user = Client(**client_json)
db.session.add(user)
db.session.commit()
os.makedirs(os.path.join(config.results_dir, username))
os.makedirs(os.path.join(config.experiments_dir, username))
os.makedirs(os.path.join(config.inputs_dir, username))
ret_json = {"status": "success", "typeable_handle": typeable_handle}
return flask.jsonify(ret_json), 201
@app.route("/meta/")
@app.route("/meta/<custom_ip>")
def geolocate(custom_ip=None):
# this will return metadata about a client's IP
# address and the current server time. this info
# can be appended to experiment results.
if custom_ip is not None:
ip = custom_ip
else:
ip = flask.request.remote_addr
results = {}
ip_aggr = ip
results['country'] = ''
try:
# aggregate ip to the /24
ip_aggr = '.'.join(ip.split('.')[:3]) + '.0/24'
country = get_country_from_ip(ip)
results['country'] = country
except Exception as exp:
logging.error('Error looking up country for '
'%s: %s' % (ip, exp))
results['country_error'] = str(exp)
results['ip'] = ip_aggr
results['as_number'] = 0
results['as_owner'] = ''
try:
asn, owner = get_asn_from_ip(ip)
results['as_number'] = asn
results['as_owner'] = owner.decode('utf-8', 'ignore')
except Exception as exp:
logging.error('Error looking up AS info for '
'%s: %s' % (ip, exp))
results['asn_error'] = str(exp)
results['server_time'] = datetime.now().isoformat()
return flask.jsonify(results)
def display_consent_page(username, path, freedom_url=''):
# insert a hidden field into the form with the user's username
with open(path, 'r') as file_p:
initial_page = file_p.read()
initial_page = initial_page.decode('utf-8')
replace_field = u'replace-with-username-value'
initial_page = initial_page.replace(replace_field,
urlsafe_b64encode(username))
replace_field = u'replace-with-human-readable-username-value'
    initial_page = initial_page.replace(replace_field, username)
if freedom_url != '':
freedom_replacement = u'replace-this-with-freedom-house'
initial_page = initial_page.replace(freedom_replacement,
u'static/' + freedom_url)
return initial_page
@app.route("/consent/<typeable_handle>")
def get_initial_informed_consent_with_handle(typeable_handle):
if typeable_handle is None:
flask.abort(404)
client = Client.query.filter_by(typeable_handle=typeable_handle).first()
if client is None:
flask.abort(404)
if client.has_given_consent:
return "Consent already given."
username = client.username
if config.prefetch_freedomhouse:
return display_consent_page(username,
'static/initial_informed_consent.html')
else:
return display_consent_page(username,
'static/no_prefetch_informed_consent.html')
@app.route("/get_initial_consent")
def get_initial_informed_consent():
username = flask.request.args.get('username')
if username is None:
flask.abort(404)
username = urlsafe_b64decode(str(username))
client = Client.query.filter_by(username=username).first()
if client is None:
flask.abort(404)
if client.has_given_consent:
return "Consent already given."
return display_consent_page(username,
'static/initial_informed_consent.html')
@app.route("/get_informed_consent_for_country")
def get_country_specific_consent():
username = flask.request.args.get('username')
country = flask.request.args.get('country')
if username is None or country is None:
flask.abort(404)
username = urlsafe_b64decode(str(username))
client = Client.query.filter_by(username=username).first()
if client is None:
flask.abort(404)
if client.has_given_consent:
return "Consent already given."
country = str(country).upper()
if country not in constants.freedom_house_lookup:
flask.abort(404)
# if we don't already have the content from freedom house, fetch
# it, then host it locally and insert it into the report
freedom_url = "".join(["freedom_house_", country, ".html"])
filename = os.path.join("static", freedom_url)
# get the content from freedom house if we don't already have it
get_page_and_strip_bad_content(constants.freedom_house_url(country),
filename)
page_path = 'static/informed_consent.html'
page_content = display_consent_page(username, page_path, freedom_url)
flask.url_for('static', filename=freedom_url)
flask.url_for('static', filename='economistDemocracyIndex.pdf')
flask.url_for('static', filename='consent.js')
return page_content
def get_page_and_strip_bad_content(url, filename):
"""Get the given page, strip out all requests back to the original
domain (identified via src tags), and write out the page
Note: this will break stuff, but that is better than letting the
domain know where and who our clients are
Note: we expect the content to be fairly static, so we don't
refetch it if we already have it
"""
    if os.path.exists(filename):
        return
req = requests.get(url)
# replace external links with a blank reference (sucks for the
# rendering engine to figure out, but hey, they get paid to work
# that out)
# also remove form tags and scripts
sub_flags = re.MULTILINE | re.DOTALL
replace_src = r'src\s*=\s*".*?"'
page = re.sub(replace_src, "", req.content, flags=sub_flags)
replace_href = r'href\s*=\s*".*?"'
page = re.sub(replace_href, "", page, flags=sub_flags)
replace_script = r'<\s*script.*?>.*?</\s*script\s*>'
page = re.sub(replace_script, "", page, flags=sub_flags)
replace_form = r'<\s*form.*?>.*?</\s*form\s*>'
page = re.sub(replace_form, "", page, flags=sub_flags)
with open(filename, 'w') as file_p:
file_p.write(page)
@app.route("/submit_consent")
def update_informed_consent():
username = flask.request.args.get('username')
if username is None:
flask.abort(404)
username = urlsafe_b64decode(str(username))
client = Client.query.filter_by(username=username).first()
if client is None:
flask.abort(404)
if client.has_given_consent:
return "Consent already given."
client.has_given_consent = True
client.date_given_consent = datetime.now().date()
db.session.commit()
response = ("Success! Thanks for registering; you are ready to start "
"sending us censorship measurement results.")
return response
@auth.verify_password
def verify_password(username, password):
if (len(username) == 0) and (len(password) == 0):
logging.warning(("Username and password are both empty. Are you sure "
"that you enabled the WSGI option for HTTP "
"authentication?\n"
"Add WSGIPassAuthorization On to your WSGI config "
"file under enabled-sites in Apache"))
user = Client.query.filter_by(username=username).first()
return user and user.verify_password(password)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014 NaviNet Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Watches directories and sends images to Eyes.
"""
import argparse
from distutils import dir_util
import logging
import os
import re
import Queue
import shutil
import threading
import time
from applitools import errors
from applitools import eyes
import glob
from watchdog import events
import eyeswrapper
import watchdir
_DONE_BASE_NAME = 'done'
_FAILURE_DIR_NAME = 'FAILED'
_SUCCESS_DIR_NAME = 'DONE'
_INDEX = None # A nonnegative integer, or None to disable indexing
_DEFAULT_SEP = '_'
_LOGGER = logging.getLogger(__name__)
_TIMEOUT = 300 # In seconds
# The Applitools Eyes Team License limits the number of concurrent
# tests to n + 1, where n is the number of team members. However,
# Applitools does not enforce this limit; until they do, we are free to
# test as much as we want.
_MAX_CONCURRENT_TESTS = 0
_CONCURRENT_TEST_QUEUE = None
def _make_empty_directory(path):
"""Clears a directory or deletes a regular file.
Deletes whatever the path refers to (if anything) and creates an
empty directory at that path.
Args:
path: The path to make point to an empty directory.
"""
_LOGGER.debug('Clearing directory: {}'.format(path))
if os.path.isdir(path):
shutil.rmtree(path)
elif os.path.exists(path):
os.remove(path)
dir_util.mkpath(path)
class DirectoryGlobEventHandler(events.FileSystemEventHandler):
"""Event handler for new directories matching a glob.
"""
def __init__(self, stop_event, **kwargs):
"""Initializes the event handler.
Args:
stop_event: An Event to set to stop watching.
batch_info: A BatchInfo or None.
base_path: The literal existing part of the watched
directory.
host_app: A browser name or None.
host_os: An OS name or None.
overwrite_baseline: Whether to overwrite the baseline.
patterns: An iterable of file name globs.
sep: The host information separator, set by --sep.
test_name: The name of the Eyes test.
"""
self._patterns = kwargs.pop('patterns', ['*'])
self._batch_info = kwargs.pop('batch_info', None)
self._host_app = kwargs.pop('host_app', None)
self._host_os = kwargs.pop('host_os', None)
self._sep = kwargs.pop('sep', _DEFAULT_SEP)
self._base_path = kwargs.pop('base_path')
self._stop_event = stop_event
processing_dir = os.path.join(os.path.dirname(self._base_path),
watchdir.PROCESSING_DIR_NAME)
if os.path.isfile(processing_dir):
os.remove(processing_dir)
_LOGGER.info('Processing directory: {}'.format(processing_dir))
super(self.__class__, self).__init__(**kwargs)
if (self._base_path == self._patterns[0] and
os.path.isdir(self._base_path)):
# Watch a non-glob immediately.
self._watch(self._base_path)
stop_event.set()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def on_created(self, event):
"""Handles the creation of a new file.
If the new file is a directory and it matches one of the event
handler's globs, it watches it for new images to send to Eyes.
Args:
event: The Event representing the creation of a new file.
"""
src_path = event.src_path
matched_pattern = _matches_any_pattern(src_path, self._patterns)
if matched_pattern:
_LOGGER.info('Created: {} (matching {})'.format(src_path,
matched_pattern))
if event.is_directory:
self._watch(src_path)
else:
                _LOGGER.warning('Not a directory: {}'.format(src_path))
def _watch(self, src_path):
"""Watches a directory to send new images to Eyes.
Args:
src_path: The path to watch.
"""
host_os, host_app = _get_app_environment(src_path, self._sep)
watchdir.watch(src_path, WindowMatchingEventHandler,
base_path=self._base_path, batch_info=self._batch_info,
host_app=self._host_app or host_app,
host_os=self._host_os or host_os,
watched_path=src_path, test_name=src_path)
class _GrowingList(list):
"""List that grows when needed.
"""
def __setitem__(self, index, value):
"""Sets the value at an index.
If the index is out of bounds, grows the list to be long
enough, filling unspecified indexes with None.
Args:
index: An index.
value: A value.
"""
if index >= len(self):
self.extend([None] * (index + 1 - len(self)))
super(self.__class__, self).__setitem__(index, value)
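# Illustrative sketch (values hypothetical): assigning past the end pads
# with None instead of raising IndexError:
#   cache = _GrowingList()
#   cache[2] = 'img_2.png'   # cache is now [None, None, 'img_2.png']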
class WindowMatchingEventHandler(eyeswrapper.EyesWrapper,
watchdir.CreationEventHandler):
"""Event handler for moving new files and uploading them to Eyes.
"""
def __init__(self, stop_event, **kwargs):
"""Initializes the event handler.
Args:
stop_event: An Event to set when it is time to stop
watching.
"""
# pylint: disable=super-init-not-called
self._next_index = _INDEX or 0
self._path_cache = _GrowingList()
self._stop_event = stop_event
self._timer = None
for base in self.__class__.__bases__:
base.__init__(self, **kwargs)
def _process(self):
"""Sends new files to Applitools.
Each image file may include an integer index somewhere in its
name. If enabled by --index, this method uploads them in order
of their indexes. If two files include the same integer, only
the first is used.
Stops watching when the "done" file (set by --done) appears in
the queue or when the time-out interval passes without a new
file appearing.
"""
while not self.driver:
# Wait for Eyes to have opened.
time.sleep(0.1)
_CONCURRENT_TEST_QUEUE.put(None)
while True:
path = self._backlog.get()
if self._timer:
self._timer.cancel()
basename = os.path.basename(path)
if basename == _DONE_BASE_NAME:
self._stop()
break
match = _INDEX is None or re.search(r'\d+', basename)
if match:
# The file has an index and should be uploaded, or
# indexing has been disabled.
if _INDEX is None:
matched_index = self._next_index
else:
matched_index = int(match.group())
if matched_index < self._next_index:
                    _LOGGER.warning(
                        'Ignoring file with repeated index: {}'.format(path))
else:
self._path_cache[matched_index] = path
# Upload as many files from the cache as possible
# without skipping any indexes.
try:
while self._path_cache[self._next_index]:
eyeswrapper.match(
self.eyes, self._path_cache[self._next_index])
self._next_index += 1
except IndexError:
# We have run off the end of the cache. This is
# expected when the cache has no holes in it.
pass
else:
                _LOGGER.warning('No index in file name: {}'.format(path))
_LOGGER.debug('File cache, starting at index {}: {}'.format(
self._next_index + 1, self._path_cache[self._next_index + 1:]))
_LOGGER.debug('Setting timer for {} s'.format(_TIMEOUT))
self._timer = threading.Timer(_TIMEOUT, self._time_out)
self._timer.start()
def _time_out(self):
"""Stop watching because of a time-out.
"""
_LOGGER.debug('Timing out')
self._stop()
def _stop(self):
"""Stops watching.
"""
self._stop_event.set()
def __exit__(self, exc_type, exc_value, traceback):
"""Ends the Eyes test and moves files.
Moves files on completion of a test. The destination directory
depends on whether the Eyes test succeeded or failed.
Args:
exc_type: The type of the raised exception.
exc_value: The raised exception.
traceback: The traceback.
"""
try:
# Upload whatever files are left.
for path in self._path_cache[self._next_index:]:
if path:
eyeswrapper.match(self.eyes, path)
# Allow another path to be watched.
_CONCURRENT_TEST_QUEUE.get()
_CONCURRENT_TEST_QUEUE.task_done()
# Close Eyes.
super(self.__class__, self).__exit__(exc_type, exc_value,
traceback)
except errors.NewTestError as error:
_LOGGER.info(error)
final_dir_name = _SUCCESS_DIR_NAME
except errors.TestFailedError as error:
_LOGGER.info(error)
final_dir_name = _FAILURE_DIR_NAME
else:
final_dir_name = _SUCCESS_DIR_NAME
finally:
final_dir = os.path.join(os.path.dirname(self._base_path),
final_dir_name)
base_path_final_copy = os.path.join(
final_dir, os.path.basename(self._base_path))
watched_path_final_copy = os.path.join(
base_path_final_copy,
os.path.relpath(self._watched_path, self._base_path))
_make_empty_directory(watched_path_final_copy)
_LOGGER.debug('Moving {} to {}'.format(
self._watched_path_copy, watched_path_final_copy))
if os.path.isdir(watched_path_final_copy):
shutil.rmtree(watched_path_final_copy)
elif os.path.exists(watched_path_final_copy):
os.remove(watched_path_final_copy)
os.rename(self._watched_path_copy, watched_path_final_copy)
def _get_app_environment(path, sep):
"""Get the host OS and browser.
    Finds the nearest parent directory of the watched path with three or
    more instances of sep and splits on it. The host OS and browser are
    the two fields immediately before the last.
Args:
path: The path in which to find the host information.
sep: The separator. If false, simply returns None for both.
Returns:
An iterable of two elements: the host OS and browser, which are
both strings or both None.
"""
prev_path = None
while path != prev_path and sep:
head, tail = os.path.split(path)
fields = tail.split(sep)
if len(fields) > 3:
return fields[-3:-1]
prev_path = path
path = head
return None, None
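# Hedged example of the splitting above, with hypothetical names: for
# path '/in/acme_Windows_Chrome_shots/run1' and sep '_', the parent
# 'acme_Windows_Chrome_shots' splits into four fields, so fields[-3:-1]
# yields ['Windows', 'Chrome'] as the host OS and browser.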
def _parse_args():
"""Parse command line arguments.
Returns:
A Namespace containing the parsed arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'patterns', nargs='*', default=[os.curdir],
help='glob of paths to watch (default: current directory)',
metavar='GLOB')
baseline_group = parser.add_argument_group(
'Eyes session arguments',
'startInfo parameters for the new Eyes session')
baseline_group.add_argument(
'--batch', help='batch all directories together as BATCH')
baseline_group.add_argument(
'--app', default=eyeswrapper.APP_NAME,
help='run against the APP baseline (default: %(default)s)')
baseline_group.add_argument(
'--test', help='set the test name (default: the path to watch)')
baseline_group.add_argument(
'--sep', default=_DEFAULT_SEP,
        help='find the nearest parent directory to the watched path with '
        'three or more instances of PATTERN, split on it, and set the host '
        'OS and browser to the two fields immediately before the last '
        '(default: %(default)s)', metavar='PATTERN')
baseline_group.add_argument('--browser',
help='set the host browser (overrides --sep)')
baseline_group.add_argument('--os',
help='set the host OS (overrides --sep)')
path_group = parser.add_argument_group(
'file and directory name arguments')
path_group.add_argument(
'--done', default=_DONE_BASE_NAME,
help='end a test when FILENAME is created (default: %(default)s)',
metavar='FILENAME')
path_group.add_argument('--failed', default=_FAILURE_DIR_NAME,
help='put files into DIRNAME when an Eyes test '
'fails (default: %(default)s)', metavar='DIRNAME')
path_group.add_argument(
'--in-progress', default=watchdir.PROCESSING_DIR_NAME,
help='put files into DIRNAME for processing (default: %(default)s)',
metavar='DIRNAME')
path_group.add_argument(
'--passed', default=_SUCCESS_DIR_NAME,
help='put files into DIRNAME when an Eyes test passes (default: '
'%(default)s)', metavar='DIRNAME')
parser.add_argument('-a', '--api-key', required=True,
help='set the Applitools Eyes API key')
parser.add_argument('-i', '--index', '--array-base', default=_INDEX,
type=int, help='start uploading images from index N '
'(by default, indexing is disabled)', metavar='N')
parser.add_argument('--log', default='WARNING', type=str.upper,
help='set the logging level (default: %(default)s)',
metavar='LEVEL')
parser.add_argument('-t', '--tests', default=_MAX_CONCURRENT_TESTS,
type=int, help='run N tests concurrently (N <= 0 '
'means unlimited; default: %(default)d)',
metavar='N')
    parser.add_argument('--timeout', default=_TIMEOUT, type=int,
                        help='stop watching after N seconds without a new '
                        'file (default: %(default)s)', metavar='N')
return parser.parse_args()
def _literal_existing_part(pattern):
"""Returns the literal existing part of a glob.
The literal existing part is as many consecutive directories of the
glob as possible which do not include any glob metacharacters ('*',
'?', and '['). For example, the literal existing part of '/x/y/*/z/?'
is '/x/y'.
Args:
pattern: A file glob.
Returns:
The literal existing part of the glob.
"""
pattern += os.sep
while True:
dirname = os.path.dirname(pattern)
if glob.has_magic(dirname) or not os.path.exists(dirname):
pattern = dirname
else:
return dirname
def _matches_any_pattern(path, patterns):
"""Compares a path against a list of globs.
Args:
path: A path.
patterns: An iterable of file name globs.
Returns:
The first pattern the path matches, or False if none matches.
"""
normalized_path = os.path.normcase(os.path.normpath(path))
for pattern in patterns:
for matching_path in glob.glob(pattern):
if (os.path.normcase(os.path.normpath(matching_path)) ==
normalized_path):
return pattern
return False
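# Note, as a hedged example: glob.glob() expands against the filesystem,
# so a path can only match a pattern it currently exists under. With a
# hypothetical existing directory 'shots/run1':
#   _matches_any_pattern('shots/run1', ['shots/*'])    ->  'shots/*'
#   _matches_any_pattern('shots/missing', ['shots/*']) ->  False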
def _set_up_logging(level):
"""Sets up logging.
Args:
level: The logging level.
"""
_LOGGER.setLevel(level)
handler = logging.StreamHandler()
handler.setLevel(level)
handler.setFormatter(
logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
_LOGGER.addHandler(handler)
_LOGGER.propagate = False
if _LOGGER.getEffectiveLevel() <= logging.DEBUG:
eyeswrapper.LOGGER = _LOGGER
watchdir.LOGGER = _LOGGER
from applitools import logger
eyes_logger = logger.StdoutLogger()
logger.set_logger(eyes_logger)
requests_logger = logging.getLogger('requests.packages.urllib3')
requests_logger.addHandler(handler)
requests_logger.setLevel(logging.DEBUG)
requests_logger.propagate = False
def main():
"""Watches directories and sends images to Eyes.
Use --help for full command line option documentation.
"""
# pylint: disable=global-statement
global _CONCURRENT_TEST_QUEUE
global _DONE_BASE_NAME
global _FAILURE_DIR_NAME
global _INDEX
global _MAX_CONCURRENT_TESTS
global _SUCCESS_DIR_NAME
global _TIMEOUT
args = _parse_args()
# Logging
_set_up_logging(args.log)
_LOGGER.debug('Args: {}'.format(args))
# Command line arguments
batch_info = None
if args.batch:
batch_info = eyes.BatchInfo(args.batch)
eyeswrapper.APP_NAME = args.app
if args.test:
eyeswrapper.TEST_NAME = args.test
_DONE_BASE_NAME = args.done
_FAILURE_DIR_NAME = args.failed
watchdir.PROCESSING_DIR_NAME = args.in_progress
_SUCCESS_DIR_NAME = args.passed
eyes.Eyes.api_key = args.api_key
_INDEX = args.index
if _INDEX and _INDEX < 0:
        _LOGGER.warning(
            'Invalid index {}; indexing will be disabled'.format(_INDEX))
_INDEX = None
_MAX_CONCURRENT_TESTS = args.tests
_CONCURRENT_TEST_QUEUE = Queue.Queue(_MAX_CONCURRENT_TESTS)
_TIMEOUT = args.timeout
# Watching
watched_paths = []
for pattern in args.patterns:
pattern = os.path.realpath(pattern)
path = _literal_existing_part(pattern)
normalized_path = os.path.normcase(path)
if normalized_path in watched_paths:
_LOGGER.info('Skipping {}: same as {}'.format(pattern,
normalized_path))
continue
watched_paths.append(normalized_path)
watchdir.watch(normalized_path, DirectoryGlobEventHandler,
base_path=normalized_path,
patterns=[os.path.normcase(pattern)],
batch_info=batch_info, host_app=args.browser,
host_os=args.os, sep=args.sep)
_LOGGER.info('Ready to start watching')
try:
while watchdir.is_running():
time.sleep(1)
except KeyboardInterrupt:
watchdir.stop_watching()
if __name__ == '__main__':
main()
|
|
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WSGI tools for use with swift."""
import errno
import os
import signal
import time
import mimetools
from itertools import chain
from StringIO import StringIO
import eventlet
import eventlet.debug
from eventlet import greenio, GreenPool, sleep, wsgi, listen
from paste.deploy import loadwsgi
from eventlet.green import socket, ssl
from urllib import unquote
from swift.common import utils
from swift.common.swob import Request
from swift.common.utils import capture_stdio, disable_fallocate, \
drop_privileges, get_logger, NullLogger, config_true_value, \
validate_configuration, get_hub
class NamedConfigLoader(loadwsgi.ConfigLoader):
"""
Patch paste.deploy's ConfigLoader so each context object will know what
config section it came from.
"""
def get_context(self, object_type, name=None, global_conf=None):
context = super(NamedConfigLoader, self).get_context(
object_type, name=name, global_conf=global_conf)
context.name = name
return context
loadwsgi.ConfigLoader = NamedConfigLoader
class ConfigDirLoader(NamedConfigLoader):
"""
Read configuration from multiple files under the given path.
"""
def __init__(self, conf_dir):
# parent class uses filename attribute when building error messages
self.filename = conf_dir = conf_dir.strip()
defaults = {
'here': os.path.normpath(os.path.abspath(conf_dir)),
'__file__': os.path.abspath(conf_dir)
}
self.parser = loadwsgi.NicerConfigParser(conf_dir, defaults=defaults)
self.parser.optionxform = str # Don't lower-case keys
utils.read_conf_dir(self.parser, conf_dir)
def _loadconfigdir(object_type, uri, path, name, relative_to, global_conf):
if relative_to:
path = os.path.normpath(os.path.join(relative_to, path))
loader = ConfigDirLoader(path)
if global_conf:
loader.update_defaults(global_conf, overwrite=False)
return loader.get_context(object_type, name, global_conf)
# add config_dir parsing to paste.deploy
loadwsgi._loaders['config_dir'] = _loadconfigdir
def wrap_conf_type(f):
"""
    Wrap a function whose first argument is a paste.deploy style config uri,
such that you can pass it an un-adorned raw filesystem path and the config
directive (either config: or config_dir:) will be added automatically
based on the type of filesystem entity at the given path (either a file or
directory) before passing it through to the paste.deploy function.
"""
def wrapper(conf_path, *args, **kwargs):
if os.path.isdir(conf_path):
conf_type = 'config_dir'
else:
conf_type = 'config'
conf_uri = '%s:%s' % (conf_type, conf_path)
return f(conf_uri, *args, **kwargs)
return wrapper
appconfig = wrap_conf_type(loadwsgi.appconfig)
loadapp = wrap_conf_type(loadwsgi.loadapp)
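# Illustrative sketch of the wrapping above (paths hypothetical):
#   loadapp('/etc/swift/proxy-server.conf')    # file -> 'config:<path>'
#   loadapp('/etc/swift/proxy-server.conf.d')  # dir  -> 'config_dir:<path>'
# Either way the call is forwarded as loadwsgi.loadapp('<type>:<path>').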
def monkey_patch_mimetools():
"""
mimetools.Message defaults content-type to "text/plain"
This changes it to default to None, so we can detect missing headers.
"""
orig_parsetype = mimetools.Message.parsetype
def parsetype(self):
if not self.typeheader:
self.type = None
self.maintype = None
self.subtype = None
self.plisttext = ''
else:
orig_parsetype(self)
mimetools.Message.parsetype = parsetype
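# A minimal sketch of the patched behavior, assuming a message with no
# Content-Type header (header values hypothetical):
#   monkey_patch_mimetools()
#   msg = mimetools.Message(StringIO('Date: x\r\n\r\n'))
#   msg.type is None   # would be 'text/plain' without the patch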
def get_socket(conf, default_port=8080):
"""Bind socket to bind ip:port in conf
:param conf: Configuration dict to read settings from
:param default_port: port to use if not specified in conf
    :returns: a socket object as returned from eventlet's listen, or
              ssl.wrap_socket if conf specifies cert_file
"""
bind_addr = (conf.get('bind_ip', '0.0.0.0'),
int(conf.get('bind_port', default_port)))
address_family = [addr[0] for addr in socket.getaddrinfo(
bind_addr[0], bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
if addr[0] in (socket.AF_INET, socket.AF_INET6)][0]
sock = None
bind_timeout = int(conf.get('bind_timeout', 30))
retry_until = time.time() + bind_timeout
warn_ssl = False
while not sock and time.time() < retry_until:
try:
sock = listen(bind_addr, backlog=int(conf.get('backlog', 4096)),
family=address_family)
if 'cert_file' in conf:
warn_ssl = True
sock = ssl.wrap_socket(sock, certfile=conf['cert_file'],
keyfile=conf['key_file'])
        except socket.error as err:
if err.args[0] != errno.EADDRINUSE:
raise
sleep(0.1)
if not sock:
raise Exception(_('Could not bind to %s:%s '
'after trying for %s seconds') % (
bind_addr[0], bind_addr[1], bind_timeout))
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# in my experience, sockets can hang around forever without keepalive
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
if hasattr(socket, 'TCP_KEEPIDLE'):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 600)
if warn_ssl:
ssl_warning_message = _('WARNING: SSL should only be enabled for '
'testing purposes. Use external SSL '
'termination for a production deployment.')
get_logger(conf).warning(ssl_warning_message)
print(ssl_warning_message)
return sock
class RestrictedGreenPool(GreenPool):
"""
Works the same as GreenPool, but if the size is specified as one, then the
spawn_n() method will invoke waitall() before returning to prevent the
caller from doing any other work (like calling accept()).
"""
def __init__(self, size=1024):
super(RestrictedGreenPool, self).__init__(size=size)
self._rgp_do_wait = (size == 1)
def spawn_n(self, *args, **kwargs):
super(RestrictedGreenPool, self).spawn_n(*args, **kwargs)
if self._rgp_do_wait:
self.waitall()
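# Hedged usage sketch: with size=1 each spawn_n() call blocks until the
# spawned handler finishes, serializing work (handle/conn hypothetical):
#   pool = RestrictedGreenPool(size=1)
#   pool.spawn_n(handle, conn)   # returns only after handle() completes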
def run_server(conf, logger, sock):
# Ensure TZ environment variable exists to avoid stat('/etc/localtime') on
# some platforms. This locks in reported times to the timezone in which
# the server first starts running in locations that periodically change
# timezones.
os.environ['TZ'] = time.strftime("%z", time.gmtime())
wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
# Turn off logging requests by the underlying WSGI software.
wsgi.HttpProtocol.log_request = lambda *a: None
# Redirect logging other messages by the underlying WSGI software.
wsgi.HttpProtocol.log_message = \
lambda s, f, *a: logger.error('ERROR WSGI: ' + f % a)
wsgi.WRITE_TIMEOUT = int(conf.get('client_timeout') or 60)
eventlet.hubs.use_hub(get_hub())
eventlet.patcher.monkey_patch(all=False, socket=True)
eventlet_debug = config_true_value(conf.get('eventlet_debug', 'no'))
eventlet.debug.hub_exceptions(eventlet_debug)
# utils.LogAdapter stashes name in server; fallback on unadapted loggers
if hasattr(logger, 'server'):
log_name = logger.server
else:
log_name = logger.name
app = loadapp(conf['__file__'], global_conf={'log_name': log_name})
max_clients = int(conf.get('max_clients', '1024'))
pool = RestrictedGreenPool(size=max_clients)
try:
wsgi.server(sock, app, NullLogger(), custom_pool=pool)
    except socket.error as err:
if err[0] != errno.EINVAL:
raise
pool.waitall()
#TODO(clayg): pull more pieces of this to test more
def run_wsgi(conf_path, app_section, *args, **kwargs):
"""
Runs the server using the specified number of workers.
:param conf_path: Path to paste.deploy style configuration file/directory
:param app_section: App name from conf file to load config from
"""
    # load configuration, set up the logger, and load the request processor
try:
(conf, logger, log_name) = \
_initrp(conf_path, app_section, *args, **kwargs)
    except ConfigFileError as e:
        print(e)
return
# bind to address and port
sock = get_socket(conf, default_port=kwargs.get('default_port', 8080))
# remaining tasks should not require elevated privileges
drop_privileges(conf.get('user', 'swift'))
# Ensure the application can be loaded before proceeding.
loadapp(conf_path, global_conf={'log_name': log_name})
# set utils.FALLOCATE_RESERVE if desired
reserve = int(conf.get('fallocate_reserve', 0))
if reserve > 0:
utils.FALLOCATE_RESERVE = reserve
# redirect errors to logger and close stdio
capture_stdio(logger)
worker_count = int(conf.get('workers', '1'))
# Useful for profiling [no forks].
if worker_count == 0:
run_server(conf, logger, sock)
return
def kill_children(*args):
"""Kills the entire process group."""
logger.error('SIGTERM received')
signal.signal(signal.SIGTERM, signal.SIG_IGN)
running[0] = False
os.killpg(0, signal.SIGTERM)
def hup(*args):
"""Shuts down the server, but allows running requests to complete"""
logger.error('SIGHUP received')
signal.signal(signal.SIGHUP, signal.SIG_IGN)
running[0] = False
running = [True]
signal.signal(signal.SIGTERM, kill_children)
signal.signal(signal.SIGHUP, hup)
children = []
while running[0]:
while len(children) < worker_count:
pid = os.fork()
if pid == 0:
signal.signal(signal.SIGHUP, signal.SIG_DFL)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
run_server(conf, logger, sock)
logger.notice('Child %d exiting normally' % os.getpid())
return
else:
logger.notice('Started child %s' % pid)
children.append(pid)
try:
pid, status = os.wait()
if os.WIFEXITED(status) or os.WIFSIGNALED(status):
logger.error('Removing dead child %s' % pid)
children.remove(pid)
        except OSError as err:
if err.errno not in (errno.EINTR, errno.ECHILD):
raise
        except KeyboardInterrupt:
            logger.notice('User quit')
            break
greenio.shutdown_safe(sock)
sock.close()
logger.notice('Exited')
class ConfigFileError(Exception):
pass
def _initrp(conf_path, app_section, *args, **kwargs):
try:
conf = appconfig(conf_path, name=app_section)
    except Exception as e:
raise ConfigFileError("Error trying to load config from %s: %s" %
(conf_path, e))
validate_configuration()
# pre-configure logger
log_name = conf.get('log_name', app_section)
if 'logger' in kwargs:
logger = kwargs.pop('logger')
else:
logger = get_logger(conf, log_name,
log_to_console=kwargs.pop('verbose', False),
log_route='wsgi')
# disable fallocate if desired
if config_true_value(conf.get('disable_fallocate', 'no')):
disable_fallocate()
monkey_patch_mimetools()
return (conf, logger, log_name)
def init_request_processor(conf_path, app_section, *args, **kwargs):
"""
Loads common settings from conf
Sets the logger
Loads the request processor
:param conf_path: Path to paste.deploy style configuration file/directory
:param app_section: App name from conf file to load config from
:returns: the loaded application entry point
:raises ConfigFileError: Exception is raised for config file error
"""
(conf, logger, log_name) = _initrp(conf_path, app_section, *args, **kwargs)
app = loadapp(conf_path, global_conf={'log_name': log_name})
return (app, conf, logger, log_name)
class WSGIContext(object):
"""
This class provides a means to provide context (scope) for a middleware
filter to have access to the wsgi start_response results like the request
status and headers.
"""
def __init__(self, wsgi_app):
self.app = wsgi_app
def _start_response(self, status, headers, exc_info=None):
"""
Saves response info without sending it to the remote client.
Uses the same semantics as the usual WSGI start_response.
"""
self._response_status = status
self._response_headers = headers
self._response_exc_info = exc_info
def _app_call(self, env):
"""
Ensures start_response has been called before returning.
"""
self._response_status = None
self._response_headers = None
self._response_exc_info = None
resp = self.app(env, self._start_response)
# if start_response has been called, just return the iter
if self._response_status is not None:
return resp
resp = iter(resp)
try:
            first_chunk = next(resp)
except StopIteration:
return iter([])
else: # We got a first_chunk
return chain([first_chunk], resp)
def _get_status_int(self):
"""
Returns the HTTP status int from the last called self._start_response
result.
"""
return int(self._response_status.split(' ', 1)[0])
def _response_header_value(self, key):
"Returns str of value for given header key or None"
for h_key, val in self._response_headers:
if h_key.lower() == key.lower():
return val
return None
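# Middleware sketch (illustrative, not from the original module): a filter can
# subclass WSGIContext to inspect the status captured by _start_response
# before forwarding the response downstream; StatusLoggingMiddleware is a
# hypothetical example class.
#
#     class StatusLoggingMiddleware(WSGIContext):
#         def __call__(self, env, start_response):
#             resp_iter = self._app_call(env)
#             if self._get_status_int() >= 500:
#                 env['wsgi.errors'].write(
#                     'server error on %s\n' % env.get('PATH_INFO', ''))
#             start_response(self._response_status, self._response_headers,
#                            self._response_exc_info)
#             return resp_iter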
def make_pre_authed_request(env, method=None, path=None, body=None,
headers=None, agent='Swift', swift_source=None):
"""
Makes a new swob.Request based on the current env but with the
parameters specified. Note that this request will be preauthorized.
:param env: The WSGI environment to base the new request on.
:param method: HTTP method of new request; default is from
the original env.
:param path: HTTP path of new request; default is from the
original env. path should be compatible with what you
would send to Request.blank. path should be quoted and it
can include a query string. for example:
'/a%20space?unicode_str%E8%AA%9E=y%20es'
:param body: HTTP body of new request; empty by default.
:param headers: Extra HTTP headers of new request; None by
default.
:param agent: The HTTP user agent to use; default 'Swift'. You
can put %(orig)s in the agent to have it replaced
with the original env's HTTP_USER_AGENT, such as
'%(orig)s StaticWeb'. You also set agent to None to
use the original env's HTTP_USER_AGENT or '' to
have no HTTP_USER_AGENT.
:param swift_source: Used to mark the request as originating out of
middleware. Will be logged in proxy logs.
:returns: Fresh swob.Request object.
"""
query_string = None
path = path or ''
if path and '?' in path:
path, query_string = path.split('?', 1)
newenv = make_pre_authed_env(env, method, path=unquote(path), agent=agent,
query_string=query_string,
swift_source=swift_source)
if not headers:
headers = {}
if body:
return Request.blank(path, environ=newenv, body=body, headers=headers)
else:
return Request.blank(path, environ=newenv, headers=headers)
def make_pre_authed_env(env, method=None, path=None, agent='Swift',
query_string=None, swift_source=None):
"""
Returns a new fresh WSGI environment with escalated privileges to
do backend checks, listings, etc. that the remote user wouldn't
be able to accomplish directly.
:param env: The WSGI environment to base the new environment on.
:param method: The new REQUEST_METHOD or None to use the
original.
    :param path: The new PATH_INFO or None to use the original. path
should NOT be quoted. When building a url, a Webob
Request (in accordance with wsgi spec) will quote
env['PATH_INFO']. url += quote(environ['PATH_INFO'])
    :param query_string: The new QUERY_STRING or None to use the original.
When building a url, a Webob Request will append
the query string directly to the url.
url += '?' + env['QUERY_STRING']
:param agent: The HTTP user agent to use; default 'Swift'. You
can put %(orig)s in the agent to have it replaced
with the original env's HTTP_USER_AGENT, such as
'%(orig)s StaticWeb'. You also set agent to None to
use the original env's HTTP_USER_AGENT or '' to
have no HTTP_USER_AGENT.
:param swift_source: Used to mark the request as originating out of
middleware. Will be logged in proxy logs.
:returns: Fresh WSGI environment.
"""
newenv = {}
for name in ('eventlet.posthooks', 'HTTP_USER_AGENT', 'HTTP_HOST',
'PATH_INFO', 'QUERY_STRING', 'REMOTE_USER', 'REQUEST_METHOD',
'SCRIPT_NAME', 'SERVER_NAME', 'SERVER_PORT',
'SERVER_PROTOCOL', 'swift.cache', 'swift.source',
'swift.trans_id'):
if name in env:
newenv[name] = env[name]
if method:
newenv['REQUEST_METHOD'] = method
if path:
newenv['PATH_INFO'] = path
newenv['SCRIPT_NAME'] = ''
if query_string is not None:
newenv['QUERY_STRING'] = query_string
if agent:
newenv['HTTP_USER_AGENT'] = (
agent % {'orig': env.get('HTTP_USER_AGENT', '')}).strip()
elif agent == '' and 'HTTP_USER_AGENT' in newenv:
del newenv['HTTP_USER_AGENT']
if swift_source:
newenv['swift.source'] = swift_source
newenv['swift.authorize'] = lambda req: None
newenv['swift.authorize_override'] = True
newenv['REMOTE_USER'] = '.wsgi.pre_authed'
newenv['wsgi.input'] = StringIO('')
if 'SCRIPT_NAME' not in newenv:
newenv['SCRIPT_NAME'] = ''
return newenv
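# Usage sketch (illustrative; the account/container names are hypothetical):
# middleware can issue a privileged backend request based on the incoming env,
# and %(orig)s keeps the original user agent visible in proxy logs.
#
#     subreq = make_pre_authed_request(env, method='GET',
#                                      path='/v1/AUTH_acct/cont?limit=10',
#                                      agent='%(orig)s StaticWeb',
#                                      swift_source='SW')
#     resp = subreq.get_response(self.app)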
|
|
#
# File : mkdir.py
# This file is part of RT-Thread RTOS
# COPYRIGHT (C) 2006 - 2018, RT-Thread Development Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Change Logs:
# Date Author Notes
# 2017-10-04 Bernard The first version
import os
import shutil
from shutil import ignore_patterns
def do_copy_file(src, dst):
# check source file
if not os.path.exists(src):
return
path = os.path.dirname(dst)
    # create the destination directory if it does not exist
if not os.path.exists(path):
os.makedirs(path)
shutil.copy2(src, dst)
def do_copy_folder(src_dir, dst_dir, ignore=None):
# check source directory
if not os.path.exists(src_dir):
return
try:
if os.path.exists(dst_dir):
shutil.rmtree(dst_dir)
    except Exception:
        print('Delete folder %s failed.' % dst_dir)
return
    shutil.copytree(src_dir, dst_dir, ignore=ignore)
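# Usage sketch (illustrative paths): copy a tree while skipping compiled and
# backup files; note that dst_dir is removed first, so the copy is always fresh.
#
#     do_copy_folder('packages/foo', 'dist/foo',
#                    ignore_patterns('*.pyc', '*.old'))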
source_ext = ['c', 'h', 's', 'S', 'cpp', 'xpm']
source_list = []
def walk_children(child):
global source_list
global source_ext
# print child
full_path = child.rfile().abspath
    file_type = full_path.rsplit('.', 1)[-1]
if file_type in source_ext:
if full_path not in source_list:
source_list.append(full_path)
children = child.all_children()
if children != []:
for item in children:
walk_children(item)
def walk_kconfig(RTT_ROOT, source_list):
for parent, dirnames, filenames in os.walk(RTT_ROOT):
if 'bsp' in parent:
continue
if '.git' in parent:
continue
if 'tools' in parent:
continue
if 'Kconfig' in filenames:
pathfile = os.path.join(parent, 'Kconfig')
source_list.append(pathfile)
if 'KConfig' in filenames:
pathfile = os.path.join(parent, 'KConfig')
source_list.append(pathfile)
def bsp_copy_files(bsp_root, dist_dir):
# copy BSP files
do_copy_folder(os.path.join(bsp_root), dist_dir,
ignore_patterns('build', 'dist', '*.pyc', '*.old', '*.map', 'rtthread.bin', '.sconsign.dblite', '*.elf', '*.axf', 'cconfig.h'))
def bsp_update_sconstruct(dist_dir):
with open(os.path.join(dist_dir, 'SConstruct'), 'r') as f:
data = f.readlines()
with open(os.path.join(dist_dir, 'SConstruct'), 'w') as f:
for line in data:
if line.find('RTT_ROOT') != -1:
if line.find('sys.path') != -1:
f.write('# set RTT_ROOT\n')
                f.write('if not os.getenv("RTT_ROOT"): \n    RTT_ROOT="rt-thread"\n\n')
f.write(line)
def bsp_update_kconfig_testcases(dist_dir):
# delete testcases in rt-thread/Kconfig
if not os.path.isfile(os.path.join(dist_dir, 'rt-thread/Kconfig')):
return
with open(os.path.join(dist_dir, 'rt-thread/Kconfig'), 'r') as f:
data = f.readlines()
with open(os.path.join(dist_dir, 'rt-thread/Kconfig'), 'w') as f:
for line in data:
if line.find('examples/utest/testcases/Kconfig') == -1:
f.write(line)
def bsp_update_kconfig(dist_dir):
# change RTT_ROOT in Kconfig
if not os.path.isfile(os.path.join(dist_dir, 'Kconfig')):
return
with open(os.path.join(dist_dir, 'Kconfig'), 'r') as f:
data = f.readlines()
with open(os.path.join(dist_dir, 'Kconfig'), 'w') as f:
found = 0
for line in data:
if line.find('RTT_ROOT') != -1:
found = 1
if line.find('default') != -1 and found:
position = line.find('default')
line = line[0:position] + 'default "rt-thread"\n'
found = 0
f.write(line)
def bsp_update_kconfig_library(dist_dir):
    # redirect the '../libraries' Kconfig source path for the dist tree
if not os.path.isfile(os.path.join(dist_dir, 'Kconfig')):
return
with open(os.path.join(dist_dir, 'Kconfig'), 'r') as f:
data = f.readlines()
with open(os.path.join(dist_dir, 'Kconfig'), 'w') as f:
found = 0
for line in data:
if line.find('RTT_ROOT') != -1:
found = 1
if line.find('../libraries') != -1 and found:
position = line.find('../libraries')
line = line[0:position] + 'libraries/Kconfig"\n'
found = 0
f.write(line)
# change board/kconfig path
if not os.path.isfile(os.path.join(dist_dir, 'board/Kconfig')):
return
with open(os.path.join(dist_dir, 'board/Kconfig'), 'r') as f:
data = f.readlines()
with open(os.path.join(dist_dir, 'board/Kconfig'), 'w') as f:
for line in data:
if line.find('../libraries/HAL_Drivers/Kconfig') != -1:
position = line.find('../libraries/HAL_Drivers/Kconfig')
line = line[0:position] + 'libraries/HAL_Drivers/Kconfig"\n'
f.write(line)
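# Before/after sketch (illustrative Kconfig content): a source line such as
#     source "$RTT_ROOT/../libraries/Kconfig"
# is rewritten so the dist package references its own copied tree:
#     source "$RTT_ROOT/libraries/Kconfig"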
def bs_update_ide_project(bsp_root, rtt_root, rttide = None):
import subprocess
    # by default, update the projects which have a template file
    if rttide is None:
tgt_dict = {'mdk4':('keil', 'armcc'),
'mdk5':('keil', 'armcc'),
'iar':('iar', 'iar'),
'vs':('msvc', 'cl'),
'vs2012':('msvc', 'cl'),
'cdk':('gcc', 'gcc'),
'eclipse':('eclipse', 'gcc')}
else:
item = 'eclipse --project-name=' + rttide['project_name']
tgt_dict = {item:('gcc', 'gcc')}
scons_env = os.environ.copy()
scons_env['RTT_ROOT'] = rtt_root
for item in tgt_dict:
child = subprocess.Popen('scons --target=' + item, cwd=bsp_root, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdout, stderr = child.communicate()
if child.returncode == 0:
print('update %s project' % item)
def zip_dist(dist_dir, dist_name):
import zipfile
    zip_filename = dist_dir
    zip_file = zipfile.ZipFile(zip_filename + '.zip', 'w')
    pre_len = len(os.path.dirname(dist_dir))
    for parent, dirnames, filenames in os.walk(dist_dir):
        for filename in filenames:
            pathfile = os.path.join(parent, filename)
            arcname = pathfile[pre_len:].strip(os.path.sep)
            zip_file.write(pathfile, arcname)
    zip_file.close()
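# Illustrative call (hypothetical BSP name): packs dist/stm32f103-blink into
# dist/stm32f103-blink.zip, with archive members rooted at the folder name
# rather than at absolute paths.
#
#     zip_dist(os.path.join(BSP_ROOT, 'dist', 'stm32f103-blink'),
#              'stm32f103-blink')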
def MkDist_Strip(program, BSP_ROOT, RTT_ROOT, Env):
global source_list
print('make distribution and strip useless files....')
dist_name = os.path.basename(BSP_ROOT)
dist_dir = os.path.join(BSP_ROOT, 'dist-strip', dist_name)
target_path = os.path.join(dist_dir, 'rt-thread')
print('=> %s' % os.path.basename(BSP_ROOT))
bsp_copy_files(BSP_ROOT, dist_dir)
    # copy stm32 bsp library files
if os.path.basename(os.path.dirname(BSP_ROOT)) == 'stm32':
print("=> copy stm32 bsp library")
library_path = os.path.join(os.path.dirname(BSP_ROOT), 'libraries')
library_dir = os.path.join(dist_dir, 'libraries')
bsp_copy_files(os.path.join(library_path, 'HAL_Drivers'), os.path.join(library_dir, 'HAL_Drivers'))
bsp_copy_files(os.path.join(library_path, Env['bsp_lib_type']), os.path.join(library_dir, Env['bsp_lib_type']))
shutil.copyfile(os.path.join(library_path, 'Kconfig'), os.path.join(library_dir, 'Kconfig'))
# do bsp special dist handle
if 'dist_handle' in Env:
print("=> start dist handle")
dist_handle = Env['dist_handle']
dist_handle(BSP_ROOT, dist_dir)
# get all source files from program
for item in program:
walk_children(item)
source_list.sort()
# copy the source files without libcpu and components/libc in RT-Thread
target_list = []
libcpu_dir = os.path.join(RTT_ROOT, 'libcpu').lower()
libc_dir = os.path.join(RTT_ROOT, 'components', 'libc', 'compilers').lower()
sal_dir = os.path.join(RTT_ROOT, 'components', 'net', 'sal_socket').lower()
sources_include_sal = False
for src in source_list:
if src.lower().startswith(BSP_ROOT.lower()):
continue
# skip libc and libcpu dir
if src.lower().startswith(libcpu_dir):
continue
if src.lower().startswith(libc_dir):
continue
if src.lower().startswith(sal_dir):
sources_include_sal = True
continue
if src.lower().startswith(RTT_ROOT.lower()):
target_list.append(src)
source_list = target_list
# get source directory
src_dir = []
for src in source_list:
src = src.replace(RTT_ROOT, '')
if src[0] == os.sep or src[0] == '/':
src = src[1:]
path = os.path.dirname(src)
sub_path = path.split(os.sep)
full_path = RTT_ROOT
for item in sub_path:
full_path = os.path.join(full_path, item)
if full_path not in src_dir:
src_dir.append(full_path)
# add all of SConscript files
for item in src_dir:
source_list.append(os.path.join(item, 'SConscript'))
# add all of Kconfig files
walk_kconfig(RTT_ROOT, source_list)
# copy all files to target directory
source_list.sort()
for src in source_list:
dst = src.replace(RTT_ROOT, '')
if dst[0] == os.sep or dst[0] == '/':
dst = dst[1:]
print('=> %s' % dst)
dst = os.path.join(target_path, dst)
do_copy_file(src, dst)
# copy tools directory
print('=> tools')
do_copy_folder(os.path.join(RTT_ROOT, 'tools'), os.path.join(target_path, 'tools'), ignore_patterns('*.pyc'))
do_copy_file(os.path.join(RTT_ROOT, 'Kconfig'), os.path.join(target_path, 'Kconfig'))
do_copy_file(os.path.join(RTT_ROOT, 'AUTHORS'), os.path.join(target_path, 'AUTHORS'))
do_copy_file(os.path.join(RTT_ROOT, 'COPYING'), os.path.join(target_path, 'COPYING'))
do_copy_file(os.path.join(RTT_ROOT, 'README.md'), os.path.join(target_path, 'README.md'))
do_copy_file(os.path.join(RTT_ROOT, 'README_zh.md'), os.path.join(target_path, 'README_zh.md'))
print('=> %s' % os.path.join('components', 'libc', 'compilers'))
do_copy_folder(os.path.join(RTT_ROOT, 'components', 'libc', 'compilers'), os.path.join(target_path, 'components', 'libc', 'compilers'))
if sources_include_sal:
print('=> %s' % os.path.join('components', 'net', 'sal_socket'))
do_copy_folder(os.path.join(RTT_ROOT, 'components', 'net', 'sal_socket'), os.path.join(target_path, 'components', 'net', 'sal_socket'))
# copy all libcpu/ARCH directory
import rtconfig
print('=> %s' % (os.path.join('libcpu', rtconfig.ARCH, rtconfig.CPU)))
do_copy_folder(os.path.join(RTT_ROOT, 'libcpu', rtconfig.ARCH, rtconfig.CPU), os.path.join(target_path, 'libcpu', rtconfig.ARCH, rtconfig.CPU))
if os.path.exists(os.path.join(RTT_ROOT, 'libcpu', rtconfig.ARCH, 'common')):
print('=> %s' % (os.path.join('libcpu', rtconfig.ARCH, 'common')))
do_copy_folder(os.path.join(RTT_ROOT, 'libcpu', rtconfig.ARCH, 'common'), os.path.join(target_path, 'libcpu', rtconfig.ARCH, 'common'))
do_copy_file(os.path.join(RTT_ROOT, 'libcpu', 'Kconfig'), os.path.join(target_path, 'libcpu', 'Kconfig'))
do_copy_file(os.path.join(RTT_ROOT, 'libcpu', 'SConscript'), os.path.join(target_path, 'libcpu', 'SConscript'))
print('Update configuration files...')
# change RTT_ROOT in SConstruct
bsp_update_sconstruct(dist_dir)
# change RTT_ROOT in Kconfig
bsp_update_kconfig(dist_dir)
bsp_update_kconfig_library(dist_dir)
# delete testcases in Kconfig
bsp_update_kconfig_testcases(dist_dir)
# update all project files
bs_update_ide_project(dist_dir, target_path)
# make zip package
zip_dist(dist_dir, dist_name)
print('done!')
def MkDist(program, BSP_ROOT, RTT_ROOT, Env, rttide = None):
print('make distribution....')
dist_name = os.path.basename(BSP_ROOT)
    if rttide is None:
dist_dir = os.path.join(BSP_ROOT, 'dist', dist_name)
else:
dist_dir = rttide['project_path']
target_path = os.path.join(dist_dir, 'rt-thread')
# copy BSP files
print('=> %s' % os.path.basename(BSP_ROOT))
bsp_copy_files(BSP_ROOT, dist_dir)
# do bsp special dist handle
if 'dist_handle' in Env:
print("=> start dist handle")
dist_handle = Env['dist_handle']
dist_handle(BSP_ROOT, dist_dir)
    # copy components directory
print('=> components')
do_copy_folder(os.path.join(RTT_ROOT, 'components'), os.path.join(target_path, 'components'))
# skip documentation directory
# skip examples
# copy include directory
print('=> include')
do_copy_folder(os.path.join(RTT_ROOT, 'include'), os.path.join(target_path, 'include'))
# copy all libcpu/ARCH directory
print('=> libcpu')
import rtconfig
do_copy_folder(os.path.join(RTT_ROOT, 'libcpu', rtconfig.ARCH), os.path.join(target_path, 'libcpu', rtconfig.ARCH))
do_copy_file(os.path.join(RTT_ROOT, 'libcpu', 'Kconfig'), os.path.join(target_path, 'libcpu', 'Kconfig'))
do_copy_file(os.path.join(RTT_ROOT, 'libcpu', 'SConscript'), os.path.join(target_path, 'libcpu', 'SConscript'))
# copy src directory
print('=> src')
do_copy_folder(os.path.join(RTT_ROOT, 'src'), os.path.join(target_path, 'src'))
# copy tools directory
print('=> tools')
do_copy_folder(os.path.join(RTT_ROOT, 'tools'), os.path.join(target_path, 'tools'), ignore_patterns('*.pyc'))
do_copy_file(os.path.join(RTT_ROOT, 'Kconfig'), os.path.join(target_path, 'Kconfig'))
do_copy_file(os.path.join(RTT_ROOT, 'AUTHORS'), os.path.join(target_path, 'AUTHORS'))
do_copy_file(os.path.join(RTT_ROOT, 'COPYING'), os.path.join(target_path, 'COPYING'))
do_copy_file(os.path.join(RTT_ROOT, 'README.md'), os.path.join(target_path, 'README.md'))
do_copy_file(os.path.join(RTT_ROOT, 'README_zh.md'), os.path.join(target_path, 'README_zh.md'))
print('Update configuration files...')
# change RTT_ROOT in SConstruct
bsp_update_sconstruct(dist_dir)
# change RTT_ROOT in Kconfig
bsp_update_kconfig(dist_dir)
bsp_update_kconfig_library(dist_dir)
# delete testcases in Kconfig
bsp_update_kconfig_testcases(dist_dir)
# update all project files
    if rttide is None:
bs_update_ide_project(dist_dir, target_path)
else:
bs_update_ide_project(dist_dir, target_path, rttide)
# make zip package
    if rttide is None:
zip_dist(dist_dir, dist_name)
print('done!')
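# Typical entry point (a sketch; the exact hook lives in the SCons build
# scripts): a `scons --dist` target collects the program nodes and calls
# MkDist() or MkDist_Strip() with the BSP and RT-Thread roots.
#
#     MkDist(program, BSP_ROOT, RTT_ROOT, Env)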
|
|
import json
import sqlite3
import os
import re
from random import randint, choice
from bs4 import BeautifulSoup as BS
from discord.ext.commands import Bot
import requests
import pymongo
from constants import CLIENT_ID, KHALED_CHOICES, ZOLTAR_CHOICES, IMPLANT_TYPES, add_quote, update_quotes, ValidationError, QUOTE_LIST, YES_NO
from intel_entry import IntelEntry
my_bot = Bot(command_prefix="!")
conn = pymongo.MongoClient()
db = conn['broadside']
intel = db.intel
INSERT_ERR_USAGE = "Error in arguments. Usage: <timer_name:titan POS> <alliance:TEST> <system:HED-GP> <time:21:00> <date:07/24/17> [location]"
VIEW_ERR_USAGE = "Error in arguments. Usage: <key:system>, <value:HED-GP>"
@my_bot.event
async def on_ready():
print("Client logged in.")
update_quotes()
@my_bot.command()
async def decide(args):
return await my_bot.say(choice(YES_NO))
@my_bot.command()
async def addintel(args):
args = args.split()
if len(args) not in [5, 6]:
return await my_bot.say(INSERT_ERR_USAGE)
timer_name, alliance, system, time, date = args[:5]
    location = args[5] if len(args) > 5 else None
entry = IntelEntry(timer_name=timer_name, alliance=alliance, system=system, time=time, date=date, location=location)
entry_id = str(intel.insert(entry.to_dict()))
return await my_bot.say("Inserted intel entry {}.".format(entry_id))
@my_bot.command()
async def viewintel(args):
args = args.split()
if len(args) != 2:
return await my_bot.say(VIEW_ERR_USAGE)
else:
        return await my_bot.say(json.dumps(list(intel.find({args[0]: args[1]})), default=str))
@my_bot.command()
async def viewintelkeys():
return await my_bot.say(json.dumps(list(IntelEntry.KEYS)))
@my_bot.command()
async def hello():
"""
Test command to ensure the bot is working
Example: !hello
"""
return await my_bot.say("Hi team!")
@my_bot.command()
async def question():
"""
Ask a yes or no question and receive an answer to it.
Example: !question Does she love me?
"""
return await my_bot.say(choice(ZOLTAR_CHOICES))
@my_bot.command()
async def roll():
"""
Roll a die which results somewhere between 1-100
Example: !roll
"""
return await my_bot.say(randint(1, 100))
@my_bot.command()
async def yt(args):
"""
Get the first Youtube search result video
Example: !yt how do I take a screenshot
"""
if not args:
return await my_bot.say("Empty search terms")
enc_search = '+'.join(args.split())
print(enc_search)
url = f"https://www.youtube.com/results?search_query={enc_search}"
    resp = requests.get(url)
if resp.status_code != 200:
return await my_bot.say("Failed to retrieve search")
# Build a BS parser and find all Youtube links on the page
bs = BS(resp.text, "html.parser")
items = bs.find("div", id="results").find_all("div", class_="yt-lockup-content")
if not items:
return await my_bot.say("No videos found")
# Construct an easy list of URLs
links = []
for i in items:
try:
links.append(i.find("a", class_="yt-uix-sessionlink")["href"])
except TypeError: # i.find() returned None
return await my_bot.say("Was unable to find results for this query. Sorry!")
hrefs = []
for u in links:
if u.startswith("/watch"):
hrefs.append(u)
# Check if we have any at all
if not hrefs:
return await my_bot.say("No URLs found (? wat)")
# Finish by sending the URL out
return await my_bot.say(f"https://www.youtube.com{hrefs[0]}")
@my_bot.command()
async def spam(args):
"""
Spam a channel with dumb things
Example: !spam :ok_hand:
"""
if not args or len(args) > 25:
return await my_bot.say("Invalid spam input")
y = args * randint(5, 20)
return await my_bot.say(f"{''.join(y)}")
@my_bot.command()
async def blessup():
"""
Recite a DJ Khaled quote
Example: !blessup
"""
return await my_bot.say(choice(KHALED_CHOICES))
@my_bot.command()
async def getprice(msg):
EVEMARKETER = "http://api.evemarketer.com/ec/marketstat?typeid=%s®ionlimit=10000002"
EVESTATICDATADUMP = "data/sqlite-latest.sqlite"
if os.path.isfile(os.path.expanduser(EVESTATICDATADUMP)):
conn = sqlite3.connect(os.path.expanduser(EVESTATICDATADUMP))
else:
conn = None
def get_type_id(name):
c = conn.cursor()
c.execute("select typeName, typeID from invTypes where typeName = '{0}%' collate nocase;".format(name))
result = c.fetchone()
if result:
return result
c.execute("select typeName, typeID from invTypes where typeName like '%{0}%' collate nocase;".format(name))
results = c.fetchall()
if len(results) == 0:
return None
results = sorted(results, key=lambda x: len(x[0]))
print(results[0])
return results[0]
def item_to_price(item):
try:
result = get_type_id(item)
assert result
item = result[0]
url = EVEMARKETER % result[1]
print(url)
soup = BS(requests.get(url).content, "html.parser")
price = str(soup.find("sell").min)
removetags = re.compile("<(.|\n)*?>")
price = removetags.sub("", price)
price = float(price)
if price == 0:
raise ZeroDivisionError
import locale
locale.setlocale(locale.LC_ALL, "")
            formatted_price = locale.format_string('%d', price, grouping=True)
return item, formatted_price, price
except Exception as e:
return None, None, None
if not conn:
return await my_bot.say("EVE static data dump not loaded")
term = msg.replace("!getprice ", "")
try:
item, formatted_price, price = item_to_price(term)
print("%s : %s ISK" % (item, formatted_price))
return await my_bot.say("%s : %s ISK" % (item, formatted_price))
except Exception as e:
return await my_bot.say("Unable to find search item")
@my_bot.command()
async def getsetprice(msg):
EVECENTRAL = "https://api.evemarketer.com/ec/marketstat?typeid=%s®ionlimit=10000002"
EVESTATICDATADUMP = "data/sqlite-latest.sqlite"
if os.path.isfile(os.path.expanduser(EVESTATICDATADUMP)):
conn = sqlite3.connect(os.path.expanduser(EVESTATICDATADUMP))
else:
conn = None
def get_type_id(name):
c = conn.cursor()
c.execute("select typeName, typeID from invTypes where typeName = '{0}%' collate nocase;".format(name))
result = c.fetchone()
if result:
return result
c.execute("select typeName, typeID from invTypes where typeName like '%{0}%' collate nocase;".format(name))
results = c.fetchall()
if len(results) == 0:
return None
results = sorted(results, key=lambda x: len(x[0]))
print(results[0])
return results[0]
def item_to_price(item):
try:
result = get_type_id(item)
assert result
item = result[0]
url = EVECENTRAL % result[1]
print(url)
            soup = BS(requests.get(url).content, "html.parser")
price = str(soup.find("sell").min)
removetags = re.compile("<(.|\n)*?>")
price = removetags.sub("", price)
price = float(price)
if price == 0:
raise ZeroDivisionError
import locale
locale.setlocale(locale.LC_ALL, "")
            formatted_price = locale.format_string('%d', price, grouping=True)
return item, formatted_price, price
except Exception as e:
return None, None, None
if not conn:
return await my_bot.say("EVE static data dump not loaded")
term = msg.replace("!getsetprice ", "")
term = term.strip()
terms = map(lambda x: term + " " + x, IMPLANT_TYPES)
total = 0
results = []
for item in terms:
r = item_to_price(item)
print(item, r)
if r[0] and r[1]:
results.append((r[0], r[1]))
total = total + r[2]
import locale
locale.setlocale(locale.LC_ALL, "")
    ftotal = locale.format_string('%d', total, grouping=True)
results.append(("total", ftotal))
return await my_bot.say("%s" % str(results))
@my_bot.command(pass_context=True)
async def addquote(ctx, *args):
"""Displays a random quote from an array of quotes."""
msg = ctx.message.content
try:
add_quote(msg)
        return await my_bot.say('Quote successfully added.')
except ValidationError as exception:
return await my_bot.say(exception)
@my_bot.command()
async def quote(*args):
"""Displays a random quote from quotes.txt"""
return await my_bot.say(choice(QUOTE_LIST))
my_bot.run(CLIENT_ID)
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'ExperimentSampleToAlignment.data'
db.add_column(u'main_experimentsampletoalignment', 'data',
self.gf('main.custom_fields.PostgresJsonField')(default={}),
keep_default=False)
def backwards(self, orm):
# Deleting field 'ExperimentSampleToAlignment.data'
db.delete_column(u'main_experimentsampletoalignment', 'data')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'main.alignmentgroup': {
'Meta': {'object_name': 'AlignmentGroup'},
'aligner': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'alignment_options': ('main.custom_fields.PostgresJsonField', [], {'default': '\'{"skip_het_only": false, "call_as_haploid": false}\''}),
'dataset_set': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['main.Dataset']", 'null': 'True', 'blank': 'True'}),
'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'reference_genome': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.ReferenceGenome']"}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'NOT_STARTED'", 'max_length': '40'}),
'uid': ('django.db.models.fields.CharField', [], {'default': "'0855aba9'", 'unique': 'True', 'max_length': '8'})
},
u'main.chromosome': {
'Meta': {'object_name': 'Chromosome'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'num_bases': ('django.db.models.fields.BigIntegerField', [], {}),
'reference_genome': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.ReferenceGenome']"}),
'seqrecord_id': ('django.db.models.fields.CharField', [], {'default': "'chrom_1'", 'max_length': '256'}),
'uid': ('django.db.models.fields.CharField', [], {'default': "'94eb5264'", 'unique': 'True', 'max_length': '8'})
},
u'main.contig': {
'Meta': {'object_name': 'Contig'},
'dataset_set': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['main.Dataset']", 'null': 'True', 'blank': 'True'}),
'experiment_sample_to_alignment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.ExperimentSampleToAlignment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'metadata': ('main.custom_fields.PostgresJsonField', [], {}),
'num_bases': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'parent_reference_genome': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['main.ReferenceGenome']"}),
'uid': ('django.db.models.fields.CharField', [], {'default': "'915fea2a'", 'unique': 'True', 'max_length': '8'})
},
u'main.dataset': {
'Meta': {'object_name': 'Dataset'},
'filesystem_idx_location': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'filesystem_location': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'READY'", 'max_length': '40'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'uid': ('django.db.models.fields.CharField', [], {'default': "'90619aa7'", 'unique': 'True', 'max_length': '8'})
},
u'main.experimentsample': {
'Meta': {'object_name': 'ExperimentSample'},
'children': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'parents'", 'symmetrical': 'False', 'through': u"orm['main.ExperimentSampleRelation']", 'to': u"orm['main.ExperimentSample']"}),
'data': ('main.custom_fields.PostgresJsonField', [], {}),
'dataset_set': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['main.Dataset']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Project']"}),
'uid': ('django.db.models.fields.CharField', [], {'default': "'577f319f'", 'unique': 'True', 'max_length': '8'})
},
u'main.experimentsamplerelation': {
'Meta': {'object_name': 'ExperimentSampleRelation'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'child_relationships'", 'to': u"orm['main.ExperimentSample']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parent_relationships'", 'to': u"orm['main.ExperimentSample']"}),
'uid': ('django.db.models.fields.CharField', [], {'default': "'a588c115'", 'unique': 'True', 'max_length': '8'})
},
u'main.experimentsampletoalignment': {
'Meta': {'object_name': 'ExperimentSampleToAlignment'},
'alignment_group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.AlignmentGroup']"}),
'data': ('main.custom_fields.PostgresJsonField', [], {}),
'dataset_set': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['main.Dataset']", 'null': 'True', 'blank': 'True'}),
'experiment_sample': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.ExperimentSample']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'uid': ('django.db.models.fields.CharField', [], {'default': "'fb157bd2'", 'unique': 'True', 'max_length': '8'})
},
u'main.project': {
'Meta': {'object_name': 'Project'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.UserProfile']"}),
's3_backed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'uid': ('django.db.models.fields.CharField', [], {'default': "'1a77912d'", 'unique': 'True', 'max_length': '8'})
},
u'main.referencegenome': {
'Meta': {'object_name': 'ReferenceGenome'},
'dataset_set': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['main.Dataset']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_materialized_variant_view_valid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'metadata': ('main.custom_fields.PostgresJsonField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Project']"}),
'uid': ('django.db.models.fields.CharField', [], {'default': "'65e1af38'", 'unique': 'True', 'max_length': '8'}),
'variant_key_map': ('main.custom_fields.PostgresJsonField', [], {})
},
u'main.region': {
'Meta': {'object_name': 'Region'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'reference_genome': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.ReferenceGenome']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'uid': ('django.db.models.fields.CharField', [], {'default': "'7325a1ff'", 'unique': 'True', 'max_length': '8'})
},
u'main.regioninterval': {
'Meta': {'object_name': 'RegionInterval'},
'end': ('django.db.models.fields.BigIntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Region']"}),
'start': ('django.db.models.fields.BigIntegerField', [], {})
},
u'main.s3file': {
'Meta': {'object_name': 'S3File'},
'bucket': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'})
},
u'main.savedvariantfilterquery': {
'Meta': {'object_name': 'SavedVariantFilterQuery'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.UserProfile']"}),
'text': ('django.db.models.fields.TextField', [], {}),
'uid': ('django.db.models.fields.CharField', [], {'default': "'fb15f836'", 'unique': 'True', 'max_length': '8'})
},
u'main.userprofile': {
'Meta': {'object_name': 'UserProfile'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'uid': ('django.db.models.fields.CharField', [], {'default': "'bb6658d2'", 'unique': 'True', 'max_length': '8'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'main.variant': {
'Meta': {'object_name': 'Variant'},
'chromosome': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Chromosome']"}),
'data': ('main.custom_fields.PostgresJsonField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'position': ('django.db.models.fields.BigIntegerField', [], {}),
'ref_value': ('django.db.models.fields.TextField', [], {}),
'reference_genome': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.ReferenceGenome']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'uid': ('django.db.models.fields.CharField', [], {'default': "'b1e752e1'", 'unique': 'True', 'max_length': '8'})
},
u'main.variantalternate': {
'Meta': {'object_name': 'VariantAlternate'},
'alt_value': ('django.db.models.fields.TextField', [], {}),
'data': ('main.custom_fields.PostgresJsonField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_primary': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'uid': ('django.db.models.fields.CharField', [], {'default': "'05a9a5ea'", 'unique': 'True', 'max_length': '8'}),
'variant': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Variant']", 'null': 'True'})
},
u'main.variantcallercommondata': {
'Meta': {'object_name': 'VariantCallerCommonData'},
'alignment_group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.AlignmentGroup']"}),
'data': ('main.custom_fields.PostgresJsonField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source_dataset': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Dataset']"}),
'variant': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Variant']"})
},
u'main.variantevidence': {
'Meta': {'object_name': 'VariantEvidence'},
'data': ('main.custom_fields.PostgresJsonField', [], {}),
'experiment_sample': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.ExperimentSample']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'uid': ('django.db.models.fields.CharField', [], {'default': "'d1ea8807'", 'unique': 'True', 'max_length': '8'}),
'variant_caller_common_data': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.VariantCallerCommonData']"}),
'variantalternate_set': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['main.VariantAlternate']", 'symmetrical': 'False'})
},
u'main.variantset': {
'Meta': {'object_name': 'VariantSet'},
'dataset_set': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['main.Dataset']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'reference_genome': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.ReferenceGenome']"}),
'uid': ('django.db.models.fields.CharField', [], {'default': "'afb099d6'", 'unique': 'True', 'max_length': '8'}),
'variants': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['main.Variant']", 'null': 'True', 'through': u"orm['main.VariantToVariantSet']", 'blank': 'True'})
},
u'main.varianttovariantset': {
'Meta': {'object_name': 'VariantToVariantSet'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sample_variant_set_association': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['main.ExperimentSample']", 'null': 'True', 'blank': 'True'}),
'variant': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Variant']"}),
'variant_set': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.VariantSet']"})
}
}
complete_apps = ['main']
|
|
#!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test objects for interacting with a bitcoind node over the p2p protocol.
The P2PInterface objects interact with the bitcoind nodes under test using the
node's p2p interface. They can be used to send messages to the node, and
callbacks can be registered that execute when messages are received from the
node. Messages are sent to/received from the node on an asyncio event loop.
State held inside the objects must be guarded by the p2p_lock to avoid data
races between the main testing thread and the event loop.
P2PConnection: A low-level connection object to a node's P2P interface
P2PInterface: A high-level interface object for communicating to a node over P2P
P2PDataStore: A p2p interface class that keeps a store of transactions and blocks
and can respond correctly to getdata and getheaders messages
P2PTxInvStore: A p2p interface class that inherits from P2PDataStore, and keeps
a count of how many times each txid has been announced."""
import asyncio
from collections import defaultdict
from io import BytesIO
import logging
import struct
import sys
import threading
from test_framework.messages import (
CBlockHeader,
MAX_HEADERS_RESULTS,
MIN_VERSION_SUPPORTED,
msg_addr,
msg_addrv2,
msg_block,
MSG_BLOCK,
msg_blocktxn,
msg_cfcheckpt,
msg_cfheaders,
msg_cfilter,
msg_cmpctblock,
msg_feefilter,
msg_filteradd,
msg_filterclear,
msg_filterload,
msg_getaddr,
msg_getblocks,
msg_getblocktxn,
msg_getdata,
msg_getheaders,
msg_headers,
msg_inv,
msg_mempool,
msg_merkleblock,
msg_notfound,
msg_ping,
msg_pong,
msg_sendaddrv2,
msg_sendcmpct,
msg_sendheaders,
msg_tx,
MSG_TX,
MSG_TYPE_MASK,
msg_verack,
msg_version,
MSG_WTX,
msg_wtxidrelay,
NODE_NETWORK,
NODE_WITNESS,
sha256,
)
from test_framework.util import (
MAX_NODES,
p2p_port,
wait_until_helper,
)
logger = logging.getLogger("TestFramework.p2p")
MESSAGEMAP = {
b"addr": msg_addr,
b"addrv2": msg_addrv2,
b"block": msg_block,
b"blocktxn": msg_blocktxn,
b"cfcheckpt": msg_cfcheckpt,
b"cfheaders": msg_cfheaders,
b"cfilter": msg_cfilter,
b"cmpctblock": msg_cmpctblock,
b"feefilter": msg_feefilter,
b"filteradd": msg_filteradd,
b"filterclear": msg_filterclear,
b"filterload": msg_filterload,
b"getaddr": msg_getaddr,
b"getblocks": msg_getblocks,
b"getblocktxn": msg_getblocktxn,
b"getdata": msg_getdata,
b"getheaders": msg_getheaders,
b"headers": msg_headers,
b"inv": msg_inv,
b"mempool": msg_mempool,
b"merkleblock": msg_merkleblock,
b"notfound": msg_notfound,
b"ping": msg_ping,
b"pong": msg_pong,
b"sendaddrv2": msg_sendaddrv2,
b"sendcmpct": msg_sendcmpct,
b"sendheaders": msg_sendheaders,
b"tx": msg_tx,
b"verack": msg_verack,
b"version": msg_version,
b"wtxidrelay": msg_wtxidrelay,
}
MAGIC_BYTES = {
"mainnet": b"\xf9\xbe\xb4\xd9", # mainnet
"testnet3": b"\x0b\x11\x09\x07", # testnet3
"regtest": b"\xfa\xbf\xb5\xda", # regtest
"signet": b"\x0a\x03\xcf\x40", # signet
}
class P2PConnection(asyncio.Protocol):
"""A low-level connection object to a node's P2P interface.
This class is responsible for:
- opening and closing the TCP connection to the node
- reading bytes from and writing bytes to the socket
- deserializing and serializing the P2P message header
- logging messages as they are sent and received
    This class contains no logic for handling the P2P message payloads. It must be
sub-classed and the on_message() callback overridden."""
def __init__(self):
# The underlying transport of the connection.
# Should only call methods on this from the NetworkThread, c.f. call_soon_threadsafe
self._transport = None
@property
def is_connected(self):
return self._transport is not None
def peer_connect_helper(self, dstaddr, dstport, net, timeout_factor):
assert not self.is_connected
self.timeout_factor = timeout_factor
self.dstaddr = dstaddr
self.dstport = dstport
# The initial message to send after the connection was made:
self.on_connection_send_msg = None
self.recvbuf = b""
self.magic_bytes = MAGIC_BYTES[net]
def peer_connect(self, dstaddr, dstport, *, net, timeout_factor):
self.peer_connect_helper(dstaddr, dstport, net, timeout_factor)
loop = NetworkThread.network_event_loop
logger.debug('Connecting to Bitcoin Node: %s:%d' % (self.dstaddr, self.dstport))
coroutine = loop.create_connection(lambda: self, host=self.dstaddr, port=self.dstport)
return lambda: loop.call_soon_threadsafe(loop.create_task, coroutine)
def peer_accept_connection(self, connect_id, connect_cb=lambda: None, *, net, timeout_factor):
self.peer_connect_helper('0', 0, net, timeout_factor)
logger.debug('Listening for Bitcoin Node with id: {}'.format(connect_id))
return lambda: NetworkThread.listen(self, connect_cb, idx=connect_id)
def peer_disconnect(self):
# Connection could have already been closed by other end.
NetworkThread.network_event_loop.call_soon_threadsafe(lambda: self._transport and self._transport.abort())
# Connection and disconnection methods
def connection_made(self, transport):
"""asyncio callback when a connection is opened."""
assert not self._transport
logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
self._transport = transport
if self.on_connection_send_msg:
self.send_message(self.on_connection_send_msg)
self.on_connection_send_msg = None # Never used again
self.on_open()
def connection_lost(self, exc):
"""asyncio callback when a connection is closed."""
if exc:
logger.warning("Connection lost to {}:{} due to {}".format(self.dstaddr, self.dstport, exc))
else:
logger.debug("Closed connection to: %s:%d" % (self.dstaddr, self.dstport))
self._transport = None
self.recvbuf = b""
self.on_close()
# Socket read methods
def data_received(self, t):
"""asyncio callback when data is read from the socket."""
if len(t) > 0:
self.recvbuf += t
self._on_data()
def _on_data(self):
"""Try to read P2P messages from the recv buffer.
This method reads data from the buffer in a loop. It deserializes,
parses and verifies the P2P header, then passes the P2P payload to
the on_message callback for processing."""
try:
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != self.magic_bytes:
raise ValueError("magic bytes mismatch: {} != {}".format(repr(self.magic_bytes), repr(self.recvbuf)))
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
msgtype = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if msgtype not in MESSAGEMAP:
raise ValueError("Received unknown msgtype from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, msgtype, repr(msg)))
f = BytesIO(msg)
t = MESSAGEMAP[msgtype]()
t.deserialize(f)
self._log_message("receive", t)
self.on_message(t)
except Exception as e:
            logger.exception('Error reading message: %s', repr(e))
raise
def on_message(self, message):
"""Callback for processing a P2P payload. Must be overridden by derived class."""
raise NotImplementedError
# Socket write methods
def send_message(self, message):
"""Send a P2P message over the socket.
This method takes a P2P payload, builds the P2P header and adds
the message to the send buffer to be sent over the socket."""
tmsg = self.build_message(message)
self._log_message("send", message)
return self.send_raw_message(tmsg)
def send_raw_message(self, raw_message_bytes):
if not self.is_connected:
raise IOError('Not connected')
def maybe_write():
if not self._transport:
return
if self._transport.is_closing():
return
self._transport.write(raw_message_bytes)
NetworkThread.network_event_loop.call_soon_threadsafe(maybe_write)
# Class utility methods
def build_message(self, message):
"""Build a serialized P2P message"""
msgtype = message.msgtype
data = message.serialize()
tmsg = self.magic_bytes
tmsg += msgtype
tmsg += b"\x00" * (12 - len(msgtype))
tmsg += struct.pack("<I", len(data))
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
return tmsg
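    # Wire-format sketch of what build_message() produces (regtest ping with
    # illustrative checksum bytes): 4-byte magic, 12-byte zero-padded command,
    # 4-byte little-endian payload length, first 4 bytes of the double-SHA256
    # of the payload, then the payload itself.
    #
    #     fa bf b5 da                          magic (regtest)
    #     70 69 6e 67 00 00 00 00 00 00 00 00  b"ping" padded to 12 bytes
    #     08 00 00 00                          payload length = 8
    #     xx xx xx xx                          sha256(sha256(payload))[:4]
    #     <8-byte nonce>                       msg_ping payload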
def _log_message(self, direction, msg):
"""Logs a message being sent or received over the connection."""
if direction == "send":
log_message = "Send message to "
elif direction == "receive":
log_message = "Received message from "
log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500])
if len(log_message) > 500:
log_message += "... (msg truncated)"
logger.debug(log_message)
class P2PInterface(P2PConnection):
"""A high-level P2P interface class for communicating with a Bitcoin node.
This class provides high-level callbacks for processing P2P message
payloads, as well as convenience methods for interacting with the
node over P2P.
Individual testcases should subclass this and override the on_* methods
if they want to alter message handling behaviour."""
def __init__(self, support_addrv2=False, wtxidrelay=True):
super().__init__()
# Track number of messages of each type received.
# Should be read-only in a test.
self.message_count = defaultdict(int)
# Track the most recent message of each type.
# To wait for a message to be received, pop that message from
# this and use self.wait_until.
self.last_message = {}
# A count of the number of ping messages we've sent to the node
self.ping_counter = 1
# The network services received from the peer
self.nServices = 0
self.support_addrv2 = support_addrv2
# If the peer supports wtxid-relay
self.wtxidrelay = wtxidrelay
def peer_connect_send_version(self, services):
# Send a version msg
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.on_connection_send_msg = vt # Will be sent in connection_made callback
def peer_connect(self, *args, services=NODE_NETWORK | NODE_WITNESS, send_version=True, **kwargs):
create_conn = super().peer_connect(*args, **kwargs)
if send_version:
self.peer_connect_send_version(services)
return create_conn
def peer_accept_connection(self, *args, services=NODE_NETWORK | NODE_WITNESS, **kwargs):
create_conn = super().peer_accept_connection(*args, **kwargs)
self.peer_connect_send_version(services)
return create_conn
# Message receiving methods
def on_message(self, message):
"""Receive message and dispatch message to appropriate callback.
We keep a count of how many of each message type has been received
and the most recent message of each type."""
with p2p_lock:
try:
msgtype = message.msgtype.decode('ascii')
self.message_count[msgtype] += 1
self.last_message[msgtype] = message
getattr(self, 'on_' + msgtype)(message)
except:
print("ERROR delivering %s (%s)" % (repr(message), sys.exc_info()[0]))
raise
# Callback methods. Can be overridden by subclasses in individual test
# cases to provide custom message handling behaviour.
def on_open(self):
pass
def on_close(self):
pass
def on_addr(self, message): pass
def on_addrv2(self, message): pass
def on_block(self, message): pass
def on_blocktxn(self, message): pass
def on_cfcheckpt(self, message): pass
def on_cfheaders(self, message): pass
def on_cfilter(self, message): pass
def on_cmpctblock(self, message): pass
def on_feefilter(self, message): pass
def on_filteradd(self, message): pass
def on_filterclear(self, message): pass
def on_filterload(self, message): pass
def on_getaddr(self, message): pass
def on_getblocks(self, message): pass
def on_getblocktxn(self, message): pass
def on_getdata(self, message): pass
def on_getheaders(self, message): pass
def on_headers(self, message): pass
def on_mempool(self, message): pass
def on_merkleblock(self, message): pass
def on_notfound(self, message): pass
def on_pong(self, message): pass
def on_sendaddrv2(self, message): pass
def on_sendcmpct(self, message): pass
def on_sendheaders(self, message): pass
def on_tx(self, message): pass
def on_wtxidrelay(self, message): pass
def on_inv(self, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
self.send_message(want)
def on_ping(self, message):
self.send_message(msg_pong(message.nonce))
def on_verack(self, message):
pass
def on_version(self, message):
        assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than or equal to {}".format(message.nVersion, MIN_VERSION_SUPPORTED)
if message.nVersion >= 70016 and self.wtxidrelay:
self.send_message(msg_wtxidrelay())
if self.support_addrv2:
self.send_message(msg_sendaddrv2())
self.send_message(msg_verack())
self.nServices = message.nServices
# Connection helper methods
def wait_until(self, test_function_in, *, timeout=60, check_connected=True):
def test_function():
if check_connected:
assert self.is_connected
return test_function_in()
wait_until_helper(test_function, timeout=timeout, lock=p2p_lock, timeout_factor=self.timeout_factor)
def wait_for_connect(self, timeout=60):
test_function = lambda: self.is_connected
wait_until_helper(test_function, timeout=timeout, lock=p2p_lock)
def wait_for_disconnect(self, timeout=60):
test_function = lambda: not self.is_connected
self.wait_until(test_function, timeout=timeout, check_connected=False)
# Message receiving helper methods
def wait_for_tx(self, txid, timeout=60):
def test_function():
if not self.last_message.get('tx'):
return False
return self.last_message['tx'].tx.rehash() == txid
self.wait_until(test_function, timeout=timeout)
def wait_for_block(self, blockhash, timeout=60):
def test_function():
return self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
self.wait_until(test_function, timeout=timeout)
def wait_for_header(self, blockhash, timeout=60):
def test_function():
last_headers = self.last_message.get('headers')
if not last_headers:
return False
return last_headers.headers[0].rehash() == int(blockhash, 16)
self.wait_until(test_function, timeout=timeout)
def wait_for_merkleblock(self, blockhash, timeout=60):
def test_function():
last_filtered_block = self.last_message.get('merkleblock')
if not last_filtered_block:
return False
return last_filtered_block.merkleblock.header.rehash() == int(blockhash, 16)
self.wait_until(test_function, timeout=timeout)
def wait_for_getdata(self, hash_list, timeout=60):
"""Waits for a getdata message.
The object hashes in the inventory vector must match the provided hash_list."""
def test_function():
last_data = self.last_message.get("getdata")
if not last_data:
return False
return [x.hash for x in last_data.inv] == hash_list
self.wait_until(test_function, timeout=timeout)
def wait_for_getheaders(self, timeout=60):
"""Waits for a getheaders message.
        Receiving any getheaders message will satisfy the predicate. The
        last_message["getheaders"] value must be explicitly cleared before
        calling this method, or this will return immediately with success.
        TODO: change this method to take a hash value and only return true if
        the correct block header has been requested."""
def test_function():
return self.last_message.get("getheaders")
self.wait_until(test_function, timeout=timeout)
def wait_for_inv(self, expected_inv, timeout=60):
"""Waits for an INV message and checks that the first inv object in the message was as expected."""
if len(expected_inv) > 1:
raise NotImplementedError("wait_for_inv() will only verify the first inv object")
def test_function():
return self.last_message.get("inv") and \
self.last_message["inv"].inv[0].type == expected_inv[0].type and \
self.last_message["inv"].inv[0].hash == expected_inv[0].hash
self.wait_until(test_function, timeout=timeout)
def wait_for_verack(self, timeout=60):
def test_function():
return "verack" in self.last_message
self.wait_until(test_function, timeout=timeout)
# Message sending helper functions
def send_and_ping(self, message, timeout=60):
self.send_message(message)
self.sync_with_ping(timeout=timeout)
# Sync up with the node
def sync_with_ping(self, timeout=60):
self.send_message(msg_ping(nonce=self.ping_counter))
def test_function():
return self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
self.wait_until(test_function, timeout=timeout)
self.ping_counter += 1
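# Minimal usage sketch (assumptions: `node` is a TestNode from a
# BitcoinTestFramework test case, `tx` is a signed CTransaction with its
# hash computed; add_p2p_connection() completes the version handshake).
def _example_send_tx_and_check_mempool(node, tx):
    peer = node.add_p2p_connection(P2PInterface())
    # send_and_ping() returns once the node has processed the tx message,
    # because the ping that follows it is answered in order.
    peer.send_and_ping(msg_tx(tx))
    assert tx.hash in node.getrawmempool()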
# One lock for synchronizing all data access between the network event loop (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# P2PConnection acquires this lock whenever delivering a message to a P2PInterface.
# This lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the P2PInterface or P2PConnection.
p2p_lock = threading.Lock()
class NetworkThread(threading.Thread):
network_event_loop = None
def __init__(self):
super().__init__(name="NetworkThread")
        # There is only one event loop, and at most one NetworkThread may exist at a time
assert not self.network_event_loop
NetworkThread.listeners = {}
NetworkThread.protos = {}
NetworkThread.network_event_loop = asyncio.new_event_loop()
def run(self):
"""Start the network thread."""
self.network_event_loop.run_forever()
def close(self, timeout=10):
"""Close the connections and network event loop."""
self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop)
wait_until_helper(lambda: not self.network_event_loop.is_running(), timeout=timeout)
self.network_event_loop.close()
self.join(timeout)
# Safe to remove event loop.
NetworkThread.network_event_loop = None
@classmethod
def listen(cls, p2p, callback, port=None, addr=None, idx=1):
""" Ensure a listening server is running on the given port, and run the
protocol specified by `p2p` on the next connection to it. Once ready
for connections, call `callback`."""
if port is None:
assert 0 < idx <= MAX_NODES
port = p2p_port(MAX_NODES - idx)
if addr is None:
addr = '127.0.0.1'
coroutine = cls.create_listen_server(addr, port, callback, p2p)
cls.network_event_loop.call_soon_threadsafe(cls.network_event_loop.create_task, coroutine)
@classmethod
async def create_listen_server(cls, addr, port, callback, proto):
def peer_protocol():
"""Returns a function that does the protocol handling for a new
connection. To allow different connections to have different
behaviors, the protocol function is first put in the cls.protos
dict. When the connection is made, the function removes the
protocol function from that dict, and returns it so the event loop
can start executing it."""
response = cls.protos.get((addr, port))
cls.protos[(addr, port)] = None
return response
if (addr, port) not in cls.listeners:
# When creating a listener on a given (addr, port) we only need to
# do it once. If we want different behaviors for different
# connections, we can accomplish this by providing different
# `proto` functions
listener = await cls.network_event_loop.create_server(peer_protocol, addr, port)
logger.debug("Listening server on %s:%d should be started" % (addr, port))
cls.listeners[(addr, port)] = listener
cls.protos[(addr, port)] = proto
callback(addr, port)
class P2PDataStore(P2PInterface):
"""A P2P data store class.
Keeps a block and transaction store and responds correctly to getdata and getheaders requests."""
def __init__(self):
super().__init__()
# store of blocks. key is block hash, value is a CBlock object
self.block_store = {}
self.last_block_hash = ''
# store of txs. key is txid, value is a CTransaction object
self.tx_store = {}
self.getdata_requests = []
def on_getdata(self, message):
"""Check for the tx/block in our stores and if found, reply with an inv message."""
for inv in message.inv:
self.getdata_requests.append(inv.hash)
if (inv.type & MSG_TYPE_MASK) == MSG_TX and inv.hash in self.tx_store.keys():
self.send_message(msg_tx(self.tx_store[inv.hash]))
elif (inv.type & MSG_TYPE_MASK) == MSG_BLOCK and inv.hash in self.block_store.keys():
self.send_message(msg_block(self.block_store[inv.hash]))
else:
logger.debug('getdata message type {} received.'.format(hex(inv.type)))
def on_getheaders(self, message):
"""Search back through our block store for the locator, and reply with a headers message if found."""
locator, hash_stop = message.locator, message.hashstop
# Assume that the most recent block added is the tip
if not self.block_store:
return
headers_list = [self.block_store[self.last_block_hash]]
while headers_list[-1].sha256 not in locator.vHave:
# Walk back through the block store, adding headers to headers_list
# as we go.
prev_block_hash = headers_list[-1].hashPrevBlock
if prev_block_hash in self.block_store:
prev_block_header = CBlockHeader(self.block_store[prev_block_hash])
headers_list.append(prev_block_header)
if prev_block_header.sha256 == hash_stop:
# if this is the hashstop header, stop here
break
else:
logger.debug('block hash {} not found in block store'.format(hex(prev_block_hash)))
break
# Truncate the list if there are too many headers
headers_list = headers_list[:-MAX_HEADERS_RESULTS - 1:-1]
response = msg_headers(headers_list)
if response is not None:
self.send_message(response)
def send_blocks_and_test(self, blocks, node, *, success=True, force_send=False, reject_reason=None, expect_disconnect=False, timeout=60):
"""Send blocks to test node and test whether the tip advances.
- add all blocks to our block_store
- send a headers message for the final block
- the on_getheaders handler will ensure that any getheaders are responded to
- if force_send is False: wait for getdata for each of the blocks. The on_getdata handler will
ensure that any getdata messages are responded to. Otherwise send the full block unsolicited.
- if success is True: assert that the node's tip advances to the most recent block
- if success is False: assert that the node's tip doesn't advance
- if reject_reason is set: assert that the correct reject message is logged"""
with p2p_lock:
for block in blocks:
self.block_store[block.sha256] = block
self.last_block_hash = block.sha256
reject_reason = [reject_reason] if reject_reason else []
with node.assert_debug_log(expected_msgs=reject_reason):
if force_send:
for b in blocks:
self.send_message(msg_block(block=b))
else:
self.send_message(msg_headers([CBlockHeader(block) for block in blocks]))
self.wait_until(
lambda: blocks[-1].sha256 in self.getdata_requests,
timeout=timeout,
check_connected=success,
)
if expect_disconnect:
self.wait_for_disconnect(timeout=timeout)
else:
self.sync_with_ping(timeout=timeout)
if success:
self.wait_until(lambda: node.getbestblockhash() == blocks[-1].hash, timeout=timeout)
else:
assert node.getbestblockhash() != blocks[-1].hash
def send_txs_and_test(self, txs, node, *, success=True, expect_disconnect=False, reject_reason=None):
"""Send txs to test node and test whether they're accepted to the mempool.
- add all txs to our tx_store
- send tx messages for all txs
- if success is True/False: assert that the txs are/are not accepted to the mempool
- if expect_disconnect is True: Skip the sync with ping
- if reject_reason is set: assert that the correct reject message is logged."""
with p2p_lock:
for tx in txs:
self.tx_store[tx.sha256] = tx
reject_reason = [reject_reason] if reject_reason else []
with node.assert_debug_log(expected_msgs=reject_reason):
for tx in txs:
self.send_message(msg_tx(tx))
if expect_disconnect:
self.wait_for_disconnect()
else:
self.sync_with_ping()
raw_mempool = node.getrawmempool()
if success:
# Check that all txs are now in the mempool
for tx in txs:
assert tx.hash in raw_mempool, "{} not found in mempool".format(tx.hash)
else:
# Check that none of the txs are now in the mempool
for tx in txs:
assert tx.hash not in raw_mempool, "{} tx found in mempool".format(tx.hash)
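# Minimal usage sketch (assumptions: `node` is a TestNode and `blocks` is a
# list of valid CBlock objects building on the node's current tip). The
# P2PDataStore answers the node's getheaders/getdata itself, so the test
# only has to send headers and wait.
def _example_send_chain(node, blocks):
    peer = node.add_p2p_connection(P2PDataStore())
    peer.send_blocks_and_test(blocks, node, success=True)
    assert node.getbestblockhash() == blocks[-1].hash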
class P2PTxInvStore(P2PInterface):
"""A P2PInterface which stores a count of how many times each txid has been announced."""
def __init__(self):
super().__init__()
self.tx_invs_received = defaultdict(int)
def on_inv(self, message):
super().on_inv(message) # Send getdata in response.
# Store how many times invs have been received for each tx.
for i in message.inv:
if (i.type == MSG_TX) or (i.type == MSG_WTX):
# save txid
self.tx_invs_received[i.hash] += 1
def get_invs(self):
with p2p_lock:
return list(self.tx_invs_received.keys())
def wait_for_broadcast(self, txns, timeout=60):
"""Waits for the txns (list of txids) to complete initial broadcast.
The mempool should mark unbroadcast=False for these transactions.
"""
# Wait until invs have been received (and getdatas sent) for each txid.
self.wait_until(lambda: set(self.tx_invs_received.keys()) == set([int(tx, 16) for tx in txns]), timeout=timeout)
# Flush messages and wait for the getdatas to be processed
self.sync_with_ping()
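# Minimal usage sketch (assumptions: `txids` are hex txids already submitted
# to TestNode `node`, e.g. via sendrawtransaction): wait until the node has
# announced every transaction at least once.
def _example_check_initial_broadcast(node, txids):
    peer = node.add_p2p_connection(P2PTxInvStore())
    peer.wait_for_broadcast(txids)
    return peer.get_invs()  # announced txids, as integers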
|
|
## @package memonger
# Module caffe2.python.memonger
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import networkx as nx
import collections
import time
import heapq
import copy
from caffe2.python import workspace
from caffe2.proto import caffe2_pb2
import enum
import logging
import numpy as np
from future.utils import viewitems, viewvalues
import caffe2.python._import_c_extension as C
log = logging.getLogger("memonger")
log.setLevel(logging.INFO)
LiveRange = collections.namedtuple('LiveRange', ["defined", "used", "size"])
def share_grad_blobs(
net,
losses,
param_grads,
namescope,
dont_share_blobs=None,
share_activations=False,
blob_shapes=None,
):
'''
Implements similar optimization as Torch's shareGradInput():
for the gradients that are passed between layers, share blobs between
operators when possible. This yields significant memory savings with
deep networks.
Returns an optimized protobuf (assign to net._net)
'''
def is_grad_blob(b):
name = str(b)
        # Note: we also need to match the _{namescope} pattern
        # to handle the auto-split gradients
return "_grad" in name and (name.startswith(namescope) or
name.startswith("_" + namescope)) and name not in param_grads
def is_grad_op(op):
# TODO: something smarter
for b in list(op.input) + list(op.output):
if is_grad_blob(b):
return True
return False
    log.warning("NOTE: Executing memonger to optimize gradient memory")
# Collect ops that have something to do with gradients
if not namescope.endswith("/"):
namescope += "/"
netproto = copy.deepcopy(net.Proto())
activations = []
external_output = set(net.Proto().external_output)
# Hacky way to get activations, think of a better way
for op in net.Proto().op:
for b in op.output:
if b + "_w" in op.input and b not in external_output:
activations.append(b)
# Remove last activations, as they are usually accessed externally
activations = set(activations[:-2])
# Gradient ops
grad_ops = [op for op in netproto.op if is_grad_op(op)]
return _compute_blob_recycling_for_dag(
netproto,
losses,
grad_ops,
lambda b: is_grad_blob(b) or (share_activations and b in activations),
namescope,
{} if dont_share_blobs is None else dont_share_blobs,
blob_shapes
)
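# Minimal usage sketch (assumptions: `model` is a model_helper-style object
# exposing `net` and a `param_to_grad` map; `losses` are the loss blob names):
def _example_share_grad_blobs(model, losses, namescope):
    optim_proto = share_grad_blobs(
        model.net,
        losses,
        set(viewvalues(model.param_to_grad)),
        namescope,
    )
    # As the docstring notes, the optimized proto is assigned back to the net.
    model.net._net = optim_proto
    return optim_proto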
def optimize_inference_for_dag(net, input_blobs, namescope=""):
netproto = copy.deepcopy(net.Proto())
external_input = set(net.Proto().external_input)
external_output = set(net.Proto().external_output)
def is_activation_blob(b):
return b not in external_input and b not in external_output
seen_as_output = set()
ops = list(net.Proto().op)
    # Sanity check: check that all external inputs are properly accounted
    # for and that no gradient ops are included in 'net'
for op in ops:
for b in op.input:
if is_activation_blob(b) and b not in seen_as_output:
assert False, "{} not in external input".format(b)
seen_as_output = seen_as_output.union(set(op.output))
assert not op.is_gradient_op, \
"You can only pass inference-only nets to optimize_inference_for_dag"
return _compute_blob_recycling_for_dag(
netproto, input_blobs, ops, is_activation_blob,
namescope, set(), None,
)
def _compute_blob_recycling_for_dag(
netproto, heads, ops, is_shareable,
namescope, dont_share_blobs, blob_shapes=None,
):
'''
Computes a blob recycling by traversing the computation DAG. The resulting
model can be executed safely on a DAGNet.
'''
start_time = time.time()
if dont_share_blobs is not None:
dont_share_blobs = set([str(b) for b in dont_share_blobs])
# Create mapping from blobs to ops
origproto = copy.deepcopy(netproto)
blobs_to_ops = collections.defaultdict(lambda: [])
blob_input_count = collections.defaultdict(lambda: 0)
op_inputs = collections.defaultdict(lambda: 0)
op_visit_count = collections.defaultdict(lambda: 0)
share_counts = collections.defaultdict(lambda: 0)
req_tokens = collections.defaultdict(lambda: set())
op_token_deposit = [set() for _ in ops]
blob_sizes = {} if blob_shapes is not None else None
# First figure out which of the shareable blobs
# are 'internal' to the optimization. For example, if optimizing
# only gradient ops, then activation blobs will be 'external' as they
# are not output by these ops.
optim_op_outputs = set()
for op in ops:
optim_op_outputs.update(set(op.output))
for i, op in enumerate(ops):
for inp in op.input:
if is_shareable(inp) or inp in heads:
if inp in optim_op_outputs:
blobs_to_ops[inp].append(i)
op_inputs[i] += 1
else:
# For external blobs, we don't increase the op_inputs
# count.
blobs_to_ops[inp].append(i)
share_counts[inp] = 1
output_blobs = set()
mapping = {}
unknown_shapes = set()
# Helper function to return blob size based on shape inference.
# If we don't have shape inference available, return 0.
def infer_blob_size(b):
if b in blob_shapes:
return np.prod(blob_shapes[b])
else:
unknown_shapes.add(b)
return 0
global token_seq
token_seq = 0
    # Creates the next "token". Tokens are used to keep track of
    # dependencies: a blob can be replaced by another only if that
    # blob "holds" all tokens currently in scope.
def next_token():
global token_seq
token_seq += 1
return token_seq
saved_count = 0
# Main recursive function. We start recursion from the "heads" and
# only descend on an operator when all its inputs have been 'satisfied'.
# That is, all parent operators have been visited.
def descend(op_idx, free_blobs, tokens):
# Check if there are tokens left at this operator from a
# parent operator.
tokens = tokens.union(op_token_deposit[op_idx])
op_token_deposit[op_idx] = None
cur_op = ops[op_idx]
# new_free_blobs contains the blobs that we will release after
# visiting this op
new_free_blobs = set()
saved = 0
# Update the tokens assigned to blobs to be union of the
# tokens we are currently holding and the tokens already held
# by that blob.
for b in list(cur_op.input) + list(cur_op.output):
actual_blob = b if b not in mapping else mapping[b]
req_tokens[b] = req_tokens[b].union(tokens)
if actual_blob != b:
# This blob has been assigned to another (recycled) blob,
# so update the token holdings of the recycled blob.
req_tokens[actual_blob] = req_tokens[actual_blob].union(tokens)
# Check each input and increment the counters for each of the input
# blobs.
for inp in cur_op.input:
if is_shareable(inp):
blob_input_count[inp] += 1
if blob_input_count[inp] == len(blobs_to_ops[inp]):
# This input blob has been now consumed, so we
# can release it to be recycled. If it was replaced
# by another recycled blob, release the recycled blob
# instead.
actual_blob = inp if inp not in mapping else mapping[inp]
if actual_blob not in dont_share_blobs:
new_free_blobs.add(
(-share_counts[actual_blob], actual_blob),
)
def can_be_used(blob, cur_tokens):
# Do we have all required tokens, and this one
# was not released in this op?
for (_cnt, b) in new_free_blobs:
if b == blob:
return False
return len(req_tokens[blob] - cur_tokens) == 0
        # Check each output to see if we are seeing it for the first time
        # (i.e. it is created by this op). If so, we can replace it with
        # a recycled blob, if one is available.
for outp in cur_op.output:
if is_shareable(outp):
if outp not in output_blobs:
# First seen this blob as output, can assign to a free blob
freeb = None
                    # We have two algorithms for choosing the blob to replace
                    # this one: one that uses size information and another
                    # that uses a priority queue preferring blobs that
                    # have been shared before.
if blob_sizes is None:
put_back = []
while len(free_blobs) > 0:
(negcnt, cand_freeb) = heapq.heappop(free_blobs)
if can_be_used(cand_freeb, tokens):
freeb = cand_freeb
break
else:
put_back.append((negcnt, cand_freeb))
for cnt, b in put_back:
heapq.heappush(free_blobs, (cnt, b))
else:
bsize = infer_blob_size(outp)
best_blob = None
best_size = -1
# Heuristic to choose the most suitably sized blob
for b in free_blobs:
if can_be_used(b, tokens):
sz = blob_sizes[b]
if sz >= best_size:
if best_size < bsize or best_size >= sz:
best_size = sz
best_blob = b
freeb = best_blob
if freeb is not None:
free_blobs.remove(freeb)
saved += bsize
# "freeb" is the blob output to be replaced with. We
# update its tokens to include the tokens being held
# now.
if freeb is not None:
req_tokens[freeb] = req_tokens[freeb].union(tokens)
mapping[outp] = freeb
share_counts[freeb] += 1
output_blobs.add(outp)
# Process blobs released during this op visit. Depending
# on whether we have blob sizes or not, we store the list
# of free blobs differently (NOTE: this should be unified).
for (cnt, nf) in new_free_blobs:
already_inserted = False
# Note: we prevent double insertion, but it can
# happen because of parallel branches. Token management
# ensures free blobs are handled correctly.
if blob_sizes is None:
for _c, b in free_blobs:
if b == nf:
already_inserted = True
if not already_inserted:
heapq.heappush(free_blobs, (cnt, nf))
else:
if nf not in blob_sizes:
                    blob_sizes[nf] = infer_blob_size(nf)
if nf in free_blobs:
already_inserted = True
if not already_inserted:
free_blobs.append(nf)
num_branches = 0
# Count branches
for outp in cur_op.output:
for _ in blobs_to_ops[outp]:
num_branches += 1
# Here we process each output again and see if we can descend
# down the operator graph.
for outp in cur_op.output:
for inp_op_idx in blobs_to_ops[outp]:
op_visit_count[inp_op_idx] += 1
# Descend only if we have satisfied all inputs
if op_visit_count[inp_op_idx] == op_inputs[inp_op_idx]:
assert inp_op_idx != op_idx
new_tokens = tokens
if num_branches > 1:
# Optimization
new_tokens = tokens.union(set([next_token()]))
saved_desc = descend(
inp_op_idx,
free_blobs,
new_tokens,
)
saved += saved_desc
else:
# Leave my tokens here so that they can be grabbed
# when we visit the operator (after all inputs have been
# satisfied).
if op_token_deposit[inp_op_idx] is not None:
op_token_deposit[inp_op_idx] = \
op_token_deposit[inp_op_idx].union(tokens)
return saved
# Start DFS from the heads' (losses or inputs)
for head_blob in heads:
for op_idx in blobs_to_ops[head_blob]:
if op_token_deposit[op_idx] is not None:
saved = descend(op_idx, [], set([next_token()]))
saved_count += saved
# Rename the shared blobs
shared_blobs = set(viewvalues(mapping))
renamed = {}
for j, b in enumerate(shared_blobs):
if b in optim_op_outputs:
renamed[b] = namescope + "__m{}_shared".format(j)
else:
renamed[b] = b
# Update the mapping recursively
mapping.update(renamed)
had_changes = True
while had_changes:
had_changes = False
for k, v in mapping.items():
if v in renamed and renamed[v] != v:
renamed[k] = renamed[v]
mapping[k] = renamed[k]
had_changes = True
shared_blobs = set(mapping.values())
if saved_count > 0:
log.info("Remapping {} blobs, using {} shared; saved apprx {} MB".format(
len(mapping), len(shared_blobs), int(saved_count * 4 / 1024 / 1024),
))
log.info("Could not infer sizes for: {}".format(unknown_shapes))
else:
log.info("Remapping {} blobs, using {} shared".format(
len(mapping), len(shared_blobs),
))
apply_assignments(netproto, mapping)
log.info("Memonger memory optimization took {} secs".format(
time.time() - start_time),
)
assert verify_graph_equality(origproto, netproto), \
"Memonger graph is not equal to original."
assert verify_inplace_blobs(origproto, netproto), \
"Inplace assignments differ in memonger net."
return netproto
def _find_source_nodes(g):
''' Return nodes without predecessors '''
ret = []
for cn in g:
cur_pred = g.predecessors(cn)
if not cur_pred:
ret.append(cn)
return ret
def _find_target_nodes(g):
''' Return nodes without successors '''
ret = []
for cn in g:
cur_succ = g.successors(cn)
if not cur_succ:
ret.append(cn)
return ret
def _add_single_target_ifneeded(g):
targets = _find_target_nodes(g)
assert len(targets) >= 1
if len(targets) == 1:
return g
ret = copy.deepcopy(g)
def _next_available_idx(g):
ret = -1
for cn in g:
if cn > ret:
ret = cn
ret += 1
return ret
target_node_idx = _next_available_idx(g)
ret.add_node(target_node_idx)
for cn in targets:
ret.add_edge(cn, target_node_idx)
return ret
def _get_path(pred_list, dist_list):
''' Get the path from nx.bellman_ford()'s output '''
# distances are negative
assert all(dist_list[x] <= 0 for x in dist_list)
# node with longest distance to source is the target
target = min(dist_list, key=lambda x: dist_list[x])
ret = []
cur = target
while cur is not None:
ret.append(cur)
cur = pred_list[cur]
return list(reversed(ret))
def _get_longest_paths(g, source_nodes):
''' Get the longest path for nodes in 'source_nodes'
Find with bellman_ford() by setting weight = -1
'''
ng = copy.deepcopy(g)
for u, v in ng.edges():
ng[u][v]["weight"] = -1
ret = {}
for cn in source_nodes:
pred, dist = nx.bellman_ford(ng, cn, weight="weight")
path = _get_path(pred, dist)
assert path[0] == cn
assert len(path) - 1 == -dist[path[-1]]
ret[cn] = path
return ret
def _build_tree(paths):
''' Build a tree for given paths based on common elements.
Last elements of all paths are the same, which is the root of the tree.
'''
assert all(cp[-1] == paths[0][-1] for cp in paths)
g = nx.DiGraph()
node_set = {y for x in paths for y in x}
g.add_nodes_from(node_set)
for cp in paths:
for ce in zip(cp[0:-1], cp[1:]):
g.add_edge(ce[1], ce[0])
root = paths[0][-1]
_compute_tree_height(g, root)
return (g, root)
def _compute_tree_height(g, root):
''' Compute the heights of the tree for all nodes
Height of leaves are 0
'''
def _get_height(root):
children = g.successors(root)
height = 0
if children:
child_heights = [_get_height(x) for x in children]
height = max(child_heights) + 1
g.node[root]["height"] = height
return height
_get_height(root)
def _sort_tree_leaves(g, root):
''' For each node, sort its child nodes based on the height of the nodes.
Return the leaf nodes of the tree after sorting.
'''
def _get_height(root):
return g.node[root]["height"]
def _get_sorted_leaves(root):
children = g.successors(root)
if not children:
return [root]
child_heights = [_get_height(x) for x in children]
order = sorted(range(len(children)), key=lambda x: child_heights[x])
ret = []
for co in order:
cr = children[co]
ret += _get_sorted_leaves(cr)
return ret
return _get_sorted_leaves(root)
def topological_sort_traversal_longest_path(g):
''' The graph 'g' may contain several source nodes (nodes without incoming
edge), which could be in any order and still be a valid
topological sorting result. We would like to arrange these source nodes
so that the average live spans of the computed blobs are shorter.
The idea is to sort the source nodes based on the length of their path to
the target node so that the one with longer path is used first.
This is done by:
- Add a single target node if there are multiple target nodes in 'g'.
- Find the longest path between each source and the target node.
- Convert the longest paths to a tree with the target node being the root
and source nodes being the leaves.
- Sort the nodes of the tree based on the height of the tree.
'''
gt = _add_single_target_ifneeded(g)
source_nodes = _find_source_nodes(gt)
lpaths = _get_longest_paths(gt, source_nodes)
tree, root = _build_tree(list(viewvalues(lpaths)))
sorted_sources = _sort_tree_leaves(tree, root)
    assert sorted(sorted_sources) == sorted(source_nodes)
    ret = nx.topological_sort(g, sorted_sources)
    assert len(ret) == len(g.node)
return ret
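# Worked example (a sketch, assuming the networkx 1.x API this module
# targets): sources 0 and 2 reach the single sink 4 via paths of different
# length, so the traversal orders them by their longest path to the sink.
def _example_longest_path_order():
    g = nx.DiGraph()
    g.add_edges_from([(0, 1), (1, 4), (2, 3), (3, 1)])
    return topological_sort_traversal_longest_path(g)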
def topological_sort_traversal(g):
return nx.topological_sort(g)
def compute_ranges(linearized_ops, blob_sizes=None):
if not blob_sizes:
log.warning('Provide blob sizes to get more accurate assignments.')
blobs = collections.defaultdict(
lambda: LiveRange(defined=None, used=None, size=None))
for i, op in enumerate(linearized_ops):
for blob in op.input:
used = blobs[blob].used
if used is None:
used = i
else:
used = max(used, i)
blobs[blob] = blobs[blob]._replace(used=used)
blob_size = blob_sizes[blob] if blob_sizes else None
assert not blob_sizes or blob_size is not None
blobs[blob] = blobs[blob]._replace(size=blob_size)
for blob in op.output:
defined = blobs[blob].defined
if defined is None:
defined = i
else:
defined = min(defined, i)
blobs[blob] = blobs[blob]._replace(defined=defined)
blob_size = blob_sizes[blob] if blob_sizes else None
assert not blob_sizes or blob_size is not None
blobs[blob] = blobs[blob]._replace(size=blob_size)
return blobs
def is_compatible(candidate_range, assignment, static_blobs):
(name, range_) = assignment[-1]
if name in static_blobs:
return False
if candidate_range.defined is None or range_.defined is None \
or range_.used is None:
return False
return candidate_range.defined > range_.used
def compute_blob_assignments(assignments):
blob_assignments = {}
for assignment in assignments:
if len(assignment) == 1:
continue
last_blob, _ = assignment[-1]
for (blob, _) in assignment:
blob_assignments[blob] = last_blob
return blob_assignments
def _get_max_size(assignment):
if not assignment:
return 0
ret = max([x[1].size for x in assignment])
ret = 0 if ret is None else ret
return ret
def get_memory_usage(assignments):
ret = 0
for cur in assignments:
ret += _get_max_size(cur)
return ret
def compute_assignments_greedy(ranges_sorted, init_assignments=None):
assignments = init_assignments or []
visited = {y[0] for x in assignments for y in x}
for (name, range_) in ranges_sorted:
if name in visited:
continue
assigned = False
best_assignment = 0
min_dist = float("inf")
candidate_size = range_.size or 0
for idx, assignment in enumerate(assignments):
if is_compatible(range_, assignment, []):
assigned = True
dist = abs(_get_max_size(assignment) - candidate_size)
if dist < min_dist:
min_dist = dist
best_assignment = idx
if assigned:
assignment = assignments[best_assignment]
assignment.append((name, range_))
else:
assignments.append([(name, range_)])
return assignments
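# Worked example (sketch): "b" is first defined after "a" is last used, so
# the two can share one assignment; "c" overlaps both and keeps its own.
# The expected result is [[a, b], [c]].
def _example_greedy_assignments():
    ranges_sorted = [  # sorted by .used, as the function expects
        ("a", LiveRange(defined=0, used=2, size=4)),
        ("c", LiveRange(defined=1, used=4, size=4)),
        ("b", LiveRange(defined=3, used=5, size=4)),
    ]
    return compute_assignments_greedy(ranges_sorted, [])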
def _get_count(assignments):
''' Return number of blobs in assignments '''
if assignments:
return sum([len(x) for x in assignments])
return 0
def compute_assignments_dp(ranges_sorted, init_assignment, counter=None):
''' Compute assignment for blobs in 'ranges_sorted' on top of 'init_assignment'
using dynamic programming + recursion.
ranges_sorted: blobs sorted by 'used'
init_assignment: assignment to start with, blobs in 'ranges_sorted' should
not be used in 'init_assignment'
Using f(b, k, init) to represent the best assignment for blobs b[0:k]
given initial assignment 'init', we have
f(b, k, init) = f(b, j, init) +
find_best(b[j:k], f(b, j, init))
where j is the index of the last best assignment that is independent of
blob b[k - 1] (b[k - 1] is compatible with all assignments in
f(b, j, init)), and find_best(b1, init1) gives the best assignment
for blobs in 'b1' based on the initial assignment 'init1', and blobs
b1[0:-1] should be incompatible with b1[-1]. f(b, len(b), []) gives
the best assignment for blobs 'b'.
For find_best(b, init), since b[0:-1] are not compatible with b[-1], we
could reduce it to a smaller problem to find best assignment for b[0:-1]
as
find_best(b, init) = min {
f(b[0:-1], len(b) - 1, init - x) + [x, b[-1]] for x in init, or
f(b[0:-1], len(b) - 1, init) + [b[-1]]
}
where min{} gives the assignment with minimum memory usage.
'''
def _get_compatible_prev(candidate_range, best_assignments, cur_idx):
        ''' Find the closest position k in best_assignments such that
        candidate_range is compatible with all assignments in
        best_assignments[k] (i.e. independent of candidate_range).
        Return -1 if not found.
        '''
def is_compatible_all(candidate_range, assignments):
            ''' Return True if candidate_range is compatible with all assignments '''
return all([is_compatible(candidate_range[1], x, []) for x in assignments])
ii = cur_idx - 1
while ii >= 0:
cba = best_assignments[ii]
if is_compatible_all(candidate_range, cba):
return ii
ii -= 1
return -1
def _find_best(ranges, init_assignment, prev_best_assignment, counter):
''' Find the best assignment for blobs 'ranges' given an initialized
assignment 'init_assignment'.
Blobs in ranges[0:-1] should be incompatible with blob range[-1].
'prev_best_assignment': best assignment for blobs in ranges[:-1]
By assigning ranges[-1] to each assignment k in 'init_assignment' or
in a new assignment, the problem becomes a smaller problem to find
the best assignment for ranges[0:-1] given the initial assignment
        init_assignment[0:k] + init_assignment[(k+1):].
'''
# Blob to check
find_range = ranges[-1]
# Blobs in ranges[0:-1] are incompatible with ranges[-1] so that we can
# reduce it to a smaller problem.
assert all(not is_compatible(x[1], [find_range], []) for x in ranges[0:-1])
sz = len(init_assignment)
best_candidates = []
# Try to assign 'find_range' to each assignment in init_assignment
for ii in range(sz):
if not is_compatible(find_range[1], init_assignment[ii], []):
continue
cur_best = copy.deepcopy(init_assignment)
cur_best[ii].append(find_range)
if len(ranges) > 1:
cur_best_tmp = [x for i, x in enumerate(cur_best) if i != ii]
# reduce to a smaller dp problem
cur_best_tmp = compute_assignments_dp(
ranges[:-1], cur_best_tmp, counter)
cur_best = cur_best_tmp + [cur_best[ii]]
best_candidates.append(cur_best)
# Try to put 'find_range' in a new assignment
best_candidates.append(prev_best_assignment + [[find_range]])
ret = min(best_candidates, key=lambda x: get_memory_usage(x))
return ret
if not counter:
counter = [0]
counter[0] += 1
if counter and counter[0] % 5000 == 0:
rs = [ranges_sorted[0][1].defined, ranges_sorted[-1][1].used]
log.info('Finding assignments {} ({} -> {})...'.format(
counter[0], rs[0], rs[1]))
init_assignment = init_assignment or []
# best_assignments[k]: best assignments for first k blobs ranges_sorted[0:(k+1)]
best_assignments = []
# Find best assignment for blobs ranges_sorted[0:ii]
for ii, cur_range in enumerate(ranges_sorted):
# closest best_assignment that is independent of ranges_sorted[ii]
prev_idx = _get_compatible_prev(cur_range, best_assignments, ii)
prev_best = copy.deepcopy(init_assignment) if prev_idx < 0 else \
copy.deepcopy(best_assignments[prev_idx])
# Need to find best assignment for blobs in 'ranges_part'
ranges_part = ranges_sorted[(prev_idx + 1):(ii + 1)]
cur_best = _find_best(
ranges_part, prev_best,
best_assignments[-1] if best_assignments else init_assignment,
counter)
assert _get_count(cur_best) == _get_count(prev_best) + len(ranges_part)
best_assignments.append(copy.deepcopy(cur_best))
assert len(best_assignments) == len(ranges_sorted)
best = best_assignments[-1]
return best
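# Usage sketch: same toy ranges as the greedy example above. The DP variant
# additionally explores moving blobs between assignments and returns the
# grouping with the minimum total memory usage.
def _example_dp_assignments():
    ranges_sorted = [
        ("a", LiveRange(defined=0, used=2, size=4)),
        ("c", LiveRange(defined=1, used=4, size=4)),
        ("b", LiveRange(defined=3, used=5, size=4)),
    ]
    return compute_assignments_dp(ranges_sorted, [])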
def get_updated_ranges(ranges, max_live=None):
''' Set LiveRange.defined = -1 if it is None
Set LiveRange.used = max_live if it is None
        Set LiveRange.size = 1 if it is None
'''
def _get_max_live(ranges):
max_live = max(x[1].used for x in ranges if x[1].used) + 1
return max_live
def _update_range(x, max_live, size):
cx = x
if x[1].defined is None:
cx = (cx[0], cx[1]._replace(defined=-1))
if x[1].used is None:
cx = (cx[0], cx[1]._replace(used=max_live))
if x[1].size is None:
cx = (cx[0], cx[1]._replace(size=size))
return cx
if max_live is None:
max_live = _get_max_live(ranges)
ranges = [_update_range(x, max_live, 1) for x in ranges]
return ranges
def compute_assignments(ranges, static_blobs, algo):
'''
algo: Method used to find assignments (AssignmentAlgorithm.GREEDY or
AssignmentAlgorithm.DYNAMIC_PROGRAMMING).
AssignmentAlgorithm.DYNAMIC_PROGRAMMING gives optimal solution at the
cost of more computation.
AssignmentAlgorithm.GREEDY may be better in the case 'blob_sizes' is
not provided.
'''
# Sort the ranges based on when they are last used.
# If LiveRange.used is None, then the blob is never used and could
# be consumed externally. Sort these to the end of the list as opposed
# to the beginning so that they can be shared as well.
ranges = sorted(
viewitems(ranges),
key=lambda p: (p[1].used is None, p[1].used),
)
# Update None values
ranges = get_updated_ranges(ranges)
# Sharable blobs
ranges_sharable = [x for x in ranges if x[0] not in static_blobs]
# Static blobs, not sharable
ranges_static = [x for x in ranges if x[0] in static_blobs]
log.info("Total sharable blobs {}".format(len(ranges_sharable)))
best_assignment = []
if algo == AssignmentAlgorithm.DYNAMIC_PROGRAMMING:
best_assignment = compute_assignments_dp(ranges_sharable, [])
elif algo == AssignmentAlgorithm.GREEDY:
best_assignment = compute_assignments_greedy(ranges_sharable, [])
else:
assert "Invalid algo name {}".format(algo)
best_assignment += [[x] for x in ranges_static]
# verify_assignments(best_assignment)
return best_assignment
def verify_assignments(assignments):
for cur in assignments:
for x, y in zip(cur[0:-1], cur[1:]):
assert x[1].used < y[1].defined
def compute_interference_graph(ops):
g = nx.DiGraph()
for i, op in enumerate(ops):
g.add_node(i, op=op)
for i, parent_op in enumerate(ops):
for j, child_op in enumerate(ops):
if i >= j:
continue
if any(output in child_op.input for output in parent_op.output):
deps = set(child_op.input).intersection(parent_op.output)
g.add_edge(i, j, deps=deps)
assert nx.is_directed_acyclic_graph(g), child_op
return g
Optimization = collections.namedtuple(
'Optimization', ['net', 'assignments', 'blob_assignments'])
def apply_assignments(net, blob_assignments):
def canonical_name(blob):
if blob not in blob_assignments:
return blob
return blob_assignments[blob]
for op in net.op:
# Descend into subnets of the recurrent network
if op.type.startswith('RecurrentNetwork'):
apply_recurrent_blob_assignments(op, blob_assignments, canonical_name)
for i, input_ in enumerate(op.input):
op.input[i] = canonical_name(input_)
for i, output in enumerate(op.output):
op.output[i] = canonical_name(output)
def apply_recurrent_blob_assignments(op, blob_assignments, canonical_name):
log.debug("Applying assignments to recurrent op: {}".format(op.type))
import google.protobuf.text_format as protobuftx
step_args = [a for a in op.arg if a.name.endswith("step_net")]
for step_arg in step_args:
step_proto = caffe2_pb2.NetDef()
protobuftx.Merge(step_arg.s.decode("ascii"), step_proto)
apply_assignments(step_proto, blob_assignments)
for i, einp in enumerate(step_proto.external_input):
if einp in blob_assignments:
step_proto.external_input[i] = canonical_name(einp)
step_arg.s = str(step_proto).encode("ascii")
# Store renamings
for blob, renamed in viewitems(blob_assignments):
if blob in list(op.input) + list(op.output):
a = caffe2_pb2.Argument()
a.name = blob + ".rename"
a.s = str(renamed).encode("ascii")
op.arg.extend([a])
class AssignmentAlgorithm(enum.Enum):
GREEDY = 0
DYNAMIC_PROGRAMMING = 1
def optimize_inference_fast(net, static_blobs):
optim = caffe2_pb2.NetDef()
optim_str = C.memonger_optimize_inference_net(
net.SerializeToString(), [str(s).encode('utf-8') for s in static_blobs]
)
optim.ParseFromString(optim_str)
return optim
def optimize_interference(net, static_blobs,
ordering_function=topological_sort_traversal,
blob_sizes=None,
algo=AssignmentAlgorithm.GREEDY):
"""
ordering_function: topological_sort_traversal or
topological_sort_traversal_longest_path.
topological_sort_traversal_longest_path gives better
results but needs a bit more computation.
algo: Method used to find assignments (AssignmentAlgorithm.GREEDY or
AssignmentAlgorithm.DYNAMIC_PROGRAMMING).
AssignmentAlgorithm.DYNAMIC_PROGRAMMING gives optimal solution at the
cost of more computation.
AssignmentAlgorithm.GREEDY may be better in the case 'blob_sizes' is
not provided.
"""
"""
1) Use a BFS traversal of the execution graph to generate an
ordering of the node executions.
2) Generate use-def ranges for each `blob` in the BFS traversal
order.
3) Assign blobs to `canonical blobs`
4) Rename blobs to canonical blobs
"""
net = copy.deepcopy(net)
g = compute_interference_graph(net.op)
ordering = ordering_function(g)
linearized_ops = [net.op[i] for i in ordering]
    # Reorder ops in net based on the computed linearized order.
# If the graph has multiple topological orderings and if the NetDef's
# ordering differs from the order used to compute ranges, then the
# runtime might end up overwriting blobs before they are used.
del net.op[:]
net.op.extend(linearized_ops)
ranges = compute_ranges(linearized_ops, blob_sizes)
assignments = compute_assignments(ranges, static_blobs, algo)
blob_assignments = compute_blob_assignments(assignments)
apply_assignments(net, blob_assignments)
return Optimization(
net=net,
blob_assignments=blob_assignments,
assignments=assignments)
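# Minimal usage sketch (assumptions: `net_proto` is an inference NetDef and
# `static_blobs` the blob names that must not be renamed). Note that
# compute_statistics() fetches blob sizes from the workspace, so the net
# should have been run at least once.
def _example_optimize_interference(net_proto, static_blobs):
    result = optimize_interference(net_proto, static_blobs,
                                   algo=AssignmentAlgorithm.GREEDY)
    stats = compute_statistics(result.assignments)
    log.info("nbytes before/after: {} / {}".format(
        stats.baseline_nbytes, stats.optimized_nbytes))
    return result.net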
def verify_inplace_blobs(net_a, net_b):
"""
Verifies that net_a and net_b have the same in-place blob assignments.
Particularly, that memonger did not add an in-place assignment when that
did not exist before.
"""
def get_inplaces(op):
out = list(op.output)
inplaces = []
for j, inp in enumerate(op.input):
if inp in out:
inplaces.append([j, out.index(inp)])
return inplaces
for op_a, op_b in zip(net_a.op, net_b.op):
if op_a.type != op_b.type:
return False
if get_inplaces(op_a) != get_inplaces(op_b):
return False
return True
def verify_graph_equality(net_a, net_b):
"""
Determines if the execution of two graphs are identical.
That is, all inputs blobs are mapped to the same output blobs
for each operator in their respective positions.
This is meant to check the output of memonger with the original graph.
It assumes that the nets have same external input and output.
O(E) runtime + O(1) amortized cost to hash for python dict
"""
def parent_list(ops):
parent_list = [[] for _ in ops]
edge_owner = {}
for i, op in enumerate(ops):
for blob in op.input:
parent_id = edge_owner.get(blob)
if parent_id is not None:
parent_list[i].append(parent_id)
for blob in op.output:
edge_owner[blob] = i
return parent_list
# Operator wise equality checks
if (len(net_a.op) != len(net_b.op)):
return False
for op_a, op_b in zip(net_a.op, net_b.op):
if (op_a.type != op_b.type or
op_a.device_option != op_b.device_option or
op_a.engine != op_b.engine):
return False
# Print debug info
parent_list_a = parent_list(net_a.op)
parent_list_b = parent_list(net_b.op)
if parent_list_a != parent_list_b:
j = 0
for a, b in zip(parent_list_a, parent_list_b):
if a != b:
print("Difference {} vs {} \n {}".format(
j, net_a.op[j], net_b.op[j]))
print("Parents: {} vs {}".format(a, b))
j += 1
# Net wise equality check
return parent_list_a == parent_list_b
Statistics = collections.namedtuple(
'Statistics', ['baseline_nbytes', 'optimized_nbytes'])
def blob_nbytes(blob):
sz = 0
try:
sz = workspace.FetchBlob(blob).nbytes
except Exception:
log.warning('Error when fetching blob {}'.format(blob))
return sz
def compute_statistics(assignments):
blob_bytes = {
blob: blob_nbytes(blob) for assignment in assignments
for (blob, _) in assignment}
baseline_nbytes = sum(viewvalues(blob_bytes))
optimized_nbytes = sum(
max(blob_bytes[blob] for (blob, _) in assignment)
for assignment in assignments)
return Statistics(
baseline_nbytes=baseline_nbytes,
optimized_nbytes=optimized_nbytes)
def collect_blob_sizes(net):
blobs = {}
for op in net.op:
for blob in op.input:
blobs[blob] = blob_nbytes(blob)
for blob in op.output:
blobs[blob] = blob_nbytes(blob)
return blobs
|
|
try:
    from StringIO import StringIO  # Python 2
except ImportError:
    from io import StringIO  # Python 3
import crianza
import operator
import random
import sys
import unittest
import six
try:
from crianza import native
CRIANZA_NATIVE = True
except ImportError:
CRIANZA_NATIVE = False
fibonacci_source = \
"""
# The Fibonacci Sequence
: println dup . ;
: next swap over + ;
# Start values
0 println
1 println
# Loop forever
@ next println return
"""
class TestCrianza(unittest.TestCase):
def test_initial_conditions(self):
machine = crianza.Machine([])
machine.run()
self.assertEqual(machine.data_stack, crianza.Stack([]))
self.assertEqual(machine.return_stack, crianza.Stack([]))
self.assertEqual(machine.stack, [])
self.assertEqual(machine.instruction_pointer, 0)
self.assertEqual(machine.code, [])
self.assertEqual(machine.input, sys.stdin)
self.assertEqual(machine.output, sys.stdout)
def test_eval(self):
self.assertEqual(crianza.eval("1 2 3 4 5 * * * *"), 120)
self.assertEqual(crianza.eval("1 2 3 4 5 - - - -"), 3)
self.assertEqual(crianza.eval("1 2 3 4 5 + + + +"), 15)
def test_parser(self):
test = lambda src, tokens: self.assertEqual(crianza.parse(src), tokens)
test("1", [1])
test("1 2", [1, 2])
test("123 dup * .", [123, "dup", "*", "."])
test("1 2 3 4 5 * * * *", [1, 2, 3, 4, 5, "*", "*", "*", "*"])
test(": square\n\tdup * ;\n\n12 square .\n", [":", "square", "dup", "*",
";", 12, "square", "."])
def _test_arithmetic(self, a, b, op):
name = {"mul": "*",
"sub": "-",
"add": "+",
"mod": "%",
"div": "/"}[op.__name__]
source = "%d %d %s" % (a, b, name)
self.assertEqual(crianza.eval(source), op(a, b))
def test_random_arithmetic(self):
ops = [operator.mul, operator.add]
for op in ops:
for _ in range(100):
# TODO: Add negative numbers when our parser supports it
a = random.randint(0, +(2**31-1))
b = random.randint(0, +(2**31-1))
self._test_arithmetic(a, b, op)
def test_optimizer_errors(self):
for op in [crianza.instructions.div, crianza.instructions.mod]:
instr = crianza.instructions.lookup(op)
func = lambda: crianza.constant_fold([2, 0, instr], ignore_errors=False)
self.assertRaises(crianza.CompileError, func)
def test_optimizer(self):
self.assertEqual(crianza.constant_fold([2,3,"*","."]), [6, "."])
self.assertEqual(crianza.constant_fold([2,2,3,"*","."]), [2, 6, "."])
self.assertEqual(crianza.constant_fold([5,2,3,"*","+","."]), [11, "."])
self.assertEqual(crianza.constant_fold([5,2,3,"*","+",4,"*","."]), [44, "."])
self.assertEqual(crianza.constant_fold([2,3,"+",5,"*","write"]), [25, "write"])
self.assertEqual(crianza.constant_fold([10, "dup"]), [10, 10])
self.assertEqual(crianza.constant_fold([1,2,"dup","dup","+","+"]), [1,6])
self.assertEqual(crianza.constant_fold([1,2,3,"swap"]), [1,3,2])
self.assertEqual(crianza.constant_fold([1,2,3,"drop","drop"]), [1])
self.assertEqual(crianza.constant_fold([1, 123, "str"]), [1, "123"])
self.assertEqual(crianza.constant_fold([1, "112", "int"]), [1, 112])
self.assertEqual(crianza.constant_fold([1, 123, "str", "int"]), [1, 123])
def test_program_fibonacci(self):
code = crianza.compile(crianza.parse(fibonacci_source))
# TODO: Unembed this:
#self.assertEqual(code, native_types([0, 13, 'call', 1,
# 13, 'call', '@', 16, 'call', 13, 'call', 'return', 'exit', 'dup',
# '.', 'return', 'swap', 'over', '+', 'return']))
machine = crianza.Machine(code, output=None)
# skip to main loop
machine.run(11)
sequence = []
numbers_to_generate = 15
for its in range(0, numbers_to_generate):
sequence.append(machine.top)
machine.run(13) # next number
self.assertEqual(sequence, [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144,
233, 377, 610])
def test_io(self):
fin = six.StringIO("Input line 1.\nInput line 2.")
fout = six.StringIO()
result = crianza.eval('123 read "howdy" . .', input=fin, output=fout)
self.assertEqual(result, 123)
self.assertEqual(fin.getvalue()[fin.tell():], "Input line 2.")
self.assertEqual(fout.getvalue(), "howdy\nInput line 1.\n")
    def _execfile(self, filename, input=None, output=None, steps=1000):
        # Use fresh streams per call instead of shared default instances.
        input = input if input is not None else six.StringIO()
        output = output if output is not None else six.StringIO()
        with open(filename, "rt") as f:
            return crianza.execute(f, input=input, output=output, steps=steps)
def test_program_even_odd(self):
fin = six.StringIO("1\n2\n3\n")
fout = six.StringIO()
m = self._execfile("tests/even-odd.source", input=fin, output=fout)
self.assertEqual(fout.getvalue(),
"Enter a number: The number 1 is odd.\n" +
"Enter a number: The number 2 is even.\n" +
"Enter a number: The number 3 is odd.\n" +
"Enter a number: ")
self.assertEqual(fin.tell(), len(fin.getvalue()))
self.assertEqual(m.top, "")
self.assertEqual(m.stack, [""])
self.assertEqual(m.return_stack, crianza.Stack([]))
def test_program_sum_mul_1(self):
fout = six.StringIO()
m = self._execfile("tests/sum-mul-1.source", output=fout)
self.assertEqual(fout.getvalue(), "(2+3) * 4 = 20\n")
self.assertEqual(m.top, None)
self.assertEqual(m.stack, [])
self.assertEqual(m.return_stack, crianza.Stack([]))
def test_program_sum_mul_2(self):
fin = six.StringIO("12\n34\n")
fout = six.StringIO()
m = self._execfile("tests/sum-mul-2.source", input=fin, output=fout)
self.assertEqual(fout.getvalue(),
"Enter a number: " +
"Enter another number: " +
"Their sum is: 46\n" +
"Their product is: 408\n")
self.assertEqual(m.top, None)
self.assertEqual(m.stack, [])
self.assertEqual(m.return_stack, crianza.Stack([]))
def test_program_subroutine_1(self):
fout = six.StringIO()
m = self._execfile("tests/subroutine-1.source", output=fout)
self.assertEqual(fout.getvalue(), "one\ntwo\nthree\n144\nfinished\n")
self.assertEqual(m.top, 0)
self.assertEqual(m.stack, ["one", "two", "three", 144, 0])
self.assertEqual(m.return_stack, crianza.Stack([]))
def test_program_fibonacci_1(self):
fout = six.StringIO()
m = self._execfile("tests/fibonacci.source", output=fout, steps=100)
self.assertEqual(fout.getvalue(),
"0\n1\n1\n2\n3\n5\n8\n13\n21\n34\n55\n89\n144\n233\n377\n")
self.assertEqual(m.top, 610)
self.assertEqual(m.stack, [377, 610])
self.assertEqual(m.return_stack, crianza.Stack([]))
def test_program_fibonacci_2(self):
fout = six.StringIO()
m = self._execfile("tests/fibonacci-2.source", output=fout, steps=180)
self.assertEqual(fout.getvalue(),
"0\n1\n1\n2\n3\n5\n8\n13\n21\n34\n55\n89\n144\n233\n377\n")
self.assertEqual(m.top, 377)
self.assertEqual(m.stack, [233, 377])
self.assertEqual(m.return_stack, crianza.Stack([6]))
class TestCrianzaNative(unittest.TestCase):
@unittest.skipUnless(CRIANZA_NATIVE, "crianza.native unsupported")
def test_mul2(self):
code = crianza.compile(crianza.parse("2 *"))
mul2 = crianza.native.compile(code, args=1, name="mul2",
docstring="Multiplies number with two.")
self.assertIsNotNone(mul2)
self.assertEqual(mul2.__doc__, "Multiplies number with two.")
self.assertEqual(mul2.__name__, "mul2")
        for n in range(100):
self.assertEqual(n*2, mul2(n))
for __ in range(10):
n = random.randint(-1000000, 1000000)
self.assertEqual(n*2, mul2(n))
if __name__ == "__main__":
unittest.main()
|
|
import urllib2
import pycmds
_RATES = {}
class Currency(str):
currencies = {
'Maltese Lira (MTL)': 'MTL',
'Ukraine Hryvnia (UAH)': 'UAH',
'Rwanda Franc (RWF)': 'RWF',
'Mauritania Ougulya (MRO)': 'MRO',
'Ugandan Shilling (UGX)': 'UGX',
'Swedish Krona (SEK)': 'SEK',
'Palladium Ounces (XPD)': 'XPD',
'Moroccan Dirham (MAD)': 'MAD',
'Mauritius Rupee (MUR)': 'MUR',
'Lesotho Loti (LSL)': 'LSL',
'Lebanese Pound (LBP)': 'LBP',
'Bermuda Dollar (BMD)': 'BMD',
'Cambodia Riel (KHR)': 'KHR',
'Singapore Dollar (SGD)': 'SGD',
'Aluminium Ounces (XAL)': 'XAL',
'New Turkish Lira (TRY)': 'TRY',
'Latvian Lat (LVL)': 'LVL',
'Malaysian Ringgit (MYR)': 'MYR',
'Macau Pataca (MOP)': 'MOP',
'Thai Baht (THB)': 'THB',
'Liberian Dollar (LRD)': 'LRD',
'Paraguayan Guarani (PYG)': 'PYG',
'Chinese Yuan (CNY)': 'CNY',
'Panama Balboa (PAB)': 'PAB',
'Haiti Gourde (HTG)': 'HTG',
'Iraqi Dinar (IQD)': 'IQD',
'Vanuatu Vatu (VUV)': 'VUV',
'Swiss Franc (CHF)': 'CHF',
'Argentine Peso (ARS)': 'ARS',
'Guatemala Quetzal (GTQ)': 'GTQ',
'Japanese Yen (JPY)': 'JPY',
'Hungarian Forint (HUF)': 'HUF',
'Vietnam Dong (VND)': 'VND',
'Bahraini Dinar (BHD)': 'BHD',
'Copper Pounds (XCP)': 'XCP',
'Zambian Kwacha (ZMK)': 'ZMK',
'Aruba Florin (AWG)': 'AWG',
'Barbados Dollar (BBD)': 'BBD',
'Estonian Kroon (EEK)': 'EEK',
'Bolivian Boliviano (BOB)': 'BOB',
'Libyan Dinar (LYD)': 'LYD',
'Dijibouti Franc (DJF)': 'DJF',
'Philippine Peso (PHP)': 'PHP',
'Samoa Tala (WST)': 'WST',
'Omani Rial (OMR)': 'OMR',
'Cuban Peso (CUP)': 'CUP',
'Mongolian Tugrik (MNT)': 'MNT',
'Platinum Ounces (XPT)': 'XPT',
'Danish Krone (DKK)': 'DKK',
'Tunisian Dinar (TND)': 'TND',
'Gambian Dalasi (GMD)': 'GMD',
'U.S. Dollar (USD)': 'USD',
'Hong Kong Dollar (HKD)': 'HKD',
'Gibraltar Pound (GIP)': 'GIP',
'Brazilian Real (BRL)': 'BRL',
'Tanzanian Shilling (TZS)': 'TZS',
'Guyana Dollar (GYD)': 'GYD',
'Belarus Ruble (BYR)': 'BYR',
'Swaziland Lilageni (SZL)': 'SZL',
'Bangladesh Taka (BDT)': 'BDT',
'Costa Rica Colon (CRC)': 'CRC',
'Malawi Kwacha (MWK)': 'MWK',
'Algerian Dinar (DZD)': 'DZD',
'Kenyan Shilling (KES)': 'KES',
'Venezuelan Bolivar (VEB)': 'VEB',
'Namibian Dollar (NAD)': 'NAD',
'Bulgarian Lev (BGN)': 'BGN',
'Myanmar Kyat (MMK)': 'MMK',
'Uruguayan New Peso (UYU)': 'UYU',
'Colombian Peso (COP)': 'COP',
'Gold Ounces (XAU)': 'XAU',
'Croatian Kuna (HRK)': 'HRK',
'Russian Rouble (RUB)': 'RUB',
'East Caribbean Dollar (XCD)': 'XCD',
'Albanian Lek (ALL)': 'ALL',
'Slovak Koruna (SKK)': 'SKK',
'Cyprus Pound (CYP)': 'CYP',
'Ethiopian Birr (ETB)': 'ETB',
'Yemen Riyal (YER)': 'YER',
'Sierra Leone Leone (SLL)': 'SLL',
'Guinea Franc (GNF)': 'GNF',
'Fiji Dollar (FJD)': 'FJD',
'Israeli Shekel (ILS)': 'ILS',
'Nigerian Naira (NGN)': 'NGN',
'Zimbabwe Dollar (ZWD)': 'ZWD',
'Chilean Peso (CLP)': 'CLP',
'Brunei Dollar (BND)': 'BND',
'Taiwan Dollar (TWD)': 'TWD',
'Macedonian Denar (MKD)': 'MKD',
'Silver Ounces (XAG)': 'XAG',
'Neth Antilles Guilder (ANG)': 'ANG',
'Syrian Pound (SYP)': 'SYP',
'Dominican Peso (DOP)': 'DOP',
'Falkland Islands Pound (FKP)': 'FKP',
'Polish Zloty (PLN)': 'PLN',
'Indonesian Rupiah (IDR)': 'IDR',
'Honduras Lempira (HNL)': 'HNL',
'Romanian New Leu (RON)': 'RON',
'Lithuanian Lita (LTL)': 'LTL',
'Egyptian Pound (EGP)': 'EGP',
'Nepalese Rupee (NPR)': 'NPR',
'British Pound (GBP)': 'GBP',
'Peruvian Nuevo Sol (PEN)': 'PEN',
'Iran Rial (IRR)': 'IRR',
'Papua New Guinea Kina (PGK)': 'PGK',
'Qatar Rial (QAR)': 'QAR',
"Tonga Pa'anga (TOP)": 'TOP',
'Euro (EUR)': 'EUR',
'Pakistani Rupee (PKR)': 'PKR',
'Ecuador Sucre (ECS)': 'ECS',
'St Helena Pound (SHP)': 'SHP',
'South African Rand (ZAR)': 'ZAR',
'Botswana Pula (BWP)': 'BWP',
'Kuwaiti Dinar (KWD)': 'KWD',
'Bhutan Ngultrum (BTN)': 'BTN',
'Cape Verde Escudo (CVE)': 'CVE',
'CFA Franc (BEAC) (XAF)': 'XAF',
'Saudi Arabian Riyal (SAR)': 'SAR',
'New Zealand Dollar (NZD)': 'NZD',
'Norwegian Krone (NOK)': 'NOK',
'Solomon Islands Dollar (SBD)': 'SBD',
'Bahamian Dollar (BSD)': 'BSD',
'Seychelles Rupee (SCR)': 'SCR',
'Australian Dollar (AUD)': 'AUD',
'Eritrea Nakfa (ERN)': 'ERN',
'Iceland Krona (ISK)': 'ISK',
'Comoros Franc (KMF)': 'KMF',
'Nicaragua Cordoba (NIO)': 'NIO',
'Sri Lanka Rupee (LKR)': 'LKR',
'Lao Kip (LAK)': 'LAK',
'Mexican Peso (MXN)': 'MXN',
'Jamaican Dollar (JMD)': 'JMD',
'Kazakhstan Tenge (KZT)': 'KZT',
'Slovenian Tolar (SIT)': 'SIT',
'Indian Rupee (INR)': 'INR',
'Sudanese Dinar (SDD)': 'SDD',
'UAE Dirham (AED)': 'AED',
'Czech Koruna (CZK)': 'CZK',
'El Salvador Colon (SVC)': 'SVC',
'Canadian Dollar (CAD)': 'CAD',
'Korean Won (KRW)': 'KRW',
'Moldovan Leu (MDL)': 'MDL',
'North Korean Won (KPW)': 'KPW',
'Sao Tome Dobra (STD)': 'STD',
'Ghanian Cedi (GHC)': 'GHC',
'Belize Dollar (BZD)': 'BZD',
'Trinidad&Tobago Dollar (TTD)': 'TTD',
'Maldives Rufiyaa (MVR)': 'MVR',
'Pacific Franc (XPF)': 'XPF',
'CFA Franc (BCEAO) (XOF)': 'XOF',
'Burundi Franc (BIF)': 'BIF',
'Somali Shilling (SOS)': 'SOS',
'Cayman Islands Dollar (KYD)': 'KYD',
'Jordanian Dinar (JOD)': 'JOD'
}
def __init__(self, string):
if string in self.currencies:
self.name = string
self.symbol = self.currencies[string]
return
string = string.lower()
for currency in self.currencies:
if string == currency.lower():
self.name = currency
self.symbol = self.currencies[currency]
return
        raise TypeError("unknown currency: %r" % string)
@classmethod
def suggest(cls, prefix):
suggestions = []
for currency in cls.currencies:
            if currency.lower().startswith(prefix.lower()):
suggestions.append(currency)
return suggestions
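# Minimal usage sketch: lookup is case-insensitive, and suggest() completes
# lowercase prefixes against the full Yahoo-style names.
def _example_currency_lookup():
    usd = Currency('u.s. dollar (usd)')
    return usd.symbol, Currency.suggest('euro')  # ('USD', ['Euro (EUR)'])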
def get_rate(symbols):
rate = _RATES.get(symbols)
if rate:
return rate
csv = urllib2.urlopen("http://download.finance.yahoo.com/d/quotes.csv?s=" + \
symbols + "=X&f=sl1d1t1ba&e=.csv").read()
rate = float(csv.split(',')[1])
_RATES[symbols] = rate
return rate
@pycmds.cmd("currency convert [amount] from [currency A] to [currency B]", float, Currency, Currency)
def convert(amount, currencyA, currencyB):
symbols = currencyA.symbol + currencyB.symbol
rate = get_rate(symbols)
return str(amount * rate) + ' ' + currencyB.name + '<br>' + \
'Exchange rates provided by <a href="http://finance.yahoo.com/">Yahoo! Finance</a>.'
|
|
from wtforms import fields, validators
from sqlalchemy import Boolean, Column
from flask.ext.admin import form
from flask.ext.admin.form import Select2Field
from flask.ext.admin.model.form import (converts, ModelConverterBase,
InlineModelConverterBase, FieldPlaceholder)
from flask.ext.admin.model.fields import AjaxSelectField, AjaxSelectMultipleField
from flask.ext.admin.model.helpers import prettify_name
from flask.ext.admin._backwards import get_property
from flask.ext.admin._compat import iteritems
from .validators import Unique
from .fields import QuerySelectField, QuerySelectMultipleField, InlineModelFormList
from .tools import is_inherited_primary_key, get_column_for_current_model, has_multiple_pks
from .ajax import create_ajax_loader
try:
# Field has better input parsing capabilities.
from wtforms.ext.dateutil.fields import DateTimeField
except ImportError:
from wtforms.fields import DateTimeField
class AdminModelConverter(ModelConverterBase):
"""
SQLAlchemy model to form converter
"""
def __init__(self, session, view):
super(AdminModelConverter, self).__init__()
self.session = session
self.view = view
def _get_label(self, name, field_args):
"""
Label for field name. If it is not specified explicitly,
then the views prettify_name method is used to find it.
:param field_args:
Dictionary with additional field arguments
"""
if 'label' in field_args:
return field_args['label']
column_labels = get_property(self.view, 'column_labels', 'rename_columns')
if column_labels:
return column_labels.get(name)
prettify_override = getattr(self.view, 'prettify_name', None)
if prettify_override:
return prettify_override(name)
return prettify_name(name)
def _get_description(self, name, field_args):
if 'description' in field_args:
return field_args['description']
column_descriptions = getattr(self.view, 'column_descriptions', None)
if column_descriptions:
return column_descriptions.get(name)
def _get_field_override(self, name):
form_overrides = getattr(self.view, 'form_overrides', None)
if form_overrides:
return form_overrides.get(name)
return None
def _model_select_field(self, prop, multiple, remote_model, **kwargs):
loader = getattr(self.view, '_form_ajax_refs', {}).get(prop.key)
if loader:
if multiple:
return AjaxSelectMultipleField(loader, **kwargs)
else:
return AjaxSelectField(loader, **kwargs)
if 'query_factory' not in kwargs:
kwargs['query_factory'] = lambda: self.session.query(remote_model)
if 'widget' not in kwargs:
if multiple:
kwargs['widget'] = form.Select2Widget(multiple=True)
else:
kwargs['widget'] = form.Select2Widget()
if multiple:
return QuerySelectMultipleField(**kwargs)
else:
return QuerySelectField(**kwargs)
def _convert_relation(self, prop, kwargs):
# Check if relation is specified
form_columns = getattr(self.view, 'form_columns', None)
if form_columns and prop.key not in form_columns:
return None
remote_model = prop.mapper.class_
column = prop.local_remote_pairs[0][0]
        # If this relation points to a local column that's not a foreign key,
        # assume it is a backref and use the remote column data
if not column.foreign_keys:
column = prop.local_remote_pairs[0][1]
kwargs['label'] = self._get_label(prop.key, kwargs)
kwargs['description'] = self._get_description(prop.key, kwargs)
if column.nullable or prop.direction.name != 'MANYTOONE':
kwargs['validators'].append(validators.Optional())
else:
kwargs['validators'].append(validators.InputRequired())
# Contribute model-related parameters
if 'allow_blank' not in kwargs:
kwargs['allow_blank'] = column.nullable
# Override field type if necessary
override = self._get_field_override(prop.key)
if override:
return override(**kwargs)
if prop.direction.name == 'MANYTOONE' or not prop.uselist:
return self._model_select_field(prop, False, remote_model, **kwargs)
elif prop.direction.name == 'ONETOMANY':
return self._model_select_field(prop, True, remote_model, **kwargs)
elif prop.direction.name == 'MANYTOMANY':
return self._model_select_field(prop, True, remote_model, **kwargs)
def convert(self, model, mapper, prop, field_args, hidden_pk):
# Properly handle forced fields
if isinstance(prop, FieldPlaceholder):
return form.recreate_field(prop.field)
kwargs = {
'validators': [],
'filters': []
}
if field_args:
kwargs.update(field_args)
# Check if it is relation or property
if hasattr(prop, 'direction'):
return self._convert_relation(prop, kwargs)
else:
# Ignore pk/fk
if hasattr(prop, 'columns'):
# Check if more than one column mapped to the property
if len(prop.columns) != 1:
if is_inherited_primary_key(prop):
column = get_column_for_current_model(prop)
else:
raise TypeError('Can not convert multiple-column properties (%s.%s)' % (model, prop.key))
else:
# Grab column
column = prop.columns[0]
form_columns = getattr(self.view, 'form_columns', None) or ()
# Do not display foreign keys - use relations, except when explicitly instructed
if column.foreign_keys and prop.key not in form_columns:
return None
# Only display "real" columns
if not isinstance(column, Column):
return None
unique = False
if column.primary_key:
if hidden_pk:
# If requested to add hidden field, show it
return fields.HiddenField()
else:
# By default, don't show primary keys either
# If PK is not explicitly allowed, ignore it
if prop.key not in form_columns:
return None
                        # The current Unique validator does not work with multi-column PKs
if not has_multiple_pks(model):
kwargs['validators'].append(Unique(self.session,
model,
column))
unique = True
# If field is unique, validate it
if column.unique and not unique:
kwargs['validators'].append(Unique(self.session,
model,
column))
optional_types = getattr(self.view, 'form_optional_types', (Boolean,))
if not column.nullable and not isinstance(column.type, optional_types):
kwargs['validators'].append(validators.InputRequired())
# Apply label and description if it isn't inline form field
if self.view.model == mapper.class_:
kwargs['label'] = self._get_label(prop.key, kwargs)
kwargs['description'] = self._get_description(prop.key, kwargs)
# Figure out default value
default = getattr(column, 'default', None)
value = None
if default is not None:
value = getattr(default, 'arg', None)
if value is not None:
if getattr(default, 'is_callable', False):
value = lambda: default.arg(None)
else:
if not getattr(default, 'is_scalar', True):
value = None
if value is not None:
kwargs['default'] = value
# Check nullable
if column.nullable:
kwargs['validators'].append(validators.Optional())
# Override field type if necessary
override = self._get_field_override(prop.key)
if override:
return override(**kwargs)
# Check choices
form_choices = getattr(self.view, 'form_choices', None)
if mapper.class_ == self.view.model and form_choices:
choices = form_choices.get(column.key)
if choices:
return Select2Field(
choices=choices,
allow_blank=column.nullable,
**kwargs
)
# Run converter
converter = self.get_converter(column)
if converter is None:
return None
return converter(model=model, mapper=mapper, prop=prop,
column=column, field_args=kwargs)
return None
@classmethod
def _string_common(cls, column, field_args, **extra):
if column.type.length:
field_args['validators'].append(validators.Length(max=column.type.length))
@converts('String', 'Unicode')
def conv_String(self, column, field_args, **extra):
if hasattr(column.type, 'enums'):
field_args['validators'].append(validators.AnyOf(column.type.enums))
field_args['choices'] = [(f, f) for f in column.type.enums]
return form.Select2Field(**field_args)
if column.nullable:
filters = field_args.get('filters', [])
filters.append(lambda x: x or None)
field_args['filters'] = filters
self._string_common(column=column, field_args=field_args, **extra)
return fields.TextField(**field_args)
@converts('Text', 'UnicodeText',
'sqlalchemy.types.LargeBinary', 'sqlalchemy.types.Binary')
def conv_Text(self, field_args, **extra):
self._string_common(field_args=field_args, **extra)
return fields.TextAreaField(**field_args)
@converts('Boolean')
def conv_Boolean(self, field_args, **extra):
return fields.BooleanField(**field_args)
@converts('Date')
def convert_date(self, field_args, **extra):
field_args['widget'] = form.DatePickerWidget()
return fields.DateField(**field_args)
@converts('DateTime')
def convert_datetime(self, field_args, **extra):
field_args['widget'] = form.DateTimePickerWidget()
return DateTimeField(**field_args)
@converts('Time')
def convert_time(self, field_args, **extra):
return form.TimeField(**field_args)
@converts('Integer', 'SmallInteger')
def handle_integer_types(self, column, field_args, **extra):
unsigned = getattr(column.type, 'unsigned', False)
if unsigned:
field_args['validators'].append(validators.NumberRange(min=0))
return fields.IntegerField(**field_args)
@converts('Numeric', 'Float')
def handle_decimal_types(self, column, field_args, **extra):
places = getattr(column.type, 'scale', 2)
if places is not None:
field_args['places'] = places
return fields.DecimalField(**field_args)
@converts('databases.mysql.MSYear')
def conv_MSYear(self, field_args, **extra):
field_args['validators'].append(validators.NumberRange(min=1901, max=2155))
return fields.TextField(**field_args)
@converts('databases.postgres.PGInet', 'dialects.postgresql.base.INET')
def conv_PGInet(self, field_args, **extra):
field_args.setdefault('label', u'IP Address')
field_args['validators'].append(validators.IPAddress())
return fields.TextField(**field_args)
@converts('dialects.postgresql.base.MACADDR')
def conv_PGMacaddr(self, field_args, **extra):
field_args.setdefault('label', u'MAC Address')
field_args['validators'].append(validators.MacAddress())
return fields.TextField(**field_args)
@converts('dialects.postgresql.base.UUID')
def conv_PGUuid(self, field_args, **extra):
field_args.setdefault('label', u'UUID')
field_args['validators'].append(validators.UUID())
return fields.TextField(**field_args)
@converts('sqlalchemy.dialects.postgresql.base.ARRAY')
def conv_ARRAY(self, field_args, **extra):
return form.Select2TagsField(save_as_list=True, **field_args)
def _resolve_prop(prop):
"""
Resolve proxied property
:param prop:
Property to resolve
"""
# Try to see if it is proxied property
if hasattr(prop, '_proxied_property'):
return prop._proxied_property
return prop
# Get list of fields and generate form
def get_form(model, converter,
base_class=form.BaseForm,
only=None,
exclude=None,
field_args=None,
hidden_pk=False,
ignore_hidden=True,
extra_fields=None):
"""
Generate form from the model.
:param model:
Model to generate form from
:param converter:
Converter class to use
:param base_class:
Base form class
:param only:
Include fields
:param exclude:
Exclude fields
:param field_args:
Dictionary with additional field arguments
:param hidden_pk:
Generate hidden field with model primary key or not
    :param ignore_hidden:
        If set to True (default), will ignore properties that start with underscore
    :param extra_fields:
        Dictionary of additional fields to contribute to the generated form
    """
# TODO: Support new 0.8 API
if not hasattr(model, '_sa_class_manager'):
raise TypeError('model must be a sqlalchemy mapped model')
mapper = model._sa_class_manager.mapper
field_args = field_args or {}
properties = ((p.key, p) for p in mapper.iterate_properties)
if only:
props = dict(properties)
def find(name):
# If field is in extra_fields, it has higher priority
if extra_fields and name in extra_fields:
return FieldPlaceholder(extra_fields[name])
# Try to look it up in properties list first
p = props.get(name)
if p is not None:
return p
# If it is hybrid property or alias, look it up in a model itself
p = getattr(model, name, None)
if p is not None and hasattr(p, 'property'):
return p.property
raise ValueError('Invalid model property name %s.%s' % (model, name))
# Filter properties while maintaining property order in 'only' list
properties = ((x, find(x)) for x in only)
elif exclude:
properties = (x for x in properties if x[0] not in exclude)
field_dict = {}
for name, p in properties:
# Ignore protected properties
if ignore_hidden and name.startswith('_'):
continue
prop = _resolve_prop(p)
field = converter.convert(model, mapper, prop, field_args.get(name), hidden_pk)
if field is not None:
field_dict[name] = field
# Contribute extra fields
if not only and extra_fields:
for name, field in iteritems(extra_fields):
field_dict[name] = form.recreate_field(field)
return type(model.__name__ + 'Form', (base_class, ), field_dict)
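# Illustrative sketch (not part of the original module): generating a form
# class for a plain SQLAlchemy model. `session`, `view` and `UserModel` are
# hypothetical stand-ins for objects normally supplied by Flask-Admin's
# ModelView machinery; `name` and `email` are assumed columns.
def _example_get_form(session, view, UserModel):
    converter = AdminModelConverter(session, view)
    # expose two columns only; emit the primary key as a hidden field
    return get_form(UserModel, converter,
                    only=['name', 'email'],
                    hidden_pk=True)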
class InlineModelConverter(InlineModelConverterBase):
"""
Inline model form helper.
"""
inline_field_list_type = InlineModelFormList
"""
Used field list type.
If you want to do some custom rendering of inline field lists,
you can create your own wtforms field and use it instead
"""
def __init__(self, session, view, model_converter):
"""
Constructor.
:param session:
SQLAlchemy session
:param view:
Flask-Admin view object
:param model_converter:
Model converter class. Will be automatically instantiated with
appropriate `InlineFormAdmin` instance.
"""
super(InlineModelConverter, self).__init__(view)
self.session = session
self.model_converter = model_converter
def get_info(self, p):
info = super(InlineModelConverter, self).get_info(p)
# Special case for model instances
if info is None:
if hasattr(p, '_sa_class_manager'):
return self.form_admin_class(p)
else:
model = getattr(p, 'model', None)
if model is None:
raise Exception('Unknown inline model admin: %s' % repr(p))
attrs = dict()
for attr in dir(p):
if not attr.startswith('_') and attr != 'model':
attrs[attr] = getattr(p, attr)
                info = self.form_admin_class(model, **attrs)
# Resolve AJAX FKs
info._form_ajax_refs = self.process_ajax_refs(info)
return info
def process_ajax_refs(self, info):
refs = getattr(info, 'form_ajax_refs', None)
result = {}
if refs:
for name, opts in iteritems(refs):
new_name = '%s-%s' % (info.model.__name__.lower(), name)
loader = None
if isinstance(opts, dict):
loader = create_ajax_loader(info.model, self.session, new_name, name, opts)
else:
loader = opts
result[name] = loader
self.view._form_ajax_refs[new_name] = loader
return result
def contribute(self, model, form_class, inline_model):
"""
Generate form fields for inline forms and contribute them to
the `form_class`
:param model:
Model class
:param form_class:
Form to add properties to
:param inline_model:
Inline model. Can be one of:
- ``tuple``, first value is related model instance,
second is dictionary with options
- ``InlineFormAdmin`` instance
- Model class
:return:
Form class
"""
mapper = model._sa_class_manager.mapper
info = self.get_info(inline_model)
# Find property from target model to current model
target_mapper = info.model._sa_class_manager.mapper
reverse_prop = None
for prop in target_mapper.iterate_properties:
if hasattr(prop, 'direction'):
if issubclass(model, prop.mapper.class_):
reverse_prop = prop
break
else:
raise Exception('Cannot find reverse relation for model %s' % info.model)
# Find forward property
forward_prop = None
for prop in mapper.iterate_properties:
if hasattr(prop, 'direction'):
if prop.mapper.class_ == target_mapper.class_:
forward_prop = prop
break
else:
raise Exception('Cannot find forward relation for model %s' % info.model)
# Remove reverse property from the list
ignore = [reverse_prop.key]
if info.form_excluded_columns:
exclude = ignore + list(info.form_excluded_columns)
else:
exclude = ignore
# Create converter
converter = self.model_converter(self.session, info)
# Create form
child_form = info.get_form()
if child_form is None:
child_form = get_form(info.model,
converter,
only=info.form_columns,
exclude=exclude,
field_args=info.form_args,
hidden_pk=True)
# Post-process form
child_form = info.postprocess_form(child_form)
kwargs = dict()
label = self.get_label(info, forward_prop.key)
if label:
kwargs['label'] = label
# Contribute field
setattr(form_class,
forward_prop.key,
self.inline_field_list_type(child_form,
self.session,
info.model,
reverse_prop.key,
info,
**kwargs))
return form_class
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2014 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import json
import os
import re
import requests
from .. import base
from .. import mongo_replicaset
from girder.utility.s3_assetstore_adapter import botoConnectS3
Chunk1, Chunk2 = ('hello ', 'world')
def setUpModule():
base.startServer(mockS3=True)
def tearDownModule():
base.stopServer()
def _send_s3_request(request, data=None):
"""
Send a request to an S3 server.
:param request: a dictionary of headers, method, and url.
:param data: data to include in the request.
:returns: the result of the request.
"""
    if request['method'] == 'PUT':
        req = requests.put(url=request['url'], data=data,
                           headers=request.get('headers', {}))
    elif request['method'] == 'POST':
        req = requests.post(url=request['url'], data=data,
                            headers=request.get('headers', {}))
    else:
        raise ValueError('Unsupported S3 request method: %s' %
                         request['method'])
    return req
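# The `request` dictionaries consumed above come from the server's upload
# responses and look roughly like this (hypothetical values):
#   {'method': 'PUT', 'url': 'http://s3.example/bucket/key', 'headers': {}}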
class UploadTestCase(base.TestCase):
def setUp(self):
base.TestCase.setUp(self)
admin = {
'email': 'admin@email.com',
'login': 'admin',
'firstName': 'Admin',
'lastName': 'Admin',
'password': 'adminpassword',
'admin': True
}
self.admin = self.model('user').createUser(**admin)
user = {
'email': 'good@email.com',
'login': 'goodlogin',
'firstName': 'First',
'lastName': 'Last',
'password': 'goodpassword',
'admin': False
}
self.user = self.model('user').createUser(**user)
folders = self.model('folder').childFolders(
parent=self.user, parentType='user', user=self.user)
for folder in folders:
if folder['public'] is True:
self.folder = folder
def _uploadFile(self, name, partial=False, largeFile=False):
"""Upload a file either completely or partially.
:param name: the name of the file to upload.
:param partial: the number of steps to complete in the uploads: 0
initializes the upload, 1 uploads 1 chunk, etc. False
to complete the upload.
:param largeFile: if True, upload a file that is > 32Mb
:returns: the upload record which includes the upload id."""
if not largeFile:
chunk1 = Chunk1
chunk2 = Chunk2
else:
chunk1 = '-' * (1024 * 1024 * 32)
chunk2 = '-' * (1024 * 1024 * 1)
resp = self.request(
path='/file', method='POST', user=self.user, params={
'parentType': 'folder',
'parentId': self.folder['_id'],
'name': name,
'size': len(chunk1) + len(chunk2),
'mimeType': 'text/plain'
})
self.assertStatusOk(resp)
upload = resp.json
if partial is not False and partial == 0:
return upload
if 's3' not in upload:
fields = [('offset', 0), ('uploadId', upload['_id'])]
files = [('chunk', 'helloWorld.txt', chunk1)]
resp = self.multipartRequest(
path='/file/chunk', user=self.user, fields=fields, files=files)
self.assertStatusOk(resp)
if partial is not False:
return resp.json
fields = [('offset', len(chunk1)), ('uploadId', upload['_id'])]
files = [('chunk', 'helloWorld.txt', chunk2)]
resp = self.multipartRequest(
path='/file/chunk', user=self.user, fields=fields, files=files)
self.assertStatusOk(resp)
return upload
# s3 uses a different method for uploading chunks
# This has no error checking at all
if not upload['s3']['chunked']:
_send_s3_request(upload['s3']['request'], chunk1+chunk2)
if partial is not False:
return
else:
chunk1 = chunk1+chunk2
s3resp = _send_s3_request(upload['s3']['request'])
matches = re.search('<UploadId>(.*)</UploadId>', s3resp.text)
s3uploadId = matches.groups()[0]
offset = 0
chunkN = 1
while len(chunk1):
params = {'offset': offset, 'uploadId': upload['_id']}
params["chunk"] = json.dumps({'s3UploadId': s3uploadId,
'partNumber': chunkN})
resp = self.request(path='/file/chunk', method='POST',
user=self.user, params=params)
self.assertStatusOk(resp)
upload = resp.json
if len(chunk1) > upload['s3']['chunkLength']:
chunk2 = chunk1[upload['s3']['chunkLength']:]
chunk1 = chunk1[:upload['s3']['chunkLength']]
else:
chunk2 = ""
_send_s3_request(upload['s3']['request'], chunk1)
chunk1 = chunk2
if partial is not False:
partial -= 1
chunkN += 1
if partial is not False and not partial:
return upload
resp = self.request(path='/file/completion', method='POST',
user=self.user,
params={'uploadId': upload['_id']})
self.assertStatusOk(resp)
if 's3FinalizeRequest' in resp.json:
_send_s3_request(resp.json['s3FinalizeRequest'])
return upload
def _testUpload(self):
"""Upload a file to the server and several partial files. Test that we
        can delete a partial upload but not a completed upload. Test that we
can delete partial uploads that are older than a certain date."""
completeUpload = self._uploadFile('complete_upload')
# test uploading large files
self._uploadFile('complete_upload', largeFile=True)
partialUploads = []
for largeFile in (False, True):
for partial in range(3):
partialUploads.append(self._uploadFile(
'partial_upload_%d_%s' % (partial, str(largeFile)),
partial, largeFile))
# check that a user cannot list partial uploads
resp = self.request(path='/system/uploads', method='GET',
user=self.user)
self.assertStatus(resp, 403)
# The admin user should see all of the partial uploads, but not the
# complete upload
resp = self.request(path='/system/uploads', method='GET',
user=self.admin)
self.assertStatusOk(resp)
foundUploads = resp.json
self.assertEqual(len(foundUploads), len(partialUploads))
# The user shouldn't be able to delete an upload
        resp = self.request(path='/system/uploads', method='DELETE',
user=self.user,
params={'uploadId': partialUploads[0]['_id']})
self.assertStatus(resp, 403)
        # Deleting the completed upload should be a no-op: the endpoint only
        # removes partial uploads, so the response lists nothing deleted
resp = self.request(path='/system/uploads', method='DELETE',
user=self.admin,
params={'uploadId': completeUpload['_id']})
self.assertStatusOk(resp)
self.assertEqual(resp.json, [])
resp = self.request(path='/system/uploads', method='GET',
user=self.admin)
self.assertEqual(len(resp.json), len(partialUploads))
# The admin should be able to ask for a partial upload by id
resp = self.request(path='/system/uploads', method='GET',
user=self.admin,
params={'uploadId': partialUploads[0]['_id']})
self.assertStatusOk(resp)
self.assertEqual(resp.json[0]['_id'], partialUploads[0]['_id'])
# The admin should be able to ask for a partial upload by assetstore id
resp = self.request(path='/system/uploads', method='GET',
user=self.admin,
params={'assetstoreId': self.assetstore['_id']})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), len(partialUploads))
# The admin should be able to ask for a partial upload by age.
# Everything should be more than 0 days old
resp = self.request(path='/system/uploads', method='GET',
user=self.admin,
params={'minimumAge': 0})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), len(partialUploads))
# The admin should be able to delete an upload
resp = self.request(path='/system/uploads', method='DELETE',
user=self.admin,
params={'uploadId': partialUploads[0]['_id']})
self.assertStatusOk(resp)
self.assertEqual(resp.json[0]['_id'], partialUploads[0]['_id'])
# We should now have one less partial upload
resp = self.request(path='/system/uploads', method='GET',
user=self.admin)
self.assertEqual(len(resp.json), len(partialUploads)-1)
# If we ask to delete everything more than one day old, nothing should
# be deleted.
resp = self.request(path='/system/uploads', method='DELETE',
user=self.admin,
params={'minimumAge': 1})
self.assertStatusOk(resp)
self.assertEqual(resp.json, [])
# Delete all partial uploads
resp = self.request(path='/system/uploads', method='DELETE',
user=self.admin)
self.assertStatusOk(resp)
resp = self.request(path='/system/uploads', method='GET',
user=self.admin)
self.assertEqual(resp.json, [])
def testFilesystemAssetstoreUpload(self):
self._testUpload()
def testGridFSAssetstoreUpload(self):
# Clear any old DB data
base.dropGridFSDatabase('girder_assetstore_upload_test')
# Clear the assetstore database and create a GridFS assetstore
self.model('assetstore').remove(self.model('assetstore').getCurrent())
assetstore = self.model('assetstore').createGridFsAssetstore(
name='Test', db='girder_assetstore_upload_test')
self.assetstore = assetstore
self._testUpload()
def testGridFSReplicaSetAssetstoreUpload(self):
verbose = 0
if 'REPLICASET' in os.environ.get('EXTRADEBUG', '').split():
verbose = 2
# Starting the replica sets takes time (~25 seconds)
mongo_replicaset.startMongoReplicaSet(verbose=verbose)
# Clear the assetstore database and create a GridFS assetstore
self.model('assetstore').remove(self.model('assetstore').getCurrent())
# When the mongo connection to one of the replica sets goes down, it
# takes twice the socket timeout for us to reconnect and get on with
# an upload. We can override the default timeout by passing it as a
# mongodb uri parameter.
assetstore = self.model('assetstore').createGridFsAssetstore(
name='Test', db='girder_assetstore_rs_upload_test',
mongohost='mongodb://127.0.0.1:27070,127.0.0.1:27071,'
'127.0.0.1:27072/?socketTimeoutMS=5000&connectTimeoutMS=2500',
replicaset='replicaset')
self.assetstore = assetstore
self._testUpload()
# Test having the primary replica set going offline and then uploading
# again. If the current primary goes offline, it seems to take mongo
# 30 seconds to elect a new primary. If we step down the current
# primary before pausing it, then the new election will happen in 20
# seconds.
mongo_replicaset.stepDownMongoReplicaSet(0)
mongo_replicaset.waitForRSStatus(
mongo_replicaset.getMongoClient(0), status=[2, (1, 2), (1, 2)],
verbose=verbose)
mongo_replicaset.pauseMongoReplicaSet([True], verbose=verbose)
self._uploadFile('rs_upload_1')
# Have a different member of the replica set go offline and the first
# come back. This takes a long time, so I am disabling it
# mongo_replicaset.pauseMongoReplicaSet([False, True], verbose=verbose)
# self._uploadFile('rs_upload_2')
# Have the set come back online and upload once more
mongo_replicaset.pauseMongoReplicaSet([False, False], verbose=verbose)
self._uploadFile('rs_upload_3')
mongo_replicaset.stopMongoReplicaSet()
def testS3AssetstoreUpload(self):
# Clear the assetstore database and create an S3 assetstore
self.model('assetstore').remove(self.assetstore)
params = {
'name': 'S3 Assetstore',
'bucket': 'bucketname',
'prefix': 'testprefix',
'accessKeyId': 'someKey',
'secret': 'someSecret',
'service': base.mockS3Server.service
}
assetstore = self.model('assetstore').createS3Assetstore(**params)
self.assetstore = assetstore
self._testUpload()
# make an untracked upload to test that we can find and clear it
conn = botoConnectS3(base.mockS3Server.botoConnect)
bucket = conn.lookup(bucket_name='bucketname', validate=True)
bucket.initiate_multipart_upload('testprefix/abandoned_upload')
resp = self.request(path='/system/uploads', method='GET',
user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 1)
# Ask to delete it
resp = self.request(path='/system/uploads', method='DELETE',
user=self.admin)
self.assertStatusOk(resp)
# Check that it is gone
resp = self.request(path='/system/uploads', method='GET',
user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(resp.json, [])
|
|
import sys
import os
import codecs
import pygettext
MESSAGES = []
def detect_unicode_encoding(data):
    """Return (encoding name, BOM length) for the given leading bytes."""
encodings_map = [
(3, codecs.BOM_UTF8, 'UTF-8'),
(4, codecs.BOM_UTF32_LE, 'UTF-32LE'),
(4, codecs.BOM_UTF32_BE, 'UTF-32BE'),
(2, codecs.BOM_UTF16_LE, 'UTF-16LE'),
(2, codecs.BOM_UTF16_BE, 'UTF-16BE'),
]
for (offset, bom, name) in encodings_map:
        if data[:offset] == bom:
return name, offset
return 'UTF-8', 0
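# Illustrative sketch (not part of the original module): a UTF-8 byte order
# mark is detected and reported together with its length in bytes.
def _example_detect_bom():
    name, offset = detect_unicode_encoding(codecs.BOM_UTF8 + 'msgid ""')
    assert (name, offset) == ('UTF-8', 3)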
class ParseError(ValueError):
"""Signals an error reading .po file."""
def merge(master_file, language_files):
parsed_master_file = parse(master_file)
for path in language_files:
merging(parsed_master_file, path)
def merging(parsed_master_file, path):
lang_file = parse(path)
id_map = {}
new_lang = []
for msg in lang_file:
id_map[msg['id']] = msg['message']
for msg in parsed_master_file:
msg['message'] = id_map.get(msg['id'])
new_lang.append(msg)
save(path, new_lang)
def items(path, sort_by, dir):
po = parse(path)
po = po[1:]
if sort_by:
return sort(po, sort_by, dir)
return po
def sort(po, sort_by, dir):
group = dict()
sorted = list()
col_map = dict(id='id', string='message', context='path')
for message in po:
group.setdefault(message[col_map[sort_by]], []).append(message)
kg = group.keys()
kg.sort()
if dir == 'up':
kg.reverse()
for k in kg:
sorted.extend(group[k])
return sorted
def save(path, message_list):
txt = []
m = message_list[0]['message']
txt.append(m)
txt.append(u'\n\n')
for p in message_list[1:]:
message = p['message'] or ''
context = p['context']
id = p['id']
txt.append(u'#: %s' % context)
txt.append(u'msgid %s\n' % normalize(id))
txt.append(u'msgstr %s\n\n' % normalize(message))
txt = u''.join(txt)
backup_name = path.replace('.po', '.back')
try:
os.remove(backup_name)
except os.error:
pass
os.rename(path, backup_name)
    with codecs.open(path, 'wb', 'utf-8') as out:
        out.write(txt)
def update(path, msg_id, msg_text):
message_list = parse(path)
for p in message_list[1:]:
if p['id'].strip() == msg_id.strip():
p['message'] = msg_text
save(path, message_list)
def quote(msg):
return pygettext.escape_unicode(msg)
def normalize(s):
# taken from pygettext module but changed a bit
lines = s.split('\n')
if len(lines) == 1:
s = '"' + quote(s) + '"'
else:
if not lines[-1]:
del lines[-1]
lines[-1] = lines[-1] + '\n'
for i in range(len(lines)):
lines[i] = quote(lines[i])
lineterm = '\\n"\n"'
s = '""\n"' + lineterm.join(lines) + '"'
return s
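# Example of `normalize` on a multi-line message (sketch, assuming
# pygettext.escape_unicode escapes the embedded newlines):
#   normalize(u'line one\nline two\n') produces the standard .po form
#   ""
#   "line one\n"
#   "line two\n"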
def add(msg_id, msg, context, fuzzy, MESSAGES):
    "Add a non-fuzzy translation to the message list."
    if fuzzy:
        return
    c = context.split(':')
    path = c[0]
    file = os.path.basename(path)
    line = c[-1].replace('\n', '')  # remove the trailing newline
    MESSAGES.append(dict(id=msg_id,
                         message=msg,
                         path=path,
                         context=context,
                         file=file,
                         line=line
                         ))
def parse(infile):
MESSAGES = list()
ID = 1
STR = 2
header = list()
fd = open(infile, 'rt')
encoding, offset = detect_unicode_encoding(fd.read(4))
fd.seek(offset)
lines = [line.decode(encoding) for line in fd.readlines()]
section = None
fuzzy = 0
# Parse the catalog
lno = 0
context = ''
prev_context = ''
heading = True
for l in lines:
if not l:
continue
lno += 1
if heading:
if l.startswith('#: '):
heading = False
if l.startswith('msgid "') and header and \
'Generated-By:' in header[-1]:
heading = False
if l.strip() and heading:
header.append(l)
# If we get a comment line after a msgstr, this is a new entry
if l[0] == '#' and section == STR:
add(msgid, msgstr, prev_context, fuzzy, MESSAGES)
section = None
fuzzy = 0
# Record a fuzzy mark
        if l[:2] == '#,' and 'fuzzy' in l:
fuzzy = 1
if l.startswith('#: '):
context = l[len('#: '):]
# Skip comments
if l[0] == '#':
continue
# Now we are in a msgid section, output previous section
if l.startswith('msgid'):
if section == STR:
add(msgid, msgstr, prev_context, fuzzy, MESSAGES)
section = ID
prev_context = context
l = l[5:]
msgid = msgstr = ''
# Now we are in a msgstr section
elif l.startswith('msgstr'):
section = STR
l = l[6:]
# Skip empty lines
l = l.strip()
if not l:
continue
# XXX: Does this always follow Python escape semantics?
try:
l = eval(l)
except Exception, e:
print >> sys.stderr, 'Escape error on %s: %d' % (infile, lno), \
'before:', repr(l)
raise ParseError(e)
try:
l = l.decode('utf8')
        except UnicodeDecodeError, e:
print >> sys.stderr, 'Encoding error on %s: %d' % (infile, lno), \
'before:', repr(l)
raise ParseError(e)
if section == ID:
msgid += l
elif section == STR:
msgstr += l
else:
print >> sys.stderr, 'Syntax error on %s:%d' % (infile, lno), \
'before:'
print >> sys.stderr, l
            raise ParseError('syntax error on %s:%d' % (infile, lno))
# Add last entry
if section == STR:
add(msgid, msgstr, prev_context, fuzzy, MESSAGES)
MESSAGES[0]['message'] = u''.join(header)
return MESSAGES
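# Sketch of the round trip this module supports (hypothetical paths):
#   msgs = parse('locale/de.po')   # list of dicts with id/message/context/...
#   update('locale/de.po', msgs[1]['id'], u'new translation')
#   merge('locale/master.po', ['locale/de.po', 'locale/fr.po'])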
|
|
"""
Basic commonly used builder classes
"""
import numpy as np
import pandas as pa
from ..nodes import MDFNode, MDFEvalNode
from collections import deque, defaultdict
import datetime
import operator
import csv
import matplotlib.pyplot as pp
import sys
import types
if sys.version_info[0] > 2:
    basestring = str
    xrange = range
    from functools import reduce
def _get_labels(node, label=None, value=None):
"""
returns a list of lables the same length as value, if value is
a list (or of length 1 if value is not a list)
If label is supplied that will be used as the base (eg x.0...x.N)
or if it's a list it will be padded to the correct length and returned.
"""
# if there's a value return enough labels for the value, if it's a list
if value is not None:
if label is None:
label = _get_labels(node)[0]
# if value is a list return a label for each element
if isinstance(value, (tuple, list, np.ndarray, pa.core.generic.NDFrame, pa.Index)):
            # if the label is a list already pad it to the right size, using
            # the first label as the base name for any extra columns
            if isinstance(label, (tuple, list, np.ndarray, pa.core.generic.NDFrame, pa.Index)):
                label = list(label)
                if len(label) < len(value):
                    label += ["%s.%d" % (label[0], i) for i in xrange(len(label), len(value))]
                return label[:len(value)]
# otherwise create a list using the value's index
if isinstance(value, pa.Series):
return ["%s.%s" % (label, c) for c in value.index]
return ["%s.%d" % (label, i) for i in xrange(len(value))]
# if value is not a list return a single label
if isinstance(label, (tuple, list, np.ndarray, pa.core.generic.NDFrame)):
return list(label[:1])
return [label]
# if there's no value but a label, assume the value is a scalar and return a single label
if label is not None:
if isinstance(label, (tuple, list, np.ndarray, pa.core.generic.NDFrame)):
return list(label[:1])
return [label]
    # if there's no value and no label, derive a label from the node itself
if isinstance(node, MDFNode):
return [node.name.split(".").pop()]
return [str(node)]
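# Examples of the labelling rules above (sketch, not original code):
#   _get_labels(node, "x", [1, 2, 3])  ->  ["x.0", "x.1", "x.2"]
#   _get_labels(node, "x", 1.0)        ->  ["x"]
#   _get_labels(node)                  ->  [node.name.split(".").pop()]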
def _relabel(columns, node_names, short_names, ctx_ids):
""""
return list of new column names that don't overlap
columns is a list of columns lists for each node.
"""
assert len(columns) == len(node_names) == len(short_names) == len(ctx_ids)
def _get_overlapping(columns):
col_count = {}
overlapping = set()
for cols in columns:
for col in cols:
col_count.setdefault(col, 0)
col_count[col] += 1
if col_count[col] > 1:
overlapping.add(col)
return overlapping
overlap = _get_overlapping(columns)
if not overlap:
return columns
# take a copy as this will be updated in-place
columns = [list(cols) for cols in columns]
# collect the node names and contexts for all overlapping columns
overlap_node_names = {}
for i, (cols, node_name, short_name, ctx_id) \
in enumerate(zip(columns, node_names, short_names, ctx_ids)):
for j, col in enumerate(cols):
if col in overlap:
overlap_node_names.setdefault(col, [])\
.append((i, j, node_name, short_name, ctx_id))
    for col, details in overlap_node_names.items():
is_, js_, node_names, short_names, ctx_ids = zip(*details)
# prefix with the node short names if they're unique
unique_short_names = np.unique(short_names)
if unique_short_names.size == len(short_names):
for i, j, node_name, short_name, ctx_id in details:
columns[i][j] = "%s.%s" % (col, short_name)
continue
# otherwise try prefixing with the full names
unique_node_names = np.unique(node_names)
if unique_node_names.size == len(node_names):
# if the short name is common replace it with the long name
if unique_short_names.size == 1 \
and col.startswith(unique_short_names[0]):
for i, j, node_name, short_name, ctx_id in details:
columns[i][j] = col.replace(short_name, node_name)
else:
for i, j, node_name, short_name, ctx_id in details:
columns[i][j] = "%s.%s" % (col, node_name)
continue
# otherwise if the contexts are unique use a context id suffix
unique_ctx_ids = np.unique(ctx_ids)
if unique_ctx_ids.size == len(ctx_ids):
for i, j, node_name, short_name, ctx_id in details:
columns[i][j] = "%s.ctx-%s" % (col, ctx_id)
continue
        # If none of those are unique use a numeric suffix.
        # This should be quite unlikely.
        for x, (i, j, node_name, short_name, ctx_id) in enumerate(details):
            columns[i][j] = "%s.%d" % (col, x)
return columns
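# Sketch: two nodes that both produce a column "price" are disambiguated by
# their (unique) short names:
#   _relabel([["price"], ["price"]], ["pkg.a", "pkg.b"], ["a", "b"], [1, 1])
#   ->  [["price.a"], ["price.b"]]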
def _pairs_to_node_label_lists(node_label_pairs):
results = []
for node_or_node_label_pair in node_label_pairs:
if isinstance(node_or_node_label_pair, (tuple, list)):
# it's a tuple/list (node, label)
results.append(node_or_node_label_pair)
else:
# it's just a node - use None as the label and a default
# will be selected in _get_labels
results.append((node_or_node_label_pair, None))
# return ([node,...], [label,...])
return map(list, zip(*results))
class CSVWriter(object):
"""
callable object that appends values to a csv file
For use with mdf.run
"""
def __init__(self, fh, nodes, columns=None):
"""
Writes node values to a csv file for each date.
'fh' may be a file handle, or a filename, or a node.
If fh is a node it will be evaluated for each context used
and is expected to evaluate to the filename or file handle
to write the results to.
"""
# keep track of any file handles opened by this instance so they
# can be closed.
self.fh = fh
self.open_fhs = []
# fh may be either a file handle, a filename or a node
# that evaluates to a file handle or name.
self.writers = {}
if not isinstance(fh, MDFNode):
# if fh isn't a node use the same writer for all contexts
if isinstance(fh, basestring):
fh = open(fh, "wb")
self.open_fhs.append(fh)
writer = csv.writer(fh)
self.writers = defaultdict(lambda: writer)
self.handlers = None
if isinstance(nodes, MDFNode):
nodes = [nodes]
if len(nodes) > 1 and columns is None:
self.nodes, self.columns = _pairs_to_node_label_lists(nodes)
else:
self.nodes = nodes
self.columns = list(columns or [])[:len(nodes)]
self.columns += [None] * (len(nodes) - len(self.columns))
def __del__(self):
self.close()
def close(self):
"""closes any file handles opened by this writer"""
while self.open_fhs:
fh = self.open_fhs.pop()
fh.close()
self.writers.clear()
def __call__(self, date, ctx):
# get the node values from the context
values = [ctx.get_value(node) for node in self.nodes]
# get the writer from the context, or create it if it's not been
# created already.
ctx_id = ctx.get_id()
try:
writer = self.writers[ctx_id]
except KeyError:
fh = self.fh
if isinstance(fh, MDFNode):
fh = ctx.get_value(fh)
if isinstance(fh, basestring):
fh = open(fh, "wb")
self.open_fhs.append(fh)
writer = self.writers[ctx_id] = csv.writer(fh)
# figure out how to handle them and what to write in the header
if self.handlers is None:
header = ["date"]
self.handlers = []
for node, value, column in zip(self.nodes, values, self.columns):
if isinstance(column, MDFNode):
column = ctx.get_value(column)
header.extend(_get_labels(node, column, value))
                if isinstance(value, (basestring, int, float, bool, datetime.date)):
                    self.handlers.append(self._write_basetype)
                elif isinstance(value, pa.Series):
                    # NB: Series must be tested before the generic NDFrame
                    # check below, otherwise this branch is unreachable
                    self.handlers.append(self._write_series)
                elif isinstance(value, (list, tuple, np.ndarray, pa.Index, pa.core.generic.NDFrame)):
                    self.handlers.append(self._write_list)
else:
raise Exception("Unhandled type %s for node %s" % (type(value), node))
# write the header
writer.writerow(header)
# format the values and write the row
row = [date]
for handler, value in zip(self.handlers, values):
handler(value, row)
writer.writerow(row)
def _write_basetype(self, value, row):
row.append(value)
def _write_list(self, value, row):
row.extend(value)
def _write_series(self, value, row):
row.extend(value)
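# Illustrative usage with mdf.run (sketch; `mdf.run`, `date_range` and the
# node objects are assumed to exist in the calling code):
#   writer = CSVWriter("results.csv", [price_node, volume_node])
#   mdf.run(date_range, [writer])
#   writer.close()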
class NodeTypeHandler(object):
"""
Base class for NodeData handling in DataFrameBuilder. Sub-classes
should override _handle(). Callers should call handle()
"""
def __init__(self, node, filter=False):
self._name = node.short_name
self._filter = node.get_filter() if filter and isinstance(node, MDFEvalNode) else None
self._index = []
self._labels = set()
self._data = dict()
def handle(self, date, ctx, value):
"""
Stashes the date and then handles the data
in the sub-class
"""
if self._filter is None \
or ctx[self._filter]:
self._index.append(date)
self._handle(date, value)
def _handle(self, date, value):
raise NotImplementedError("_handle must be implemented in the subclass")
def get_dataframe(self, dtype=object):
"""
Returns a DataFrame containing the values accumulated
for each column for a node.
"""
columns = self.get_columns()
df = pa.DataFrame(data={}, index=self._index, columns=columns, dtype=dtype)
for (d, l), value in self._data.items():
df[l][d] = value
return df
def get_columns(self):
"""
returns the columns used to construct the dataframe
in get_dataframe
"""
return sorted(self._labels)
class NodeListTypeHandler(NodeTypeHandler):
def __init__(self, node, filter=False):
super(NodeListTypeHandler, self).__init__(node, filter=filter)
def _handle(self, date, value):
# the set of labels is fixed on the first callback
# and is of the form node.name.X for int X
self._labels = self._labels or [self._name + "." + str(i) for i in range(len(value))]
assert len(self._labels) == len(value)
for l, v in zip(self._labels, value):
self._data[(date, l)] = v
class NodeDictTypeHandler(NodeTypeHandler):
def __init__(self, node, filter=False):
super(NodeDictTypeHandler, self).__init__(node, filter=filter)
def _handle(self, date, value):
# the set of labels can grow over time
# and they reflect the big union of the dict keys
self._labels = self._labels.union(map(str, value.keys()))
for k, v in value.items():
self._data[(date, str(k))] = v
class NodeSeriesTypeHandler(NodeTypeHandler):
def __init__(self, node, filter=False):
super(NodeSeriesTypeHandler, self).__init__(node, filter=filter)
def _handle(self, date, value):
# the set of labels can grow over time
# and they reflect the big union of the row labels in the
# node value Series
self._labels = self._labels.union(map(str, value.index))
for l in value.index:
self._data[(date, str(l))] = value[l]
class NodeBaseTypeHandler(NodeTypeHandler):
def __init__(self, node, filter=False):
super(NodeBaseTypeHandler, self).__init__(node, filter=filter)
self._labels.add(self._name)
def _handle(self, date, value):
self._data[(date, self._name)] = value
class DataFrameBuilder(object):
# version number to provide limited backwards compatibility
__version__ = 2
def __init__(self, nodes, contexts=None, dtype=object, sparse_fill_value=None, filter=False,
start_date=None):
"""
Constructs a new DataFrameBuilder.
dtype and sparse_fill_value can be supplied as hints to the
data type that will be constructed and whether or not to try
and create a sparse data frame.
If `filter` is True and the nodes are filtered then only values
where all the filters are True will be returned.
NB. the labels parameter is currently not supported
"""
self.context_handler_dict = {}
self.filter = filter
        self.dtype = dtype
        self.sparse_fill_value = sparse_fill_value
self.start_date = start_date
self._finalized = False
self._cached_dataframes = {}
self._cached_columns = {}
if isinstance(nodes, MDFNode):
nodes = [nodes]
self.nodes = nodes
if contexts:
assert len(contexts) == len(nodes)
else:
contexts = []
self.contexts = contexts
self._last_ctx = None
def __call__(self, date, ctx):
# copy the version to this instance (this isn't done in the ctor as the regression
# testing works by creating the builders in the main process and then sending them
# to the remote processes - so the version is snapped when the builder is actually
# called).
self._version_ = self.__version__
self._last_ctx = ctx.get_id()
ctx_list = self.contexts or ([ctx] * len(self.nodes))
for ctx_, node in zip(ctx_list, self.nodes):
node_value = ctx_.get_value(node)
handler_dict = self.context_handler_dict.setdefault(ctx.get_id(), {})
key = (node.name, node.short_name, ctx_.get_id())
handler = handler_dict.get(key)
if not handler:
if isinstance(node_value, (basestring, int, float, bool, datetime.date)) \
or isinstance(node_value, tuple(np.typeDict.values())):
handler = NodeBaseTypeHandler(node, filter=self.filter)
elif isinstance(node_value, dict):
handler = NodeDictTypeHandler(node, filter=self.filter)
elif isinstance(node_value, pa.Series):
handler = NodeSeriesTypeHandler(node, filter=self.filter)
elif isinstance(node_value, (list, tuple, deque, np.ndarray, pa.Index, pa.core.generic.NDFrame)):
handler = NodeListTypeHandler(node, filter=self.filter)
else:
raise Exception("Unhandled type %s for node %s" % (type(node_value), node))
handler_dict[key] = handler
if (self.start_date is None) or (date >= self.start_date):
handler.handle(date, ctx_, node_value)
def clear(self):
self.context_handler_dict.clear()
self._cached_columns.clear()
self._cached_dataframes.clear()
def get_dataframe(self, ctx, dtype=None, sparse_fill_value=None):
ctx_id = ctx if isinstance(ctx, int) else ctx.get_id()
# if the builder's been finalized and there's a dataframe cached
# return that without trying to convert it to a sparse dataframe
# or changing the dtypes (dtype and sparse_fill_value are only
# hints).
try:
return self._cached_dataframes[ctx_id]
except KeyError:
pass
if dtype is None:
dtype = self.dtype
result_df = self._build_dataframe(ctx_id, dtype)
if sparse_fill_value is None:
sparse_fill_value = self.sparse_fill_value
if sparse_fill_value is not None:
# this doesn't always work depending on the actual dtype
# the dataframe ends up being
try:
result_df = result_df.to_sparse(fill_value=sparse_fill_value)
except TypeError:
pass
# try and infer types for any that are currently set to object
return result_df.convert_objects()
def _build_dataframe(self, ctx, dtype):
"""builds a dataframe from the collected data"""
ctx_id = ctx if isinstance(ctx, int) else ctx.get_id()
handler_dict = self.context_handler_dict[ctx_id]
if len(handler_dict) == 1:
# if there's only one handler simply get the dataframe from it
handler = next(iter(handler_dict.values()))
result_df = handler.get_dataframe(dtype=dtype)
else:
# otherwise do an outer join of all the handlers' dataframes
result_df = pa.DataFrame(dtype=dtype)
handler_keys, handlers = zip(*handler_dict.items())
dataframes = [h.get_dataframe(dtype=dtype) for h in handlers]
# relabel any overlapping columns
all_columns = [df.columns for df in dataframes]
node_names, short_names, ctx_ids = zip(*handler_keys)
new_columns = _relabel(all_columns, node_names, short_names, ctx_ids)
for df, cols in zip(dataframes, new_columns):
df.columns = cols
# join everything into a single dataframe
for df in dataframes:
result_df = result_df.join(df, how="outer")
result_df = result_df.reindex(columns=sorted(result_df.columns))
return result_df
def get_columns(self, node, ctx):
"""
returns the sub-set of columns in the dataframe returned
by get_dataframe that relate to a particular node
"""
ctx_id = ctx if isinstance(ctx, int) else ctx.get_id()
try:
return self._cached_columns[(ctx_id, node)]
except KeyError:
pass
handler_dict = self.context_handler_dict[ctx_id]
# the ctx is the root context passed to __call__, which may be
# different from the shifted contexts that the node was actually
# evaluated in.
# Get all the columns for this node in all sub-contexts.
columns = []
ctx_ids = []
for (node_name, short_name, sub_ctx_id), handler in handler_dict.items():
if node_name == node.name \
and short_name == node.short_name:
columns.append(handler.get_columns())
ctx_ids.append(sub_ctx_id)
# re-label in case the same node was evaluated in multiple sub-contexts
columns = _relabel(columns,
[node.name] * len(columns),
[node.short_name] * len(columns),
ctx_ids)
return reduce(operator.add, columns, [])
@property
def dataframes(self):
"""all dataframes created by this builder (one per context)"""
return [self.get_dataframe(ctx) for ctx in self.context_handler_dict.keys()]
@property
def dataframe(self):
return self.get_dataframe(self._last_ctx) if self._last_ctx is not None else None
def plot(self, show=True, **kwargs):
"""plots all collected dataframes and shows, if show=True"""
for df in self.dataframes:
df.plot(**kwargs)
legend = sorted(df.columns)
pp.legend(legend, loc='upper center', bbox_to_anchor=(0.5, -0.17), fancybox=True, shadow=True)
if show:
pp.show()
def finalize(self):
"""
Throw away intermediate structures and just retain any dataframes
and columns.
It's not possible to add more data to the builder after this has
been called.
"""
assert not self._finalized
# cache all dataframes and column sets
for ctx_id in list(self.context_handler_dict.keys()):
for node in self.nodes:
self._cached_columns[(ctx_id, node)] = self.get_columns(node, ctx_id)
self._cached_dataframes[ctx_id] = self.get_dataframe(ctx_id)
# delete the data for that context in case we're low on memory
del self.context_handler_dict[ctx_id]
# this should be empty now
assert len(self.context_handler_dict) == 0
# snap the version number if it's not already been taken (see __call__)
if not hasattr(self, "_version_"):
self._version_ = self.__version__
self._finalized = True
def combine_result(self, other, other_ctx, ctx):
"""
Adds a result from another df builder to this one.
If not already finalized this method will call finalize and
so no more data can be collected after this is called.
"""
ctx_id = ctx.get_id()
other_ctx_id = other_ctx.get_id()
# only the caches will be updated so make sure self has been
# finalized
if not self._finalized:
self.finalize()
# update self.nodes with any nodes from the other
nodes = set(self.nodes)
other_nodes = set(other.nodes)
additional_nodes = other_nodes.difference(nodes)
self.nodes += list(additional_nodes)
# copy the finalized data
for node in other.nodes:
self._cached_columns[(ctx_id, node)] = other.get_columns(node, other_ctx_id)
self._cached_dataframes[ctx_id] = other.get_dataframe(other_ctx_id)
class FinalValueCollector(object):
"""
callable object that collects the final values for a set of nodes.
For use with mdf.run
"""
def __init__(self, nodes):
if isinstance(nodes, MDFNode):
nodes = [nodes]
self.__nodes = nodes
self.__values = {}
self.__contexts = []
def __call__(self, date, ctx):
ctx_id = ctx.get_id()
self.__values[ctx_id] = [ctx.get_value(node) for node in self.__nodes]
if ctx_id not in self.__contexts:
self.__contexts.append(ctx_id)
def clear(self):
"""clears all previously collected values"""
self.__values.clear()
self.__contexts = []
def get_values(self, ctx=None):
"""returns the collected values for a context"""
if not self.__values:
return None
if ctx is None:
ctx = self.__contexts[-1]
ctx_id = ctx if isinstance(ctx, int) else ctx.get_id()
return self.__values.get(ctx_id, None)
def get_dict(self, ctx=None):
"""returns the collected values as a dict keyed by the nodes"""
values = self.get_values(ctx)
if values is None:
return None
return dict(zip(self.__nodes, values))
@property
def values(self):
"""returns the values for the last context"""
if not self.__contexts:
return None
ctx_id = self.__contexts[-1]
return self.get_values(ctx_id)
class NodeLogger(object):
"""
callable object for use with mdf run that logs a message
each time a node value changes.
"""
def __init__(self, nodes, fh=sys.stdout):
"""
``nodes`` is the list of node values to watch
``fh`` is a file like object to write to when changes are observed
"""
self.nodes = nodes
self.fh = fh
def __call__(self, date, ctx):
# get the max length of the node names for formatting nicely
max_len = max((len(node.name) for node in self.nodes))
fmt = "%%-%ds = %%s\n" % max_len
# get the initial values in the root context and any shifted contexts
root_ctx = ctx
values = [None] * len(self.nodes)
for i, node in enumerate(self.nodes):
values[i] = ctx[node]
# log the initial values
self.fh.write("%s:\n" % ctx)
for node, value in zip(self.nodes, values):
self.fh.write(fmt % (node.name, value))
self.fh.write("\n")
while True:
prev_values = list(values)
yield
# get the new values
for i, node in enumerate(self.nodes):
if node.has_value(ctx):
values[i] = ctx[node]
if values != prev_values:
self.fh.write("%s *changed*:\n" % ctx)
for node, value, prev_value in zip(self.nodes, values, prev_values):
if value != prev_value:
self.fh.write(fmt % (node.name, value))
self.fh.write("\n")
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests Keras multi worker fault tolerance."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import signal
import sys
import tempfile
import threading
from absl.testing import parameterized
from tensorflow.python.distribute import collective_all_reduce_strategy as collective_strategy
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribute_coordinator as dc
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.distribute import multi_worker_test_base as test_base
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import callbacks
from tensorflow.python.keras.distribute import multi_worker_testing_utils
from tensorflow.python.keras.distribute import multi_worker_training_state as training_state
from tensorflow.python.platform import test
def get_strategy_object(strategy_cls):
if strategy_cls == mirrored_strategy.MirroredStrategy:
return strategy_cls(mirrored_strategy.all_local_devices())
else:
# CollectiveAllReduceStrategy and ParameterServerStrategy.
return strategy_cls()
class KerasMultiWorkerFaultToleranceTest(test_base.IndependentWorkerTestBase,
parameterized.TestCase):
class PreemptionAtBatchBoundarySimulatingCallback(callbacks.Callback):
"""Callback to simulate preemtion at batch boundary."""
def on_epoch_begin(self, epoch, logs=None):
self._current_epoch = epoch
def on_batch_begin(self, batch, logs=None):
if self._current_epoch == 1 and batch == 1 and not test_base.is_chief():
        # Simulate preemption at the start of the second batch of the second epoch.
raise RuntimeError('Preemption!')
def on_batch_end(self, batch, logs=None):
assert self._current_epoch < 1 or batch < 1
def on_epoch_end(self, epoch, logs=None):
assert epoch < 1
# TODO(rchao): Add tests for checking 0th and 2nd epoch boundary.
class PreemptionAtEpochBoundarySimulatingCallback(callbacks.Callback):
"""Callback to simulate preemtion at epoch boundary."""
def on_epoch_begin(self, epoch, logs=None):
if epoch == 1 and not test_base.is_chief():
        # Simulate preemption at the start of the second epoch.
raise RuntimeError('Preemption!')
def on_epoch_end(self, epoch, logs=None):
assert epoch < 1
@combinations.generate(
combinations.combine(
# Eager runtime unfortunately cannot be tested with multi-threading.
# TODO(rchao): Add test to use multi-process for eager mode after
# b/132095481 is resolved.
mode=['graph'],
strategy_cls=[collective_strategy.CollectiveAllReduceStrategy],
required_gpus=[0, 1],
file_format=['h5', 'tf'],
preemption_callback=[
PreemptionAtEpochBoundarySimulatingCallback,
PreemptionAtBatchBoundarySimulatingCallback
],
# FT should work regardless of `ModelCheckpoint`'s parameters.
save_weights_only=[True, False],
load_weights_on_restart=[True, False],
))
def testFaultToleranceInSyncStrategy(self, strategy_cls, file_format,
preemption_callback, save_weights_only,
load_weights_on_restart):
"""Test fault-tolerance with multi-threading using sync dist-strat.
This test simulates multi-worker training that is interrupted by a
preemption, by having two threads, each of which represents a chief and a
non-chief worker, where the non-chief raises an error in the middle of
training loop. Upon excepting the error, a new thread with a new cluster
spec is created to simulate the recovered non-chief worker. Meanwhile, the
chief worker cannot proceed and hangs since the non-chief worker has
crashed. To simulate a restart of the chief, a new thread has been prepared
to run to take over chief with the help of a condition variable. It is
expected that after the restart of both chief and non-chief workers, the
training continues from the epoch they previously failed at. The test
concludes by verifying the preemption-interrupted training can finish with
the same loss and accuracy had the preemption not occurred.
TODO(rchao): Add test to check preemption on chief (possibly using multi
processes).
TODO(rchao): Add test to check fault-tolerance with multiple `model.fit()`.
Arguments:
strategy_cls: The strategy class to use.
file_format: `h5` or `tf`.
preemption_callback: The callback to simulate preemption.
      save_weights_only: The argument for `ModelCheckpoint`'s
        `save_weights_only`.
      load_weights_on_restart: The argument for `ModelCheckpoint`'s
        `load_weights_on_restart`.
"""
def _independent_worker_fn(*args, **kwargs): # pylint: disable=unused-argument
with test.mock.patch.object(dc, '_run_std_server',
self._make_mock_run_std_server()):
# `before_restart` is True for the threads that represent the original
# chief and non-chief worker, and False for threads that represent the
# restarted chief and non-chief workers.
before_restart = kwargs['before_restart']
# Model building under strategy scope. Following is the code we expect
# the user runs on every worker.
strategy = get_strategy_object(strategy_cls)
batch_size = 64
steps = 3
train_ds, _ = multi_worker_testing_utils.mnist_synthetic_dataset(
batch_size, steps)
with strategy.scope():
model = multi_worker_testing_utils.get_mnist_model((28, 28, 1))
# Function to start a new thread. This will be called twice in the
# following code: one represents the restart of the non-chief, and one
# represents the restart of the chief as a result of the restart of the
# non-chief (so the training can continue in sync).
def start_new_thread(new_chief):
new_thread_tf_config = json.loads(os.environ['TF_CONFIG'])
# Update the ports in new chief and new worker threads.
new_thread_tf_config['cluster']['worker'] = kwargs['reserved_ports']
# Since both new chief and new worker threads are started from the
# worker thread, we need to overwrite the tf config task index.
new_thread_tf_config['task']['index'] = 0 if new_chief else 1
return self._run_task_in_thread(
task_fn=_independent_worker_fn,
cluster_spec=None,
task_type=None,
task_id=None,
tf_config=new_thread_tf_config,
before_restart=False,
new_chief=new_chief)
try:
class CkptSavedEpochAssertingCallback(callbacks.Callback):
def __init__(self, test_obj):
super(CkptSavedEpochAssertingCallback, self).__init__()
self.test_obj = test_obj
def on_epoch_begin(self, epoch, logs=None):
# `_ckpt_saved_epoch` attribute is set at the end of every epoch.
self.test_obj.assertEqual(
K.eval(self.model._ckpt_saved_epoch) ==
training_state.CKPT_SAVED_EPOCH_UNUSED_VALUE, epoch == 0)
callbacks_list = [
callbacks.ModelCheckpoint(
filepath=saving_filepath,
save_weights_only=save_weights_only,
load_weights_on_restart=load_weights_on_restart),
CkptSavedEpochAssertingCallback(self)
]
if before_restart:
callbacks_list.append(preemption_callback())
self.assertFalse(hasattr(model, training_state.CKPT_SAVED_EPOCH))
history = model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=callbacks_list)
self.assertFalse(hasattr(model, training_state.CKPT_SAVED_EPOCH))
          # The `history` of each training run is collected so the results can
          # be compared against each other. It is expected that the training
          # results (loss and accuracy) are the same with or without
          # preemption.
self._histories.append(history.history)
except RuntimeError:
# pylint: disable=g-assert-in-except
self.assertTrue(before_restart)
# Reset the barrier so the new threads simulating recovery can
# continue.
self._barrier._counter = 0
self._barrier._flag = False
          # At this point we block the original non-chief thread, start the
          # new threads that simulate the restarted chief and non-chief, join
          # those threads, and return.
new_chief_thread = start_new_thread(new_chief=True)
new_worker_thread = start_new_thread(new_chief=False)
self.join_independent_workers([new_chief_thread, new_worker_thread])
return
# Successful end of a `fit()` call.
with self._lock:
self._successful_thread_ends += 1
self.assertFalse(before_restart)
# Common parameters
num_workers = 2
num_epoch = 3
# History list storing the results for preemption and no preemption cases.
self._histories = []
# Lock required to prevent race condition between two threads.
self._lock = threading.Lock()
strategy = get_strategy_object(strategy_cls)
def handler(signum, frame):
del signum, frame
      # `session.run()` within `model.fit()` can time out. Skip the test in
      # that case, since the timeout does not represent a failure of what this
      # test covers.
self.skipTest('Skipping test due to `session.run()` timeout.')
signal.signal(signal.SIGALRM, handler)
    # Fire the alarm at 240 seconds, before the 5-minute test timeout fails
    # the test.
signal.alarm(240)
def get_saving_dir_and_filepath():
saving_dir = tempfile.mkdtemp(prefix=self.get_temp_dir())
saving_filepath = os.path.join(saving_dir, 'checkpoint.' + file_format)
return saving_dir, saving_filepath
    # Case 1: Training for `num_epoch` epochs without preemptions.
cluster_spec = test_base.create_cluster_spec(num_workers=num_workers)
self._barrier = dc._Barrier(2)
self._successful_thread_ends = 0
# Get a new temporary filepath to save the checkpoint to.
saving_dir, saving_filepath = get_saving_dir_and_filepath()
threads = self.run_multiple_tasks_in_threads(
_independent_worker_fn,
cluster_spec,
# Pass `saving_filepath` from the parent thread to ensure every worker
# has the same filepath to save.
saving_filepath=saving_filepath,
before_restart=False,
new_chief=False)
threads_to_join = []
if strategy.extended.experimental_between_graph:
for ts in threads.values():
threads_to_join.extend(ts)
else:
threads_to_join = [threads['worker'][0]]
self.join_independent_workers(threads_to_join)
# `self.test_skipped_reason` could be set when a non-main thread attempts
# to skip the test.
# `multi_worker_test_base.skip_if_grpc_server_cant_be_started()` is an
# example of where this can be set. Since raising `SkipTest` in a non-main
# thread doesn't actually skip the test, we check if the test should be
# skipped here once we have joined the threads.
if getattr(self, 'test_skipped_reason', None) is not None:
self.skipTest(self.test_skipped_reason)
self.assertTrue(
training_state.remove_checkpoint_if_exists(saving_dir, saving_filepath))
self.assertEqual(self._successful_thread_ends, 2)
    # Case 2: Training for `num_epoch` epochs with preemptions.
    # The preemption is simulated at either an epoch boundary or a batch
    # boundary, depending on `preemption_callback`.
cluster_spec = test_base.create_cluster_spec(num_workers=num_workers)
self._barrier = dc._Barrier(2)
# Ports reserved for new threads simulating recovery.
reserved_ports = [
'localhost:%s' % test_base.pick_unused_port()
for _ in range(num_workers)
]
self._successful_thread_ends = 0
# Get a new temporary filepath to save the checkpoint to.
saving_dir, saving_filepath = get_saving_dir_and_filepath()
threads = self.run_multiple_tasks_in_threads(
_independent_worker_fn,
cluster_spec,
# Pass `saving_filepath` from the parent thread to ensure every worker
# has the same filepath to save.
saving_filepath=saving_filepath,
reserved_ports=reserved_ports,
before_restart=True,
new_chief=False)
threads_to_join = []
if strategy.extended.experimental_between_graph:
# Only join the non-chief thread since the first thread for chief will
# eventually hang and be ignored.
threads_to_join = [threads['worker'][1]]
else:
threads_to_join = [threads['worker'][0]]
self.join_independent_workers(threads_to_join)
if getattr(self, 'test_skipped_reason', None) is not None:
self.skipTest(self.test_skipped_reason)
self.assertTrue(
training_state.remove_checkpoint_if_exists(saving_dir, saving_filepath))
self.assertEqual(self._successful_thread_ends, 2)
def assert_all_elements_are_identical(list_to_check):
first_item = list_to_check[0]
for item in list_to_check[1:]:
self.assertAllClose(first_item, item, rtol=2e-5, atol=1e-5)
# Important: the results from preemption interrupted and non-interrupted
# cases should give the same final results.
assert_all_elements_are_identical(
[history['acc'][-1] for history in self._histories])
assert_all_elements_are_identical(
[history['loss'][-1] for history in self._histories])
    # The length of `self._histories` should be num_workers * num_cases
    # (2 * 2 = 4).
self.assertLen(self._histories, 4)
# Results from case 1 should have 3 full epochs.
self.assertLen(self._histories[0]['acc'], 3)
# Results from case 2 should only have 2 full epochs because it restarted at
# epoch 1.
self.assertLen(self._histories[-1]['acc'], 2)
if __name__ == '__main__':
with test.mock.patch.object(sys, 'exit', os._exit):
test.main()
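# Illustrative sketch (not part of the original test): the restart pattern
# above, distilled with plain `threading`. A thread that hits the simulated
# preemption clears the shared rendezvous point and spawns two fresh threads
# that stand in for the restarted chief and non-chief. All names below are
# hypothetical.
#
# import threading
#
# barrier = threading.Barrier(2)
#
# def worker(before_restart):
#   if before_restart:
#     barrier.reset()  # invalidate the rendezvous the failed pair was using
#     for _ in range(2):
#       threading.Thread(target=worker, args=(False,)).start()
#     return
#   barrier.wait()  # the restarted pair meets here and training resumes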
|
|
"""
The Plaid API
The Plaid REST API. Please see https://plaid.com/docs/api for more details. # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from plaid.model.asset_report_transaction_all_of import AssetReportTransactionAllOf
from plaid.model.location import Location
from plaid.model.payment_meta import PaymentMeta
from plaid.model.transaction_base import TransactionBase
globals()['AssetReportTransactionAllOf'] = AssetReportTransactionAllOf
globals()['Location'] = Location
globals()['PaymentMeta'] = PaymentMeta
globals()['TransactionBase'] = TransactionBase
class AssetReportTransaction(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('transaction_type',): {
'DIGITAL': "digital",
'PLACE': "place",
'SPECIAL': "special",
'UNRESOLVED': "unresolved",
},
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
        of type self; it must run after the class is loaded.
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
        of type self; it must run after the class is loaded.
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'original_description': (str, none_type,), # noqa: E501
'account_id': (str,), # noqa: E501
'amount': (float,), # noqa: E501
'iso_currency_code': (str, none_type,), # noqa: E501
'unofficial_currency_code': (str, none_type,), # noqa: E501
'date': (date,), # noqa: E501
'pending': (bool,), # noqa: E501
'transaction_id': (str,), # noqa: E501
'transaction_type': (str,), # noqa: E501
'pending_transaction_id': (str, none_type,), # noqa: E501
'category_id': (str, none_type,), # noqa: E501
'category': ([str], none_type,), # noqa: E501
'location': (Location,), # noqa: E501
'payment_meta': (PaymentMeta,), # noqa: E501
'account_owner': (str, none_type,), # noqa: E501
'name': (str,), # noqa: E501
'merchant_name': (str, none_type,), # noqa: E501
'check_number': (str, none_type,), # noqa: E501
'date_transacted': (str, none_type,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'original_description': 'original_description', # noqa: E501
'account_id': 'account_id', # noqa: E501
'amount': 'amount', # noqa: E501
'iso_currency_code': 'iso_currency_code', # noqa: E501
'unofficial_currency_code': 'unofficial_currency_code', # noqa: E501
'date': 'date', # noqa: E501
'pending': 'pending', # noqa: E501
'transaction_id': 'transaction_id', # noqa: E501
'transaction_type': 'transaction_type', # noqa: E501
'pending_transaction_id': 'pending_transaction_id', # noqa: E501
'category_id': 'category_id', # noqa: E501
'category': 'category', # noqa: E501
'location': 'location', # noqa: E501
'payment_meta': 'payment_meta', # noqa: E501
'account_owner': 'account_owner', # noqa: E501
'name': 'name', # noqa: E501
'merchant_name': 'merchant_name', # noqa: E501
'check_number': 'check_number', # noqa: E501
'date_transacted': 'date_transacted', # noqa: E501
}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, original_description, account_id, amount, iso_currency_code, unofficial_currency_code, date, pending, transaction_id, *args, **kwargs): # noqa: E501
"""AssetReportTransaction - a model defined in OpenAPI
Args:
original_description (str, none_type): The string returned by the financial institution to describe the transaction. For transactions returned by `/transactions/get`, this field is in beta and will be omitted unless the client is both enrolled in the closed beta program and has set `options.include_original_description` to `true`.
account_id (str): The ID of the account in which this transaction occurred.
amount (float): The settled value of the transaction, denominated in the account's currency, as stated in `iso_currency_code` or `unofficial_currency_code`. Positive values when money moves out of the account; negative values when money moves in. For example, debit card purchases are positive; credit card payments, direct deposits, and refunds are negative.
iso_currency_code (str, none_type): The ISO-4217 currency code of the transaction. Always `null` if `unofficial_currency_code` is non-null.
unofficial_currency_code (str, none_type): The unofficial currency code associated with the transaction. Always `null` if `iso_currency_code` is non-`null`. Unofficial currency codes are used for currencies that do not have official ISO currency codes, such as cryptocurrencies and the currencies of certain countries. See the [currency code schema](https://plaid.com/docs/api/accounts#currency-code-schema) for a full listing of supported `iso_currency_code`s.
date (date): For pending transactions, the date that the transaction occurred; for posted transactions, the date that the transaction posted. Both dates are returned in an [ISO 8601](https://wikipedia.org/wiki/ISO_8601) format ( `YYYY-MM-DD` ).
pending (bool): When `true`, identifies the transaction as pending or unsettled. Pending transaction details (name, type, amount, category ID) may change before they are settled.
transaction_id (str): The unique ID of the transaction. Like all Plaid identifiers, the `transaction_id` is case sensitive.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
transaction_type (str): Please use the `payment_channel` field, `transaction_type` will be deprecated in the future. `digital:` transactions that took place online. `place:` transactions that were made at a physical location. `special:` transactions that relate to banks, e.g. fees or deposits. `unresolved:` transactions that do not fit into the other three types. . [optional] # noqa: E501
pending_transaction_id (str, none_type): The ID of a posted transaction's associated pending transaction, where applicable.. [optional] # noqa: E501
category_id (str, none_type): The ID of the category to which this transaction belongs. For a full list of categories, see [`/categories/get`](https://plaid.com/docs/api/products/transactions/#categoriesget). If the `transactions` object was returned by an Assets endpoint such as `/asset_report/get/` or `/asset_report/pdf/get`, this field will only appear in an Asset Report with Insights.. [optional] # noqa: E501
category ([str], none_type): A hierarchical array of the categories to which this transaction belongs. For a full list of categories, see [`/categories/get`](https://plaid.com/docs/api/products/transactions/#categoriesget). If the `transactions` object was returned by an Assets endpoint such as `/asset_report/get/` or `/asset_report/pdf/get`, this field will only appear in an Asset Report with Insights.. [optional] # noqa: E501
location (Location): [optional] # noqa: E501
payment_meta (PaymentMeta): [optional] # noqa: E501
account_owner (str, none_type): The name of the account owner. This field is not typically populated and only relevant when dealing with sub-accounts.. [optional] # noqa: E501
name (str): The merchant name or transaction description. If the `transactions` object was returned by a Transactions endpoint such as `/transactions/get`, this field will always appear. If the `transactions` object was returned by an Assets endpoint such as `/asset_report/get/` or `/asset_report/pdf/get`, this field will only appear in an Asset Report with Insights.. [optional] # noqa: E501
merchant_name (str, none_type): The merchant name, as extracted by Plaid from the `name` field.. [optional] # noqa: E501
check_number (str, none_type): The check number of the transaction. This field is only populated for check transactions.. [optional] # noqa: E501
            date_transacted (str, none_type): The date on which the transaction took place, in ISO 8601 format.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
required_args = {
'original_description': original_description,
'account_id': account_id,
'amount': amount,
'iso_currency_code': iso_currency_code,
'unofficial_currency_code': unofficial_currency_code,
'date': date,
'pending': pending,
'transaction_id': transaction_id,
}
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(
constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in kwargs.items():
if var_name in unused_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
not self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
AssetReportTransactionAllOf,
TransactionBase,
],
'oneOf': [
],
}
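# Illustrative usage sketch (not part of the generated module): constructing
# the composed model directly with its required positional arguments. All
# field values below are made up.
#
# from datetime import date as date_cls
# from plaid.model.asset_report_transaction import AssetReportTransaction
#
# txn = AssetReportTransaction(
#     original_description=None,
#     account_id='acc-123',
#     amount=12.34,
#     iso_currency_code='USD',
#     unofficial_currency_code=None,
#     date=date_cls(2021, 1, 15),
#     pending=False,
#     transaction_id='txn-456',
# )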
|
|
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.cells import utils as cells_utils
from nova import objects
from nova.tests.functional.api_sample_tests import api_sample_base
from nova.virt import fake
class HypervisorsSampleJsonTests(api_sample_base.ApiSampleTestBaseV21):
ADMIN_API = True
sample_dir = "os-hypervisors"
def test_hypervisors_list(self):
response = self._do_get('os-hypervisors')
self._verify_response('hypervisors-list-resp', {}, response, 200)
def test_hypervisors_search(self):
response = self._do_get('os-hypervisors/fake/search')
self._verify_response('hypervisors-search-resp', {}, response, 200)
def test_hypervisors_without_servers(self):
response = self._do_get('os-hypervisors/fake/servers')
self._verify_response('hypervisors-without-servers-resp',
{}, response, 200)
@mock.patch("nova.compute.api.HostAPI.instance_get_all_by_host")
def test_hypervisors_with_servers(self, mock_instance_get):
instance = [
{
"deleted": None,
"name": "test_server1",
"uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
},
{
"deleted": None,
"name": "test_server2",
"uuid": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb"
}]
mock_instance_get.return_value = instance
response = self._do_get('os-hypervisors/fake/servers')
self._verify_response('hypervisors-with-servers-resp', {},
response, 200)
def test_hypervisors_detail(self):
hypervisor_id = '1'
subs = {
'hypervisor_id': hypervisor_id,
'service_id': '[0-9]+',
}
response = self._do_get('os-hypervisors/detail')
self._verify_response('hypervisors-detail-resp', subs, response, 200)
def test_hypervisors_show(self):
hypervisor_id = '1'
subs = {
'hypervisor_id': hypervisor_id,
'service_id': '[0-9]+',
}
response = self._do_get('os-hypervisors/%s' % hypervisor_id)
self._verify_response('hypervisors-show-resp', subs, response, 200)
def test_hypervisors_statistics(self):
response = self._do_get('os-hypervisors/statistics')
self._verify_response('hypervisors-statistics-resp', {}, response, 200)
def test_hypervisors_uptime(self):
def fake_get_host_uptime(self, context, hyp):
return (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
" 0.20, 0.12, 0.14")
self.stub_out('nova.compute.api.HostAPI.get_host_uptime',
fake_get_host_uptime)
hypervisor_id = '1'
response = self._do_get('os-hypervisors/%s/uptime' % hypervisor_id)
subs = {
'hypervisor_id': hypervisor_id,
}
self._verify_response('hypervisors-uptime-resp', subs, response, 200)
@mock.patch("nova.servicegroup.API.service_is_up", return_value=True)
class HypervisorsCellsSampleJsonTests(api_sample_base.ApiSampleTestBaseV21):
ADMIN_API = True
sample_dir = "os-hypervisors"
def setUp(self):
self.flags(enable=True, cell_type='api', group='cells')
super(HypervisorsCellsSampleJsonTests, self).setUp()
def test_hypervisor_uptime(self, mocks):
fake_hypervisor = objects.ComputeNode(id=1, host='fake-mini',
hypervisor_hostname='fake-mini')
def fake_get_host_uptime(self, context, hyp):
return (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
" 0.20, 0.12, 0.14")
def fake_compute_node_get(self, context, hyp):
return fake_hypervisor
def fake_service_get_by_compute_host(self, context, host):
return cells_utils.ServiceProxy(
objects.Service(id=1, host='fake-mini', disabled=False,
disabled_reason=None),
'cell1')
self.stub_out(
'nova.compute.cells_api.HostAPI.compute_node_get',
fake_compute_node_get)
self.stub_out(
'nova.compute.cells_api.HostAPI.service_get_by_compute_host',
fake_service_get_by_compute_host)
self.stub_out(
'nova.compute.cells_api.HostAPI.get_host_uptime',
fake_get_host_uptime)
hypervisor_id = fake_hypervisor.id
response = self._do_get('os-hypervisors/%s/uptime' % hypervisor_id)
subs = {'hypervisor_id': str(hypervisor_id)}
self._verify_response('hypervisors-uptime-resp', subs, response, 200)
class HypervisorsSampleJson228Tests(HypervisorsSampleJsonTests):
microversion = '2.28'
scenarios = [('v2_28', {'api_major_version': 'v2.1'})]
def setUp(self):
super(HypervisorsSampleJson228Tests, self).setUp()
self.api.microversion = self.microversion
class HypervisorsSampleJson233Tests(api_sample_base.ApiSampleTestBaseV21):
ADMIN_API = True
sample_dir = "os-hypervisors"
microversion = '2.33'
scenarios = [('v2_33', {'api_major_version': 'v2.1'})]
def setUp(self):
super(HypervisorsSampleJson233Tests, self).setUp()
self.api.microversion = self.microversion
# Start a new compute service to fake a record with hypervisor id=2
# for pagination test.
host = 'host1'
fake.set_nodes([host])
self.addCleanup(fake.restore_nodes)
self.start_service('compute', host=host)
def test_hypervisors_list(self):
response = self._do_get('os-hypervisors?limit=1&marker=1')
self._verify_response('hypervisors-list-resp', {}, response, 200)
def test_hypervisors_detail(self):
subs = {
'hypervisor_id': '2',
'host': 'host1',
'host_name': 'host1'
}
response = self._do_get('os-hypervisors/detail?limit=1&marker=1')
self._verify_response('hypervisors-detail-resp', subs, response, 200)
class HypervisorsSampleJson253Tests(HypervisorsSampleJson228Tests):
microversion = '2.53'
scenarios = [('v2_53', {'api_major_version': 'v2.1'})]
def setUp(self):
super(HypervisorsSampleJson253Tests, self).setUp()
self.compute_node_1 = self.compute.service_ref.compute_node
def generalize_subs(self, subs, vanilla_regexes):
"""Give the test a chance to modify subs after the server response
was verified, and before the on-disk doc/api_samples file is checked.
"""
# When comparing the template to the sample we just care that the
# hypervisor id and service id are UUIDs.
subs['hypervisor_id'] = vanilla_regexes['uuid']
subs['service_id'] = vanilla_regexes['uuid']
return subs
def test_hypervisors_list(self):
# Start another compute service to get a 2nd compute for paging tests.
compute_node_2 = self.start_service(
'compute', host='host2').service_ref.compute_node
marker = self.compute_node_1.uuid
response = self._do_get('os-hypervisors?limit=1&marker=%s' % marker)
subs = {'hypervisor_id': compute_node_2.uuid}
self._verify_response('hypervisors-list-resp', subs, response, 200)
def test_hypervisors_detail(self):
# Start another compute service to get a 2nd compute for paging tests.
host = 'host2'
fake.set_nodes([host])
self.addCleanup(fake.restore_nodes)
service_2 = self.start_service('compute', host=host).service_ref
compute_node_2 = service_2.compute_node
marker = self.compute_node_1.uuid
subs = {
'hypervisor_id': compute_node_2.uuid,
'service_id': service_2.uuid
}
response = self._do_get('os-hypervisors/detail?limit=1&marker=%s' %
marker)
self._verify_response('hypervisors-detail-resp', subs, response, 200)
@mock.patch("nova.compute.api.HostAPI.instance_get_all_by_host")
def test_hypervisors_detail_with_servers(self, instance_get_all_by_host):
"""List hypervisors with details and with hosted servers."""
instances = [
{
"name": "test_server1",
"uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
},
{
"name": "test_server2",
"uuid": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb"
}]
instance_get_all_by_host.return_value = instances
response = self._do_get('os-hypervisors/detail?with_servers=1')
subs = {
'hypervisor_id': self.compute_node_1.uuid,
'service_id': self.compute.service_ref.uuid,
}
self._verify_response('hypervisors-detail-with-servers-resp',
subs, response, 200)
def test_hypervisors_search(self):
"""The search route is deprecated in 2.53 and is now a query parameter
on the GET /os-hypervisors API.
"""
response = self._do_get(
'os-hypervisors?hypervisor_hostname_pattern=fake')
subs = {'hypervisor_id': self.compute_node_1.uuid}
self._verify_response('hypervisors-search-resp', subs, response, 200)
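    # Illustrative mapping (not part of the test): at microversion 2.53 the
    # deprecated route
    #   GET /os-hypervisors/fake/search
    # is expressed as a query parameter on the list API instead:
    #   GET /os-hypervisors?hypervisor_hostname_pattern=fake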
@mock.patch("nova.compute.api.HostAPI.instance_get_all_by_host")
def test_hypervisors_with_servers(self, instance_get_all_by_host):
"""The servers route is deprecated in 2.53 and is now a query parameter
on the GET /os-hypervisors API.
"""
instances = [
{
"name": "test_server1",
"uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
},
{
"name": "test_server2",
"uuid": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb"
}]
instance_get_all_by_host.return_value = instances
response = self._do_get('os-hypervisors?with_servers=true')
subs = {'hypervisor_id': self.compute_node_1.uuid}
self._verify_response('hypervisors-with-servers-resp', subs,
response, 200)
def test_hypervisors_without_servers(self):
# This is the same as GET /os-hypervisors in 2.53 which is covered by
# test_hypervisors_list already.
pass
def test_hypervisors_uptime(self):
def fake_get_host_uptime(self, context, hyp):
return (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
" 0.20, 0.12, 0.14")
self.stub_out('nova.compute.api.HostAPI.get_host_uptime',
fake_get_host_uptime)
hypervisor_id = self.compute_node_1.uuid
response = self._do_get('os-hypervisors/%s/uptime' % hypervisor_id)
subs = {
'hypervisor_id': hypervisor_id,
}
self._verify_response('hypervisors-uptime-resp', subs, response, 200)
def test_hypervisors_show(self):
hypervisor_id = self.compute_node_1.uuid
subs = {
'hypervisor_id': hypervisor_id,
'service_id': self.compute.service_ref.uuid,
}
response = self._do_get('os-hypervisors/%s' % hypervisor_id)
self._verify_response('hypervisors-show-resp', subs, response, 200)
@mock.patch("nova.compute.api.HostAPI.instance_get_all_by_host")
def test_hypervisors_show_with_servers(self, instance_get_all_by_host):
"""Tests getting details for a specific hypervisor and including the
hosted servers in the response.
"""
instances = [
{
"name": "test_server1",
"uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
},
{
"name": "test_server2",
"uuid": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb"
}]
instance_get_all_by_host.return_value = instances
hypervisor_id = self.compute_node_1.uuid
subs = {
'hypervisor_id': hypervisor_id,
'service_id': self.compute.service_ref.uuid,
}
response = self._do_get('os-hypervisors/%s?with_servers=1' %
hypervisor_id)
self._verify_response('hypervisors-show-with-servers-resp', subs,
response, 200)
|
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script searches for unused art assets listed in a .grd file.
It uses git grep to look for references to the IDR resource id or the base
filename. If neither is found, the file is reported unused.
Requires a git checkout. Must be run from your checkout's "src" root.
Example:
cd /work/chrome/src
  tools/resources/find_unused_resources.py chrome/browser/browser_resources.grd
"""
from __future__ import print_function
__author__ = 'jamescook@chromium.org (James Cook)'
import os
import re
import subprocess
import sys
def GetBaseResourceId(resource_id):
"""Removes common suffixes from a resource ID.
  Removes suffixes that may be added by macros like IMAGE_GRID or IMAGE_BORDER.
For example, converts IDR_FOO_LEFT and IDR_FOO_RIGHT to just IDR_FOO.
Args:
resource_id: String resource ID.
Returns:
A string with the base part of the resource ID.
"""
suffixes = [
'_TOP_LEFT', '_TOP', '_TOP_RIGHT',
'_LEFT', '_CENTER', '_RIGHT',
'_BOTTOM_LEFT', '_BOTTOM', '_BOTTOM_RIGHT',
'_TL', '_T', '_TR',
'_L', '_M', '_R',
'_BL', '_B', '_BR']
# Note: This does not check _HOVER, _PRESSED, _HOT, etc. as those are never
# used in macros.
for suffix in suffixes:
if resource_id.endswith(suffix):
resource_id = resource_id[:-len(suffix)]
return resource_id
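# Illustrative examples (not part of the original script) of the stripping
# above; '_TOP_LEFT' is checked before '_TOP' and '_LEFT', so grid suffixes
# are removed whole:
#   GetBaseResourceId('IDR_FOO_TOP_LEFT')  -> 'IDR_FOO'
#   GetBaseResourceId('IDR_FOO_BR')        -> 'IDR_FOO'
#   GetBaseResourceId('IDR_FOO')           -> 'IDR_FOO' (unchanged)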
def FindFilesWithContents(string_a, string_b):
"""Returns list of paths of files that contain |string_a| or |string_b|.
Uses --name-only to print the file paths. The default behavior of git grep
is to OR together multiple patterns.
Args:
string_a: A string to search for (not a regular expression).
string_b: As above.
Returns:
A list of file paths as strings.
"""
matching_files = subprocess.check_output([
'git', 'grep', '--name-only', '--fixed-strings', '-e', string_a,
'-e', string_b])
files_list = matching_files.split('\n')
# The output ends in a newline, so slice that off.
files_list = files_list[:-1]
return files_list
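# For reference (illustrative): the subprocess call above is equivalent to
# running, for example:
#   git grep --name-only --fixed-strings -e IDR_FOO -e foo.png
# which prints each matching file path once.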
def GetUnusedResources(grd_filepath):
"""Returns a list of resources that are unused in the code.
Prints status lines to the console because this function is quite slow.
Args:
grd_filepath: Path to a .grd file listing resources.
Returns:
A list of pairs of [resource_id, filepath] for the unused resources.
"""
unused_resources = []
grd_file = open(grd_filepath, 'r')
grd_data = grd_file.read()
print('Checking:')
# Match the resource id and file path out of substrings like:
# ...name="IDR_FOO_123" file="common/foo.png"...
# by matching between the quotation marks.
pattern = re.compile(
r"""name="([^"]*)" # Match resource ID between quotes.
\s* # Run of whitespace, including newlines.
file="([^"]*)" # Match file path between quotes.""",
re.VERBOSE)
# Use finditer over the file contents because there may be newlines between
# the name and file attributes.
searched = set()
for result in pattern.finditer(grd_data):
# Extract the IDR resource id and file path.
resource_id = result.group(1)
filepath = result.group(2)
filename = os.path.basename(filepath)
base_resource_id = GetBaseResourceId(resource_id)
# Do not bother repeating searches.
key = (base_resource_id, filename)
if key in searched:
continue
searched.add(key)
# Print progress as we go along.
print(resource_id)
# Ensure the resource isn't used anywhere by checking both for the resource
# id (which should appear in C++ code) and the raw filename (in case the
# file is referenced in a script, test HTML file, etc.).
matching_files = FindFilesWithContents(base_resource_id, filename)
# Each file is matched once in the resource file itself. If there are no
# other matching files, it is unused.
if len(matching_files) == 1:
# Give the user some happy news.
print('Unused!')
unused_resources.append([resource_id, filepath])
return unused_resources
def GetScaleDirectories(resources_path):
"""Returns a list of paths to per-scale-factor resource directories.
Assumes the directory names end in '_percent', for example,
ash/resources/default_200_percent or
chrome/app/theme/resources/touch_140_percent
Args:
resources_path: The base path of interest.
Returns:
A list of paths relative to the 'src' directory.
"""
file_list = os.listdir(resources_path)
scale_directories = []
for file_entry in file_list:
file_path = os.path.join(resources_path, file_entry)
if os.path.isdir(file_path) and file_path.endswith('_percent'):
scale_directories.append(file_path)
scale_directories.sort()
return scale_directories
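# Illustrative example (hypothetical layout): given the directories
#   chrome/app/theme/resources/default_100_percent
#   chrome/app/theme/resources/default_200_percent
# GetScaleDirectories('chrome/app/theme/resources') returns both paths in
# sorted order; entries that are files or lack the '_percent' suffix are
# skipped.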
def main():
# The script requires exactly one parameter, the .grd file path.
if len(sys.argv) != 2:
print('Usage: tools/resources/find_unused_resources.py <path/to/grd>')
sys.exit(1)
grd_filepath = sys.argv[1]
# Try to ensure we are in a source checkout.
current_dir = os.getcwd()
if os.path.basename(current_dir) != 'src':
print('Script must be run in your "src" directory.')
sys.exit(1)
# We require a git checkout to use git grep.
if not os.path.exists(current_dir + '/.git'):
print('You must use a git checkout for this script to run.')
print(current_dir + '/.git', 'not found.')
sys.exit(1)
# Look up the scale-factor directories.
resources_path = os.path.dirname(grd_filepath)
scale_directories = GetScaleDirectories(resources_path)
if not scale_directories:
print('No scale directories (like "default_100_percent") found.')
sys.exit(1)
# |unused_resources| stores pairs of [resource_id, filepath] for resource ids
# that are not referenced in the code.
unused_resources = GetUnusedResources(grd_filepath)
if not unused_resources:
print('All resources are used.')
sys.exit(0)
# Dump our output for the user.
print()
print('Unused resource ids:')
for resource_id, filepath in unused_resources:
print(resource_id)
# Print a list of 'git rm' command lines to remove unused assets.
print()
print('Unused files:')
for resource_id, filepath in unused_resources:
for directory in scale_directories:
print('git rm ' + os.path.join(directory, filepath))
if __name__ == '__main__':
main()
|
|
# Copyright (c) 2011 Bastian Venthur
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Python library for the AR.Drone.
V.1 This module was tested with Python 2.6.6 and AR.Drone vanilla firmware 1.5.1.
V.2.alpha
"""
import logging
import socket
import struct
import sys
import threading
import multiprocessing
import arnetwork
import time
import numpy as np
__author__ = "Bastian Venthur"
ARDRONE_NAVDATA_PORT = 5554
ARDRONE_VIDEO_PORT = 5555
ARDRONE_COMMAND_PORT = 5556
ARDRONE_CONTROL_PORT = 5559
SESSION_ID = "943dac23"
USER_ID = "36355d78"
APP_ID = "21d958e4"
DEBUG = False
# 0: "Not defined"
# 131072: "Landed"
# 393216: "Taking-off-Floor"
# 393217: "Taking-off-Air"
# 262144: "Hovering"
# 524288: "Landing"
# 458752: "Stabilizing"
# 196608: "Moving"
# 262153 and 196613 and 262155 and 196614 and 458753: "Undefined"
ctrl_state_dict={0:0, 131072:1, 393216:2, 393217:3, 262144:4, 524288:5, 458752:6, 196608:7, 262153:8, 196613:9, 262155:10, 196614:11, 458753: 12}
class ARDrone(object):
"""ARDrone Class.
    Instantiate this class to control your drone and receive decoded video and
navdata.
Possible value for video codec (drone2):
NULL_CODEC = 0,
UVLC_CODEC = 0x20, // codec_type value is used for START_CODE
P264_CODEC = 0x40,
MP4_360P_CODEC = 0x80,
H264_360P_CODEC = 0x81,
MP4_360P_H264_720P_CODEC = 0x82,
H264_720P_CODEC = 0x83,
MP4_360P_SLRS_CODEC = 0x84,
H264_360P_SLRS_CODEC = 0x85,
H264_720P_SLRS_CODEC = 0x86,
H264_AUTO_RESIZE_CODEC = 0x87, // resolution is automatically adjusted according to bitrate
MP4_360P_H264_360P_CODEC = 0x88,
"""
def __init__(self, is_ar_drone_2=False, hd=False):
self.seq_nr = 1
self.timer_t = 0.2
self.com_watchdog_timer = threading.Timer(self.timer_t, self.commwdg)
self.lock = threading.Lock()
self.speed = 0.2
self.hd = hd
if (self.hd):
self.image_shape = (720, 1280, 3)
else:
self.image_shape = (360, 640, 3)
time.sleep(0.5)
self.config_ids_string = [SESSION_ID, USER_ID, APP_ID]
self.configure_multisession(SESSION_ID, USER_ID, APP_ID, self.config_ids_string)
self.set_session_id (self.config_ids_string, SESSION_ID)
time.sleep(0.5)
self.set_profile_id(self.config_ids_string, USER_ID)
time.sleep(0.5)
self.set_app_id(self.config_ids_string, APP_ID)
time.sleep(0.5)
self.set_video_bitrate_control_mode(self.config_ids_string, "1")
time.sleep(0.5)
self.set_video_bitrate(self.config_ids_string, "10000")
time.sleep(0.5)
self.set_max_bitrate(self.config_ids_string, "10000")
time.sleep(0.5)
self.set_fps(self.config_ids_string, "30")
time.sleep(0.5)
if (self.hd):
self.set_video_codec(self.config_ids_string, 0x83)
else:
self.set_video_codec(self.config_ids_string, 0x81)
self.last_command_is_hovering = True
self.com_pipe, com_pipe_other = multiprocessing.Pipe()
if sys.platform == 'win32':
self.com_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.com_socket.bind(('127.0.0.1', 17482))
self.navdata = dict()
self.navdata[0] = dict(zip(['ctrl_state', 'battery', 'theta', 'phi', 'psi', 'altitude', 'vx', 'vy', 'vz', 'num_frames'], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]))
self.network_process = arnetwork.ARDroneNetworkProcess(com_pipe_other, is_ar_drone_2, self)
self.network_process.start()
if sys.platform == 'win32':
# print "waiting for connection"
self.com_socket.listen(3)
self.conn_network, addr = self.com_socket.accept()
# print "got connection"
self.image = np.zeros(self.image_shape, np.uint8)
self.time = 0
self.last_command_is_hovering = True
time.sleep(1.0)
self.at(at_config_ids , self.config_ids_string)
self.at(at_config, "general:navdata_demo", "TRUE")
def takeoff(self):
"""Make the drone takeoff."""
self.at(at_ftrim)
self.at(at_config, "control:altitude_max", "20000")
self.at(at_ref, True)
def land(self):
"""Make the drone land."""
self.at(at_ref, False)
def hover(self):
"""Make the drone hover."""
self.at(at_pcmd, False, 0, 0, 0, 0)
def move_left(self):
"""Make the drone move left."""
self.at(at_pcmd, True, -self.speed, 0, 0, 0)
def move_right(self):
"""Make the drone move right."""
self.at(at_pcmd, True, self.speed, 0, 0, 0)
def move_up(self):
"""Make the drone rise upwards."""
self.at(at_pcmd, True, 0, 0, self.speed, 0)
def move_down(self):
"""Make the drone decent downwards."""
self.at(at_pcmd, True, 0, 0, -self.speed, 0)
def move_forward(self):
"""Make the drone move forward."""
self.at(at_pcmd, True, 0, -self.speed, 0, 0)
def move_backward(self):
"""Make the drone move backwards."""
self.at(at_pcmd, True, 0, self.speed, 0, 0)
def turn_left(self):
"""Make the drone rotate left."""
self.at(at_pcmd, True, 0, 0, 0, -self.speed)
def turn_right(self):
"""Make the drone rotate right."""
self.at(at_pcmd, True, 0, 0, 0, self.speed)
def reset(self):
"""Toggle the drone's emergency state."""
# Enter emergency mode
self.at(at_ref, False, True)
self.at(at_ref, False, False)
# Leave emergency mode
self.at(at_ref, False, True)
def trim(self):
"""Flat trim the drone."""
self.at(at_ftrim)
def set_speed(self, speed):
"""Set the drone's speed.
Valid values are floats from [0..1]
"""
self.speed = speed
def set_camera_view(self, downward):
"""
        Set which video camera is used. If 'downward' is true, the
        downward-facing camera is selected - otherwise the front camera.
"""
channel = None
if downward:
channel = 0
else:
channel = 1
self.set_video_channel(self.config_ids_string, channel)
def at(self, cmd, *args, **kwargs):
"""Wrapper for the low level at commands.
This method takes care that the sequence number is increased after each
at command and the watchdog timer is started to make sure the drone
receives a command at least every second.
"""
self.lock.acquire()
self.com_watchdog_timer.cancel()
cmd(self.seq_nr, *args, **kwargs)
self.seq_nr += 1
self.com_watchdog_timer = threading.Timer(self.timer_t, self.commwdg)
self.com_watchdog_timer.start()
self.lock.release()
def configure_multisession(self, session_id, user_id, app_id, config_ids_string):
self.at(at_config, "custom:session_id", session_id)
self.at(at_config, "custom:profile_id", user_id)
self.at(at_config, "custom:application_id", app_id)
def set_session_id (self, config_ids_string, session_id):
self.at(at_config_ids , config_ids_string)
self.at(at_config, "custom:session_id", session_id)
def set_profile_id (self, config_ids_string, profile_id):
self.at(at_config_ids , config_ids_string)
self.at(at_config, "custom:profile_id", profile_id)
def set_app_id (self, config_ids_string, app_id):
self.at(at_config_ids , config_ids_string)
self.at(at_config, "custom:application_id", app_id)
def set_video_bitrate_control_mode (self, config_ids_string, mode):
self.at(at_config_ids , config_ids_string)
self.at(at_config, "video:bitrate_control_mode", mode)
def set_video_bitrate (self, config_ids_string, bitrate):
self.at(at_config_ids , config_ids_string)
self.at(at_config, "video:bitrate", bitrate)
def set_video_channel(self, config_ids_string, channel):
self.at(at_config_ids , config_ids_string)
self.at(at_config, "video:video_channel", channel)
def set_max_bitrate(self, config_ids_string, max_bitrate):
self.at(at_config_ids , config_ids_string)
self.at(at_config, "video:max_bitrate", max_bitrate)
def set_fps (self, config_ids_string, fps):
self.at(at_config_ids , config_ids_string)
self.at(at_config, "video:codec_fps", fps)
def set_video_codec (self, config_ids_string, codec):
self.at(at_config_ids , config_ids_string)
self.at(at_config, "video:video_codec", codec)
def commwdg(self):
"""Communication watchdog signal.
        This needs to be sent regularly to keep the communication with the drone
alive.
"""
self.at(at_comwdg)
def halt(self):
"""Shutdown the drone.
This method does not land or halt the actual drone, but the
communication with the drone. You should call it at the end of your
application to close all sockets, pipes, processes and threads related
with this object.
"""
self.lock.acquire()
self.com_watchdog_timer.cancel()
self.com_pipe.send('die!')
# print "sending die!"
if sys.platform == 'win32':
self.conn_network.sendall('die!')
self.network_process.terminate()
self.network_process.join()
self.lock.release()
if sys.platform == 'win32':
self.com_socket.close()
def get_image(self):
_im = np.copy(self.image)
return _im
def get_navdata(self):
return self.navdata
def set_navdata(self, navdata):
self.navdata = navdata
def set_image(self, image):
if (image.shape == self.image_shape):
self.image = image
def apply_command(self, command):
available_commands = ["emergency",
"land", "takeoff", "move_left", "move_right", "move_down", "move_up",
"move_backward", "move_forward", "turn_left", "turn_right", "hover"]
if command not in available_commands:
logging.error("Command %s is not a recognized command" % command)
if command != "hover":
self.last_command_is_hovering = False
if (command == "emergency"):
self.reset()
elif (command == "land"):
self.land()
self.last_command_is_hovering = True
elif (command == "takeoff"):
self.takeoff()
self.last_command_is_hovering = True
elif (command == "move_left"):
self.move_left()
elif (command == "move_right"):
self.move_right()
elif (command == "move_down"):
self.move_down()
elif (command == "move_up"):
self.move_up()
elif (command == "move_backward"):
self.move_backward()
elif (command == "move_forward"):
self.move_forward()
elif (command == "turn_left"):
self.turn_left()
elif (command == "turn_right"):
self.turn_right()
elif (command == "hover" and not self.last_command_is_hovering):
self.hover()
self.last_command_is_hovering = True
class ARDrone2(ARDrone):
def __init__(self, hd=False):
ARDrone.__init__(self, True, hd)
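# Minimal usage sketch (illustrative; assumes an AR.Drone 2 reachable on its
# default network address, 192.168.1.1):
#
# drone = ARDrone2()
# drone.takeoff()
# time.sleep(5)
# drone.hover()
# drone.land()
# drone.halt()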
###############################################################################
### Low level AT Commands
###############################################################################
def at_ref(seq, takeoff, emergency=False):
"""
    Basic behaviour of the drone: take-off/landing, emergency stop/reset.
Parameters:
seq -- sequence number
takeoff -- True: Takeoff / False: Land
emergency -- True: Turn off the engines
"""
p = 0b10001010101000000000000000000
if takeoff:
p += 0b1000000000
if emergency:
p += 0b0100000000
at("REF", seq, [p])
def at_pcmd(seq, progressive, lr, fb, vv, va):
"""
Makes the drone move (translate/rotate).
Parameters:
seq -- sequence number
progressive -- True: enable progressive commands, False: disable (i.e.
enable hovering mode)
lr -- left-right tilt: float [-1..1] negative: left, positive: right
    fb -- front-back tilt: float [-1..1] negative: forwards, positive:
backwards
vv -- vertical speed: float [-1..1] negative: go down, positive: rise
va -- angular speed: float [-1..1] negative: spin left, positive: spin
right
The above float values are a percentage of the maximum speed.
"""
p = 1 if progressive else 0
at("PCMD", seq, [p, float(lr), float(fb), float(vv), float(va)])
def at_ftrim(seq):
"""
Tell the drone it's lying horizontally.
Parameters:
seq -- sequence number
"""
at("FTRIM", seq, [])
def at_zap(seq, stream):
"""
Selects which video stream to send on the video UDP port.
Parameters:
seq -- sequence number
stream -- Integer: video stream to broadcast
"""
# FIXME: improve parameters to select the modes directly
at("ZAP", seq, [stream])
def at_config(seq, option, value):
"""Set configuration parameters of the drone."""
at("CONFIG", seq, [str(option), str(value)])
def at_config_ids(seq, value):
"""Set configuration parameters of the drone."""
at("CONFIG_IDS", seq, value)
def at_ctrl(seq, num):
"""Ask the parrot to drop its configuration file"""
at("CTRL", seq, [num, 0])
def at_comwdg(seq):
"""
Reset communication watchdog.
"""
# FIXME: no sequence number
at("COMWDG", seq, [])
def at_aflight(seq, flag):
"""
Makes the drone fly autonomously.
Parameters:
seq -- sequence number
flag -- Integer: 1: start flight, 0: stop flight
"""
at("AFLIGHT", seq, [flag])
def at_pwm(seq, m1, m2, m3, m4):
"""
Sends control values directly to the engines, overriding control loops.
Parameters:
seq -- sequence number
m1 -- front left command
    m2 -- front right command
m3 -- back right command
m4 -- back left command
"""
# FIXME: what type do mx have?
raise NotImplementedError()
def at_led(seq, anim, f, d):
"""
Control the drones LED.
Parameters:
seq -- sequence number
anim -- Integer: animation to play
    f -- ?: frequency in Hz of the animation
d -- Integer: total duration in seconds of the animation
"""
pass
def at_anim(seq, anim, d):
"""
Makes the drone execute a predefined movement (animation).
Parameters:
    seq -- sequence number
anim -- Integer: animation to play
    d -- Integer: total duration in seconds of the animation
"""
at("ANIM", seq, [anim, d])
def at(command, seq, params):
"""
Parameters:
command -- the command
seq -- the sequence number
params -- a list of elements which can be either int, float or string
"""
param_str = ''
for p in params:
if type(p) == int:
param_str += ",%d" % p
elif type(p) == float:
param_str += ",%d" % f2i(p)
elif type(p) == str:
param_str += ',"' + p + '"'
msg = "AT*%s=%i%s\r" % (command, seq, param_str)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(msg.encode("utf-8"), ("192.168.1.1", ARDRONE_COMMAND_PORT))
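# Example (illustrative): at("CONFIG", 2, ["general:navdata_demo", "TRUE"])
# builds and sends the datagram:
#   AT*CONFIG=2,"general:navdata_demo","TRUE"\r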
def f2i(f):
"""Interpret IEEE-754 floating-point value as signed integer.
Arguments:
f -- floating point value
"""
return struct.unpack('i', struct.pack('f', f))[0]
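# Illustrative sanity check (not part of the original library): 1.0 in
# IEEE-754 binary32 is 0x3F800000, which reads back as the signed integer
# 1065353216.
def _f2i_example():
    assert f2i(1.0) == 1065353216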
###############################################################################
### navdata
###############################################################################
def decode_navdata(packet):
"""Decode a navdata packet."""
offset = 0
_ = struct.unpack_from("IIII", packet, offset)
drone_state = dict()
drone_state['fly_mask'] = _[1] & 1 # FLY MASK : (0) ardrone is landed, (1) ardrone is flying
drone_state['video_mask'] = _[1] >> 1 & 1 # VIDEO MASK : (0) video disable, (1) video enable
drone_state['vision_mask'] = _[1] >> 2 & 1 # VISION MASK : (0) vision disable, (1) vision enable */
drone_state['control_mask'] = _[1] >> 3 & 1 # CONTROL ALGO (0) euler angles control, (1) angular speed control */
drone_state['altitude_mask'] = _[1] >> 4 & 1 # ALTITUDE CONTROL ALGO : (0) altitude control inactive (1) altitude control active */
drone_state['user_feedback_start'] = _[1] >> 5 & 1 # USER feedback : Start button state */
drone_state['command_mask'] = _[1] >> 6 & 1 # Control command ACK : (0) None, (1) one received */
drone_state['fw_file_mask'] = _[1] >> 7 & 1 # Firmware file is good (1) */
drone_state['fw_ver_mask'] = _[1] >> 8 & 1 # Firmware update is newer (1) */
drone_state['fw_upd_mask'] = _[1] >> 9 & 1 # Firmware update is ongoing (1) */
drone_state['navdata_demo_mask'] = _[1] >> 10 & 1 # Navdata demo : (0) All navdata, (1) only navdata demo */
drone_state['navdata_bootstrap'] = _[1] >> 11 & 1 # Navdata bootstrap : (0) options sent in all or demo mode, (1) no navdata options sent */
drone_state['motors_mask'] = _[1] >> 12 & 1 # Motor status : (0) Ok, (1) Motors problem */
drone_state['com_lost_mask'] = _[1] >> 13 & 1 # Communication lost : (1) com problem, (0) Com is ok */
drone_state['vbat_low'] = _[1] >> 15 & 1 # VBat low : (1) too low, (0) Ok */
drone_state['user_el'] = _[1] >> 16 & 1 # User Emergency Landing : (1) User EL is ON, (0) User EL is OFF*/
drone_state['timer_elapsed'] = _[1] >> 17 & 1 # Timer elapsed : (1) elapsed, (0) not elapsed */
drone_state['angles_out_of_range'] = _[1] >> 19 & 1 # Angles : (0) Ok, (1) out of range */
drone_state['ultrasound_mask'] = _[1] >> 21 & 1 # Ultrasonic sensor : (0) Ok, (1) deaf */
drone_state['cutout_mask'] = _[1] >> 22 & 1 # Cutout system detection : (0) Not detected, (1) detected */
drone_state['pic_version_mask'] = _[1] >> 23 & 1 # PIC Version number OK : (0) a bad version number, (1) version number is OK */
drone_state['atcodec_thread_on'] = _[1] >> 24 & 1 # ATCodec thread ON : (0) thread OFF (1) thread ON */
drone_state['navdata_thread_on'] = _[1] >> 25 & 1 # Navdata thread ON : (0) thread OFF (1) thread ON */
drone_state['video_thread_on'] = _[1] >> 26 & 1 # Video thread ON : (0) thread OFF (1) thread ON */
drone_state['acq_thread_on'] = _[1] >> 27 & 1 # Acquisition thread ON : (0) thread OFF (1) thread ON */
drone_state['ctrl_watchdog_mask'] = _[1] >> 28 & 1 # CTRL watchdog : (1) delay in control execution (> 5ms), (0) control is well scheduled */
drone_state['adc_watchdog_mask'] = _[1] >> 29 & 1 # ADC Watchdog : (1) delay in uart2 dsr (> 5ms), (0) uart2 is good */
drone_state['com_watchdog_mask'] = _[1] >> 30 & 1 # Communication Watchdog : (1) com problem, (0) Com is ok */
drone_state['emergency_mask'] = _[1] >> 31 & 1 # Emergency landing : (0) no emergency, (1) emergency */
data = dict()
data['drone_state'] = drone_state
data['header'] = _[0]
data['seq_nr'] = _[2]
data['vision_flag'] = _[3]
offset += struct.calcsize("IIII")
has_flying_information = False
while 1:
try:
id_nr, size = struct.unpack_from("HH", packet, offset)
offset += struct.calcsize("HH")
except struct.error:
break
values = []
for i in range(size - struct.calcsize("HH")):
values.append(struct.unpack_from("c", packet, offset)[0])
offset += struct.calcsize("c")
# navdata_tag_t in navdata-common.h
if id_nr == 0:
has_flying_information = True
values = struct.unpack_from("IIfffifffI", "".join(values))
values = dict(zip(['ctrl_state', 'battery', 'theta', 'phi', 'psi', 'altitude', 'vx', 'vy', 'vz', 'num_frames'], values))
            values['ctrl_state'] = ctrl_state_dict[values['ctrl_state']]
            # Convert the millidegrees into degrees and round to int, as they
            # are not so precise anyway.
            for i in 'theta', 'phi', 'psi':
values[i] = int(values[i] / 1000)
data[id_nr] = values
return data, has_flying_information
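# Illustrative example (not part of the original library): each drone-state
# flag above is a single bit of the 32-bit state word; e.g. bit 13 is
# "communication lost":
#
# state_word = 0b10000000000000  # hypothetical raw value with only bit 13 set
# assert (state_word >> 13) & 1 == 1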
if __name__ == "__main__":
'''
For testing purpose only
'''
import termios
import fcntl
import os
fd = sys.stdin.fileno()
oldterm = termios.tcgetattr(fd)
newattr = termios.tcgetattr(fd)
newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, newattr)
oldflags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)
drone = ARDrone(is_ar_drone_2=True)
import cv2
try:
startvideo = True
video_waiting = False
while 1:
time.sleep(.0001)
if startvideo:
try:
cv2.imshow("Drone camera", cv2.cvtColor(drone.get_image(), cv2.COLOR_BGR2RGB))
cv2.waitKey(1)
except:
if not video_waiting:
print("Video will display when ready")
video_waiting = True
pass
try:
c = sys.stdin.read(1)
c = c.lower()
print("Got character", c)
if c == 'a':
drone.move_left()
if c == 'd':
drone.move_right()
if c == 'w':
drone.move_forward()
if c == 's':
drone.move_backward()
if c == ' ':
drone.land()
if c == '\n':
drone.takeoff()
if c == 'q':
drone.turn_left()
if c == 'e':
drone.turn_right()
if c == '1':
drone.move_up()
if c == '2':
drone.hover()
if c == '3':
drone.move_down()
if c == 't':
drone.reset()
if c == 'x':
drone.hover()
if c == 'y':
drone.trim()
if c == 'i':
startvideo = True
try:
navdata = drone.get_navdata()
print('Emergency landing =', navdata['drone_state']['emergency_mask'])
print('User emergency landing = ', navdata['drone_state']['user_el'])
print('Navdata type= ', navdata['drone_state']['navdata_demo_mask'])
print('Altitude= ', navdata[0]['altitude'])
print('video enable= ', navdata['drone_state']['video_mask'])
print('vision enable= ', navdata['drone_state']['vision_mask'])
print('command_mask= ', navdata['drone_state']['command_mask'])
except:
pass
if c == 'j':
print("Asking for configuration...")
drone.at(at_ctrl, 5)
time.sleep(0.5)
drone.at(at_ctrl, 4)
except IOError:
pass
finally:
termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
fcntl.fcntl(fd, fcntl.F_SETFL, oldflags)
drone.halt()
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import state
from . import subTLVs
from . import undefined_subtlvs
class prefix(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/extended-ipv4-reachability/prefixes/prefix. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This list describes IPv4 extended prefixes and
attributes.
"""
__slots__ = (
"_path_helper", "_extmethods", "__state", "__subTLVs", "__undefined_subtlvs"
)
_yang_name = "prefix"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__subTLVs = YANGDynClass(
base=subTLVs.subTLVs,
is_container="container",
yang_name="subTLVs",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__undefined_subtlvs = YANGDynClass(
base=undefined_subtlvs.undefined_subtlvs,
is_container="container",
yang_name="undefined-subtlvs",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"extended-ipv4-reachability",
"prefixes",
"prefix",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/state (container)
YANG Description: State parameters of an IPv4 extended prefix.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters of an IPv4 extended prefix.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
def _get_subTLVs(self):
"""
Getter method for subTLVs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/subTLVs (container)
YANG Description: This container describes IS prefix sub-TLVs.
"""
return self.__subTLVs
def _set_subTLVs(self, v, load=False):
"""
Setter method for subTLVs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/subTLVs (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_subTLVs is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_subTLVs() directly.
YANG Description: This container describes IS prefix sub-TLVs.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=subTLVs.subTLVs,
is_container="container",
yang_name="subTLVs",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """subTLVs must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=subTLVs.subTLVs, is_container='container', yang_name="subTLVs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__subTLVs = t
if hasattr(self, "_set"):
self._set()
def _unset_subTLVs(self):
self.__subTLVs = YANGDynClass(
base=subTLVs.subTLVs,
is_container="container",
yang_name="subTLVs",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
def _get_undefined_subtlvs(self):
"""
Getter method for undefined_subtlvs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs (container)
YANG Description: This container describes undefined ISIS TLVs.
"""
return self.__undefined_subtlvs
def _set_undefined_subtlvs(self, v, load=False):
"""
Setter method for undefined_subtlvs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_undefined_subtlvs is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_undefined_subtlvs() directly.
YANG Description: This container describes undefined ISIS TLVs.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=undefined_subtlvs.undefined_subtlvs,
is_container="container",
yang_name="undefined-subtlvs",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """undefined_subtlvs must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=undefined_subtlvs.undefined_subtlvs, is_container='container', yang_name="undefined-subtlvs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__undefined_subtlvs = t
if hasattr(self, "_set"):
self._set()
def _unset_undefined_subtlvs(self):
self.__undefined_subtlvs = YANGDynClass(
base=undefined_subtlvs.undefined_subtlvs,
is_container="container",
yang_name="undefined-subtlvs",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
subTLVs = __builtin__.property(_get_subTLVs)
undefined_subtlvs = __builtin__.property(_get_undefined_subtlvs)
_pyangbind_elements = OrderedDict(
[
("state", state),
("subTLVs", subTLVs),
("undefined_subtlvs", undefined_subtlvs),
]
)
from . import state
from . import subTLVs
from . import undefined_subtlvs
class prefix(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/extended-ipv4-reachability/prefixes/prefix. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This list describes IPv4 extended prefixes and
attributes.
"""
__slots__ = (
"_path_helper", "_extmethods", "__state", "__subTLVs", "__undefined_subtlvs"
)
_yang_name = "prefix"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__subTLVs = YANGDynClass(
base=subTLVs.subTLVs,
is_container="container",
yang_name="subTLVs",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__undefined_subtlvs = YANGDynClass(
base=undefined_subtlvs.undefined_subtlvs,
is_container="container",
yang_name="undefined-subtlvs",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"extended-ipv4-reachability",
"prefixes",
"prefix",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/state (container)
YANG Description: State parameters of an IPv4 extended prefix.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters of an IPv4 extended prefix.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
def _get_subTLVs(self):
"""
Getter method for subTLVs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/subTLVs (container)
YANG Description: This container describes IS prefix sub-TLVs.
"""
return self.__subTLVs
def _set_subTLVs(self, v, load=False):
"""
Setter method for subTLVs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/subTLVs (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_subTLVs is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_subTLVs() directly.
YANG Description: This container describes IS prefix sub-TLVs.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=subTLVs.subTLVs,
is_container="container",
yang_name="subTLVs",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """subTLVs must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=subTLVs.subTLVs, is_container='container', yang_name="subTLVs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__subTLVs = t
if hasattr(self, "_set"):
self._set()
def _unset_subTLVs(self):
self.__subTLVs = YANGDynClass(
base=subTLVs.subTLVs,
is_container="container",
yang_name="subTLVs",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
def _get_undefined_subtlvs(self):
"""
Getter method for undefined_subtlvs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs (container)
YANG Description: This container describes undefined ISIS TLVs.
"""
return self.__undefined_subtlvs
def _set_undefined_subtlvs(self, v, load=False):
"""
Setter method for undefined_subtlvs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/undefined_subtlvs (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_undefined_subtlvs is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_undefined_subtlvs() directly.
YANG Description: This container describes undefined ISIS TLVs.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=undefined_subtlvs.undefined_subtlvs,
is_container="container",
yang_name="undefined-subtlvs",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """undefined_subtlvs must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=undefined_subtlvs.undefined_subtlvs, is_container='container', yang_name="undefined-subtlvs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__undefined_subtlvs = t
if hasattr(self, "_set"):
self._set()
def _unset_undefined_subtlvs(self):
self.__undefined_subtlvs = YANGDynClass(
base=undefined_subtlvs.undefined_subtlvs,
is_container="container",
yang_name="undefined-subtlvs",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
subTLVs = __builtin__.property(_get_subTLVs)
undefined_subtlvs = __builtin__.property(_get_undefined_subtlvs)
_pyangbind_elements = OrderedDict(
[
("state", state),
("subTLVs", subTLVs),
("undefined_subtlvs", undefined_subtlvs),
]
)
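# Hedged usage sketch (an assumption, not emitted by pyangbind): generated
# containers such as prefix expose read-only properties backed by private
# _get_*/_set_* methods, so ordinary callers read them while backends populate
# the tree through the setters.
def _prefix_usage_example():
    p = prefix()
    # Read access goes through the generated read-only properties; _path() gives
    # the YANG schema path this container is registered under.
    return p.state, p.subTLVs, p.undefined_subtlvs, p._path()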
|
|
import settings
import helpers
import SimpleITK # conda install -c https://conda.anaconda.org/simpleitk SimpleITK
import numpy
import pandas
import ntpath
import cv2 # conda install -c https://conda.anaconda.org/menpo opencv3
import shutil
import random
import math
import multiprocessing
from bs4 import BeautifulSoup  # conda install beautifulsoup4, conda install lxml
import os
import glob
random.seed(1321)
numpy.random.seed(1321)
def find_mhd_file(patient_id):
""" find the '.mhd' file associated with a specific patient_id
"""
for subject_no in range(settings.LUNA_SUBSET_START_INDEX, 10):
src_dir = settings.LUNA16_RAW_SRC_DIR + "subset" + str(subject_no) + "/"
for src_path in glob.glob(src_dir + "*.mhd"):
if patient_id in src_path:
return src_path
return None
def load_lidc_xml(xml_path, agreement_threshold=0, only_patient=None, save_nodules=False):
""" Read the xml file and create a csv with the (x,y,z) location, diameter, and malignacy of
the positive examples, and a csv with the negative examples
"""
pos_lines = []
neg_lines = []
extended_lines = []
with open(xml_path, 'r') as xml_file:
markup = xml_file.read()
xml = BeautifulSoup(markup, features="xml")
if xml.LidcReadMessage is None:
return None, None, None
patient_id = xml.LidcReadMessage.ResponseHeader.SeriesInstanceUid.text
    # If only looking for a single patient ID, return (None, None, None) when this is not that patient
if only_patient is not None:
if only_patient != patient_id:
return None, None, None
#find the associated '.mhd' file, or return (None, None, None)
src_path = find_mhd_file(patient_id)
if src_path is None:
return None, None, None
print(patient_id)
itk_img = SimpleITK.ReadImage(src_path)
img_array = SimpleITK.GetArrayFromImage(itk_img) # indexes are z,y,x (notice the ordering)
    num_z, height, width = img_array.shape  # height x width constitute the transverse plane
origin = numpy.array(itk_img.GetOrigin()) # x,y,z Origin in world coordinates (mm)
spacing = numpy.array(itk_img.GetSpacing()) # spacing of voxels in world coor. (mm)
rescale = spacing / settings.TARGET_VOXEL_MM
reading_sessions = xml.LidcReadMessage.find_all("readingSession")
for reading_session in reading_sessions:
# print("Sesion")
nodules = reading_session.find_all("unblindedReadNodule")
for nodule in nodules:
nodule_id = nodule.noduleID.text
# print(" ", nodule.noduleID)
rois = nodule.find_all("roi")
x_min = y_min = z_min = 999999
x_max = y_max = z_max = -999999
if len(rois) < 2:
continue
for roi in rois:
z_pos = float(roi.imageZposition.text)
z_min = min(z_min, z_pos)
z_max = max(z_max, z_pos)
edge_maps = roi.find_all("edgeMap")
for edge_map in edge_maps:
x = int(edge_map.xCoord.text)
y = int(edge_map.yCoord.text)
x_min = min(x_min, x)
y_min = min(y_min, y)
x_max = max(x_max, x)
y_max = max(y_max, y)
if x_max == x_min:
continue
if y_max == y_min:
continue
x_diameter = x_max - x_min
x_center = x_min + x_diameter / 2
y_diameter = y_max - y_min
y_center = y_min + y_diameter / 2
z_diameter = z_max - z_min
z_center = z_min + z_diameter / 2
z_center -= origin[2]
z_center /= spacing[2]
x_center_perc = round(x_center / img_array.shape[2], 4)
y_center_perc = round(y_center / img_array.shape[1], 4)
z_center_perc = round(z_center / img_array.shape[0], 4)
diameter = max(x_diameter , y_diameter)
diameter_perc = round(max(x_diameter / img_array.shape[2], y_diameter / img_array.shape[1]), 4)
if nodule.characteristics is None:
print("!!!!Nodule:", nodule_id, " has no charecteristics")
continue
if nodule.characteristics.malignancy is None:
print("!!!!Nodule:", nodule_id, " has no malignacy")
continue
malignacy = nodule.characteristics.malignancy.text
sphericiy = nodule.characteristics.sphericity.text
margin = nodule.characteristics.margin.text
spiculation = nodule.characteristics.spiculation.text
texture = nodule.characteristics.texture.text
calcification = nodule.characteristics.calcification.text
internal_structure = nodule.characteristics.internalStructure.text
lobulation = nodule.characteristics.lobulation.text
subtlety = nodule.characteristics.subtlety.text
line = [nodule_id, x_center_perc, y_center_perc, z_center_perc, diameter_perc, malignacy]
extended_line = [patient_id, nodule_id, x_center_perc, y_center_perc, z_center_perc, diameter_perc, malignacy, sphericiy, margin, spiculation, texture, calcification, internal_structure, lobulation, subtlety ]
pos_lines.append(line)
extended_lines.append(extended_line)
nonNodules = reading_session.find_all("nonNodule")
for nonNodule in nonNodules:
z_center = float(nonNodule.imageZposition.text)
z_center -= origin[2]
z_center /= spacing[2]
x_center = int(nonNodule.locus.xCoord.text)
y_center = int(nonNodule.locus.yCoord.text)
nodule_id = nonNodule.nonNoduleID.text
x_center_perc = round(x_center / img_array.shape[2], 4)
y_center_perc = round(y_center / img_array.shape[1], 4)
z_center_perc = round(z_center / img_array.shape[0], 4)
diameter_perc = round(max(6 / img_array.shape[2], 6 / img_array.shape[1]), 4)
# print("Non nodule!", z_center)
line = [nodule_id, x_center_perc, y_center_perc, z_center_perc, diameter_perc, 0]
neg_lines.append(line)
# Check the distance from each nodule and compare against diameter of each nodule
#
if agreement_threshold > 1:
filtered_lines = []
for pos_line1 in pos_lines:
id1 = pos_line1[0]
x1 = pos_line1[1]
y1 = pos_line1[2]
z1 = pos_line1[3]
d1 = pos_line1[4]
overlaps = 0
for pos_line2 in pos_lines:
id2 = pos_line2[0]
if id1 == id2:
continue
x2 = pos_line2[1]
y2 = pos_line2[2]
z2 = pos_line2[3]
                d2 = pos_line2[4]
dist = math.sqrt(math.pow(x1 - x2, 2) + math.pow(y1 - y2, 2) + math.pow(z1 - z2, 2))
if dist < d1 or dist < d2:
overlaps += 1
if overlaps >= agreement_threshold:
filtered_lines.append(pos_line1)
# else:
# print("Too few overlaps")
pos_lines = filtered_lines
df_annos = pandas.DataFrame(pos_lines, columns=["anno_index", "coord_x", "coord_y", "coord_z", "diameter", "malscore"])
df_annos.to_csv(settings.LUNA16_EXTRACTED_IMAGE_DIR + "_labels/" + patient_id + "_annos_pos_lidc.csv", index=False)
df_neg_annos = pandas.DataFrame(neg_lines, columns=["anno_index", "coord_x", "coord_y", "coord_z", "diameter", "malscore"])
df_neg_annos.to_csv(settings.LUNA16_EXTRACTED_IMAGE_DIR + "_labels/" + patient_id + "_annos_neg_lidc.csv", index=False)
# return [patient_id, spacing[0], spacing[1], spacing[2]]
return pos_lines, neg_lines, extended_lines
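# Hedged sketch (the helper is an assumption, not part of the original code): the
# z_center arithmetic above converts a world-space coordinate in millimetres to a
# voxel index by subtracting the scan origin and dividing by the voxel spacing.
def _mm_to_voxel(coord_mm, origin_mm, spacing_mm):
    """Convert a world coordinate (mm) to a fractional voxel index along one axis."""
    return (coord_mm - origin_mm) / spacing_mm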
def normalize(image):
""" Normalize image -> clip data between -1000 and 400. Scale values to 0 to 1. #### SCALE TO -.5 to .5 ##### TODO:???????
"""
MIN_BOUND = -1000.0
MAX_BOUND = 400.0
image = (image - MIN_BOUND) / (MAX_BOUND - MIN_BOUND)
image[image > 1] = 1.
image[image < 0] = 0.
return image
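# Hedged worked example of the -1000/400 HU window above (the sample values are
# assumptions chosen only to make the clipping and scaling concrete; the helper
# below is not part of the original pipeline).
def _normalize_example():
    """-2000 and -1000 HU clamp to 0, -300 HU maps to 0.5, 400+ HU clamps to 1."""
    sample = numpy.array([-2000.0, -1000.0, -300.0, 400.0, 1000.0])
    return normalize(sample)  # -> array([0. , 0. , 0.5, 1. , 1. ])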
def process_lidc_annotations(only_patient=None, agreement_threshold=0):
# lines.append(",".join())
file_no = 0
pos_count = 0
neg_count = 0
all_lines = []
for anno_dir in [d for d in glob.glob("resources/luna16_annotations/*") if os.path.isdir(d)]:
xml_paths = glob.glob(anno_dir + "/*.xml")
for xml_path in xml_paths:
print(file_no, ": ", xml_path)
pos, neg, extended = load_lidc_xml(xml_path=xml_path, only_patient=only_patient, agreement_threshold=agreement_threshold)
if pos is not None:
pos_count += len(pos)
neg_count += len(neg)
print("Pos: ", pos_count, " Neg: ", neg_count)
file_no += 1
all_lines += extended
# if file_no > 10:
# break
# extended_line = [nodule_id, x_center_perc, y_center_perc, z_center_perc, diameter_perc, malignacy, sphericiy, margin, spiculation, texture, calcification, internal_structure, lobulation, subtlety ]
df_annos = pandas.DataFrame(all_lines, columns=["patient_id", "anno_index", "coord_x", "coord_y", "coord_z", "diameter", "malscore", "sphericiy", "margin", "spiculation", "texture", "calcification", "internal_structure", "lobulation", "subtlety"])
df_annos.to_csv(settings.BASE_DIR + "lidc_annotations.csv", index=False)
if __name__ == "__main__":
if True:
process_lidc_annotations(only_patient=None, agreement_threshold=0)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# This module will create a python virtual env and install external dependencies. If the
# virtualenv already exists and it contains all the expected packages, nothing is done.
#
# A multi-step bootstrapping process is required to build and install all of the
# dependencies:
# 1. install basic non-C/C++ packages into the virtualenv
# 1b. install packages that depend on step 1 but cannot be installed together with their
# dependencies
# 2. use the virtualenv Python to bootstrap the toolchain
# 3. use toolchain gcc to build C/C++ packages
# 4. build the kudu-python package with toolchain gcc and Cython
#
# Every time this script is run, it completes as many of the bootstrapping steps as
# possible with the available dependencies.
#
# This module can be run with python >= 2.4 but python >= 2.6 must be installed on the
# system. If the default 'python' command refers to < 2.6, python 2.6 will be used
# instead.
from __future__ import print_function
import glob
import logging
import optparse
import os
import shutil
import subprocess
import sys
import tarfile
import tempfile
import textwrap
import urllib
LOG = logging.getLogger(os.path.splitext(os.path.basename(__file__))[0])
DEPS_DIR = os.path.join(os.path.dirname(__file__), "deps")
ENV_DIR = os.path.join(os.path.dirname(__file__), "env")
# Requirements file with packages we need for our build and tests.
REQS_PATH = os.path.join(DEPS_DIR, "requirements.txt")
# Second stage of requirements which cannot be installed together with their dependencies
# in requirements.txt.
REQS2_PATH = os.path.join(DEPS_DIR, "stage2-requirements.txt")
# Requirements for the next bootstrapping step that builds compiled requirements
# with toolchain gcc.
COMPILED_REQS_PATH = os.path.join(DEPS_DIR, "compiled-requirements.txt")
# Requirements for the Kudu bootstrapping step, which depends on Cython being installed
# by the compiled requirements step.
KUDU_REQS_PATH = os.path.join(DEPS_DIR, "kudu-requirements.txt")
# Requirements for the ADLS test client step, which depends on Cffi (C Foreign Function
# Interface) being installed by the compiled requirements step.
ADLS_REQS_PATH = os.path.join(DEPS_DIR, "adls-requirements.txt")
def delete_virtualenv_if_exist():
if os.path.exists(ENV_DIR):
shutil.rmtree(ENV_DIR)
def create_virtualenv():
LOG.info("Creating python virtualenv")
build_dir = tempfile.mkdtemp()
file = tarfile.open(find_file(DEPS_DIR, "virtualenv*.tar.gz"), "r:gz")
for member in file.getmembers():
file.extract(member, build_dir)
file.close()
python_cmd = detect_python_cmd()
exec_cmd([python_cmd, find_file(build_dir, "virtualenv*", "virtualenv.py"), "--quiet",
"--python", python_cmd, ENV_DIR])
shutil.rmtree(build_dir)
def exec_cmd(args, **kwargs):
'''Executes a command and waits for it to finish, raises an exception if the return
status is not zero. The command output is returned.
'args' and 'kwargs' use the same format as subprocess.Popen().
'''
process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
**kwargs)
output = process.communicate()[0]
if process.returncode != 0:
raise Exception("Command returned non-zero status\nCommand: %s\nOutput: %s"
% (args, output))
return output
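# Hedged usage sketch of exec_cmd() above; the command is an assumption chosen
# only for illustration. The combined stdout/stderr is returned, and a non-zero
# exit status raises the Exception defined above.
def _exec_cmd_example():
  return exec_cmd(["uname", "-a"]).strip()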
def use_ccache():
'''Returns true if ccache is available and should be used'''
if 'DISABLE_CCACHE' in os.environ: return False
try:
exec_cmd(['ccache', '-V'])
return True
except:
return False
def select_cc():
'''Return the C compiler command that should be used as a string or None if the
compiler is not available '''
# Use toolchain gcc for ABI compatibility with other toolchain packages, e.g.
# Kudu/kudu-python
if not have_toolchain(): return None
toolchain_gcc_dir = toolchain_pkg_dir("gcc")
cc = os.path.join(toolchain_gcc_dir, "bin/gcc")
if not os.path.exists(cc): return None
if use_ccache(): cc = "ccache %s" % cc
return cc
def exec_pip_install(args, cc="no-cc-available", env=None):
'''Executes "pip install" with the provided command line arguments. If 'cc' is set,
it is used as the C compiler. Otherwise compilation of C/C++ code is disabled by
setting the CC environment variable to a bogus value.
Other environment vars can optionally be set with the 'env' argument. By default the
current process's command line arguments are inherited.'''
if not env: env = dict(os.environ)
env["CC"] = cc
# Parallelize the slow numpy build.
# Use getconf instead of nproc because it is supported more widely, e.g. on older
# linux distributions.
env["NPY_NUM_BUILD_JOBS"] = exec_cmd(["getconf", "_NPROCESSORS_ONLN"]).strip()
  # Don't call the virtualenv pip directly; it uses a hashbang to call the python
# virtualenv using an absolute path. If the path to the virtualenv is very long, the
# hashbang won't work.
impala_pip_base_cmd = [os.path.join(ENV_DIR, "bin", "python"),
os.path.join(ENV_DIR, "bin", "pip"), "install", "-v"]
# Passes --no-binary for IMPALA-3767: without this, Cython (and
  # several other packages) fail to download.
#
# --no-cache-dir is used to prevent caching of compiled artifacts, which may be built
# with different compilers or settings.
third_party_pkg_install_cmd = \
impala_pip_base_cmd[:] + ["--no-binary", ":all:", "--no-cache-dir"]
# When using a custom mirror, we also must use the index of that mirror.
if "PYPI_MIRROR" in os.environ:
third_party_pkg_install_cmd.extend(["--index-url",
"%s/simple" % os.environ["PYPI_MIRROR"]])
else:
# Prevent fetching additional packages from the index. If we forget to add a package
# to one of the requirements.txt files, this should trigger an error. However, we will
# still access the index for version/dependency resolution, hence we need to change it
# when using a private mirror.
third_party_pkg_install_cmd.append("--no-index")
third_party_pkg_install_cmd.extend(["--find-links",
"file://%s" % urllib.pathname2url(os.path.abspath(DEPS_DIR))])
third_party_pkg_install_cmd.extend(args)
exec_cmd(third_party_pkg_install_cmd, env=env)
# Finally, we want to install the packages from our own internal python lib
local_package_install_cmd = impala_pip_base_cmd + \
['-e', os.path.join(os.getenv('IMPALA_HOME'), 'lib', 'python')]
exec_cmd(local_package_install_cmd)
def find_file(*paths):
'''Returns the path specified by the glob 'paths', raises an exception if no file is
found.
Ex: find_file('/etc', 'h*sts') --> /etc/hosts
'''
path = os.path.join(*paths)
files = glob.glob(path)
if len(files) > 1:
raise Exception("Found too many files at %s: %s" % (path, files))
if len(files) == 0:
raise Exception("No file found at %s" % path)
return files[0]
def detect_python_cmd():
'''Returns the system command that provides python 2.6 or greater.'''
paths = os.getenv("PATH").split(os.path.pathsep)
for cmd in ("python", "python27", "python2.7", "python-27", "python-2.7", "python26",
"python2.6", "python-26", "python-2.6"):
for path in paths:
cmd_path = os.path.join(path, cmd)
if not os.path.exists(cmd_path) or not os.access(cmd_path, os.X_OK):
continue
exit = subprocess.call([cmd_path, "-c", textwrap.dedent("""
import sys
sys.exit(int(sys.version_info[:2] < (2, 6)))""")])
if exit == 0:
return cmd_path
raise Exception("Could not find minimum required python version 2.6")
def install_deps():
LOG.info("Installing packages into the virtualenv")
exec_pip_install(["-r", REQS_PATH])
mark_reqs_installed(REQS_PATH)
LOG.info("Installing stage 2 packages into the virtualenv")
exec_pip_install(["-r", REQS2_PATH])
mark_reqs_installed(REQS2_PATH)
def have_toolchain():
'''Return true if the Impala toolchain is available'''
return "IMPALA_TOOLCHAIN" in os.environ
def toolchain_pkg_dir(pkg_name):
'''Return the path to the toolchain package'''
pkg_version = os.environ["IMPALA_" + pkg_name.upper() + "_VERSION"]
return os.path.join(os.environ["IMPALA_TOOLCHAIN"], pkg_name + "-" + pkg_version)
def install_compiled_deps_if_possible():
'''Install dependencies that require compilation with toolchain GCC, if the toolchain
is available. Returns true if the deps are installed'''
if reqs_are_installed(COMPILED_REQS_PATH):
LOG.debug("Skipping compiled deps: matching compiled-installed-requirements.txt found")
return True
cc = select_cc()
if cc is None:
LOG.debug("Skipping compiled deps: cc not available yet")
return False
env = dict(os.environ)
# Compilation of pycrypto fails on CentOS 5 with newer GCC versions because of a
# problem with inline declarations in older libc headers. Setting -fgnu89-inline is a
# workaround.
distro_version = ''.join(exec_cmd(["lsb_release", "-irs"]).lower().split())
print(distro_version)
if distro_version.startswith("centos5."):
env["CFLAGS"] = "-fgnu89-inline"
LOG.info("Installing compiled requirements into the virtualenv")
exec_pip_install(["-r", COMPILED_REQS_PATH], cc=cc, env=env)
mark_reqs_installed(COMPILED_REQS_PATH)
return True
def install_adls_deps():
# The ADLS dependencies require that the OS is at least CentOS 6.7 or above,
  # which is why we break this into a separate step. If the target filesystem is
# ADLS, the expectation is that the dev environment is running at least CentOS 6.7.
if os.environ.get('TARGET_FILESYSTEM') == "adls":
if reqs_are_installed(ADLS_REQS_PATH):
LOG.debug("Skipping ADLS deps: matching adls-installed-requirements.txt found")
return True
cc = select_cc()
assert cc is not None
LOG.info("Installing ADLS packages into the virtualenv")
exec_pip_install(["-r", ADLS_REQS_PATH], cc=cc)
mark_reqs_installed(ADLS_REQS_PATH)
def install_kudu_client_if_possible():
'''Installs the Kudu python module if possible, which depends on the toolchain and
the compiled requirements in compiled-requirements.txt. If the toolchain isn't
available, nothing will be done. Also nothing will be done if the Kudu client lib
required by the module isn't available (as determined by KUDU_IS_SUPPORTED)'''
if reqs_are_installed(KUDU_REQS_PATH):
LOG.debug("Skipping Kudu: matching kudu-installed-requirements.txt found")
return
if os.environ["KUDU_IS_SUPPORTED"] != "true":
LOG.debug("Skipping Kudu: Kudu is not supported")
return
kudu_base_dir = os.environ["IMPALA_KUDU_HOME"]
if not os.path.exists(kudu_base_dir):
LOG.debug("Skipping Kudu: %s doesn't exist" % kudu_base_dir)
return
LOG.info("Installing Kudu into the virtualenv")
# The installation requires that KUDU_HOME/build/latest exists. An empty directory
# structure will be made to satisfy that. The Kudu client headers and lib will be made
# available through GCC environment variables.
fake_kudu_build_dir = os.path.join(tempfile.gettempdir(), "virtualenv-kudu")
try:
artifact_dir = os.path.join(fake_kudu_build_dir, "build", "latest")
if not os.path.exists(artifact_dir):
os.makedirs(artifact_dir)
cc = select_cc()
assert cc is not None
env = dict(os.environ)
env["KUDU_HOME"] = fake_kudu_build_dir
kudu_client_dir = find_kudu_client_install_dir()
env["CPLUS_INCLUDE_PATH"] = os.path.join(kudu_client_dir, "include")
env["LIBRARY_PATH"] = os.path.pathsep.join([os.path.join(kudu_client_dir, 'lib'),
os.path.join(kudu_client_dir, 'lib64')])
exec_pip_install(["-r", KUDU_REQS_PATH], cc=cc, env=env)
mark_reqs_installed(KUDU_REQS_PATH)
finally:
try:
shutil.rmtree(fake_kudu_build_dir)
except Exception:
LOG.debug("Error removing temp Kudu build dir", exc_info=True)
def find_kudu_client_install_dir():
custom_client_dir = os.environ["KUDU_CLIENT_DIR"]
if custom_client_dir:
install_dir = os.path.join(custom_client_dir, "usr", "local")
error_if_kudu_client_not_found(install_dir)
else:
# If the toolchain appears to have been setup already, then the Kudu client is
# required to exist. It's possible that the toolchain won't be setup yet though
# since the toolchain bootstrap script depends on the virtualenv.
kudu_base_dir = os.environ["IMPALA_KUDU_HOME"]
install_dir = os.path.join(kudu_base_dir, "debug")
if os.path.exists(kudu_base_dir):
error_if_kudu_client_not_found(install_dir)
return install_dir
def error_if_kudu_client_not_found(install_dir):
header_path = os.path.join(install_dir, "include", "kudu", "client", "client.h")
if not os.path.exists(header_path):
raise Exception("Kudu client header not found at %s" % header_path)
kudu_client_lib = "libkudu_client.so"
lib_dir = os.path.join(install_dir, "lib64")
if not os.path.exists(lib_dir):
lib_dir = os.path.join(install_dir, "lib")
for _, _, files in os.walk(lib_dir):
for file in files:
if file == kudu_client_lib:
return
raise Exception("%s not found at %s" % (kudu_client_lib, lib_dir))
def mark_reqs_installed(reqs_path):
'''Mark that the requirements from the given file are installed by copying it into the root
directory of the virtualenv.'''
installed_reqs_path = os.path.join(ENV_DIR, os.path.basename(reqs_path))
shutil.copyfile(reqs_path, installed_reqs_path)
def reqs_are_installed(reqs_path):
'''Check if the requirements from the given file are installed in the virtualenv by
looking for a matching requirements file in the root directory of the virtualenv.'''
installed_reqs_path = os.path.join(ENV_DIR, os.path.basename(reqs_path))
if not os.path.exists(installed_reqs_path):
return False
installed_reqs_file = open(installed_reqs_path)
try:
reqs_file = open(reqs_path)
try:
if reqs_file.read() == installed_reqs_file.read():
return True
else:
LOG.debug("Virtualenv upgrade needed")
return False
finally:
reqs_file.close()
finally:
installed_reqs_file.close()
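# Hedged restatement of the marker-file pattern implemented by
# mark_reqs_installed()/reqs_are_installed() above: a requirements file counts as
# installed only while a byte-for-byte identical copy sits in the virtualenv
# root, so editing the source file invalidates the marker and forces a reinstall.
# The helper below is an assumption added purely for illustration.
def _marker_matches(reqs_path):
  marker_path = os.path.join(ENV_DIR, os.path.basename(reqs_path))
  if not os.path.exists(marker_path):
    return False
  return open(reqs_path).read() == open(marker_path).read()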
def setup_virtualenv_if_not_exists():
if not (reqs_are_installed(REQS_PATH) and reqs_are_installed(REQS2_PATH)):
delete_virtualenv_if_exist()
create_virtualenv()
install_deps()
LOG.debug("Virtualenv setup complete")
if __name__ == "__main__":
parser = optparse.OptionParser()
parser.add_option("-l", "--log-level", default="INFO",
choices=("DEBUG", "INFO", "WARN", "ERROR"))
parser.add_option("-r", "--rebuild", action="store_true", help="Force a rebuild of"
" the virtualenv even if it exists and appears to be completely up-to-date.")
parser.add_option("--print-ld-library-path", action="store_true", help="Print the"
" LD_LIBRARY_PATH that should be used when running python from the virtualenv.")
options, args = parser.parse_args()
if options.print_ld_library_path:
kudu_client_dir = find_kudu_client_install_dir()
print(os.path.pathsep.join([os.path.join(kudu_client_dir, 'lib'),
os.path.join(kudu_client_dir, 'lib64')]))
sys.exit()
logging.basicConfig(level=getattr(logging, options.log_level))
if options.rebuild:
delete_virtualenv_if_exist()
# Complete as many bootstrap steps as possible (see file comment for the steps).
setup_virtualenv_if_not_exists()
if install_compiled_deps_if_possible():
install_kudu_client_if_possible()
install_adls_deps()
|
|
"""
The frames module is composed of two classes, the Frame, and the FrameBuffer.
Each frame stores data on state, and the time the sample was received.
The FrameBuffer is a collection of Frames that we use for filtering and extrapolation functions.
"""
import math
import time
import numpy as np
from scipy import signal
from tower.map.dynamics import euler_from_quaternion
# todo: make this more extensible, i.e. specify the type of filter that should be implemented
def butter_lowpass(cutoff, fs, order=3):
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
    b, a = signal.butter(order, normal_cutoff, btype='low', analog=False)  # digital filter: cutoff is already normalised to Nyquist
return b, a
def butter_lowpass_filter(b, a, data):
y = signal.lfilter(b, a, data)
return y
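# Hedged usage sketch of the two helpers above (cutoff, rate and signal are
# assumptions): design a 4th-order low-pass filter for a 120 Hz stream with a
# 20 Hz cutoff, then run a noisy trace through it.
def _lowpass_example():
    b, a = butter_lowpass(cutoff=20, fs=120, order=4)
    noisy = np.sin(np.linspace(0, 2 * np.pi, 240)) + 0.1 * np.random.randn(240)
    return butter_lowpass_filter(b, a, noisy)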
def quat2euler(q):
"""
Function for returning a set of Euler angles from a given quaternion. Uses a fixed rotation sequence.
:param q:
:return:
"""
qx, qy, qz, qw = q
sqx, sqy, sqz, sqw = q ** 2
invs = 1.0 / (sqx + sqy + sqz + sqw)
yaw = np.arctan2(2.0 * (qx * qz + qy * qw) * invs, (sqx - sqy - sqz + sqw) * invs)
pitch = -np.arcsin(2.0 * (qx * qy - qz * qw) * invs)
roll = np.arctan2(2.0 * (qy * qz + qx * qw) * invs, (-sqx + sqy - sqz + sqw) * invs)
return np.array((yaw, pitch, roll))
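# Hedged sanity check for quat2euler() above (the input is an assumption): the
# identity quaternion (qx, qy, qz, qw) = (0, 0, 0, 1) should map to zero yaw,
# pitch and roll.
def _quat2euler_identity_check():
    return quat2euler(np.array([0.0, 0.0, 0.0, 1.0]))  # -> array([0., 0., 0.])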
class Frame(object):
"""
The frame class is typically managed by a FrameBuffer, which instantiates a new Frame when it receives a call to
update.
A call to update means a new sample has arrived, and should be added to the buffer.
"""
def __init__(self, value=None):
self._frame_data = np.array(value)
self.time_stamp = time.time()
@property
def frame_data(self):
if self._frame_data is not None:
return self._frame_data
else:
return None
@frame_data.setter
def frame_data(self, value):
self._frame_data = np.array(value)
self.time_stamp = time.time()
@property
def detected(self):
if self._frame_data is not None:
return self._frame_data[-1]
else:
return None
@property
def state(self):
if self._frame_data is not None:
return self._frame_data[:6]
else:
return None
@property
def state_dict(self):
if self._frame_data is not None:
return self._frame_data[:6]
else:
return None
class FrameBuffer(object):
def __init__(self, extrapolating=False, filtering=False, **kwargs):
self.extrapolation_max = kwargs.get('extrapolation_max', 5)
self._cutoff, self._fs, self._order = kwargs.get('cutoff', 20), kwargs.get('fs', 120), kwargs.get('order', 4)
self.smooth_operator = np.array([0, 0, 0, 0, 0, 0]) # no need to ask...
self.current_frame = None
self.l_frame = None
self.ll_frame = None
self._filtered_frame = None
self.extrapolation_count = 0
self.prev_time = time.time()
self.filtering = filtering
self.extrapolating = extrapolating
if filtering:
self._b, self._a = butter_lowpass(self._cutoff, self._fs, order=self._order)
@property
def cutoff(self):
"""
The cutoff frequency of the filter. Implementation dependent.
:return: the cutoff of the filter
"""
return self._cutoff
@cutoff.setter
def cutoff(self, value):
"""
        Cutoff property setter. Limit to positive frequencies, default to 8.
:param value: new value for cutoff
"""
if value > 0:
self._cutoff = value
else:
self._cutoff = 8 # default
@property
def filtered_frame(self):
"""
The filtered frame resulting from taking the latest value from filter output.
:return: the most current, filtered frame
"""
return self._filtered_frame
@filtered_frame.setter
def filtered_frame(self, value):
"""
        Filtered frame property setter. Updates the last frame (l_frame) and the frame before that (ll_frame).
:param value: the new value for the filtered frame
"""
self.ll_frame = self.l_frame
self.l_frame = self.filtered_frame
self._filtered_frame = value
@property
def can_extrapolate(self):
"""
Determine if extrapolation is valid based on given constraints. Prevents us from extrapolating forever if we've
left the capture volume, and also prevents us from extrapolating if we do not have enough previous information.
:return: True if we can extrapolate position, false if we cannot
"""
return self.l_frame is not None and self.ll_frame is not None and self.extrapolation_count \
< self.extrapolation_max
def extrapolate(self):
"""
Used to estimate the position of a body during occlusion, or missed frame events.
Based on the previous two valid frames, estimate velocity, and therefore position of the current dropped frame.
        :return: the estimated state of the body, or None if extrapolation cannot be completed
"""
if self.can_extrapolate:
self.extrapolation_count += 1
dt = time.time() - self.prev_time # current time - last time we added a state to smooth operator
frame_velocity = (self.l_frame.state - self.ll_frame.state) / (
self.l_frame.time_stamp - self.ll_frame.time_stamp)
state = self.l_frame.state + frame_velocity * dt
return state
else:
self.extrapolation_count = 0
return None
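    # Worked example of the estimate above (numbers are assumptions): if the last
    # two valid frames were 10 ms apart and x moved from 0.50 m to 0.52 m, the
    # velocity is 2 m/s, so a frame dropped 10 ms later is extrapolated to 0.54 m.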
def filter(self, state):
"""
        Apply a filter to input states. Currently implements Butterworth - will be extended to a variety of filters.
:param state: state array consisting of [x, y, z, yaw, roll, pitch]
:return:
"""
if state is not None: # not None if frame is valid, or we could extrapolate
self.smooth_operator = np.vstack((self.smooth_operator, state))
self.prev_time = time.time()
filtered = []
if self.smooth_operator.shape[0] > 250:
self.smooth_operator = np.delete(self.smooth_operator, 0, 0)
# Filter
            for column in range(len(self.current_frame.state)):  # one filter pass per state component
filt = butter_lowpass_filter(self._b, self._a, self.smooth_operator[:, column])
# print("Last", filt[-1])
filtered.append(filt[-1])
return filtered
return None
def decode_packet(self, packet):
"""
Read in a packed message from the ZMQ stream, deserialize using msgpack.
:param packet: Raw, serialized packet from ZMQ stream
:return: The state of the vehicle is given in an array of the form [x, y, z, yaw, roll, pitch, detected(bool)]
"""
detected = packet[-1]
delta = packet[-2]
x, y, z = packet[0], packet[1], packet[2]
x, y, z = -x, z, y
q = np.array([packet[3], packet[4], packet[5], packet[6]])
# np.linalg.norm(q) # (qx, qy, qz, qw)
# print("X:{}, Y:{}, Z:{}".format(x, y, z))
orientation = [elem * (180 / math.pi) for elem in euler_from_quaternion(q, axes='syxz')]
yaw, roll, pitch = orientation[0], orientation[1], orientation[2]
return [x, y, z, yaw, roll, pitch, detected]
def update(self, packet):
"""
Update the frame history by reading in new packets, constructing frame objects, and performing filtering and
extrapolation as desired. The packet is a serialized chunk of prepackaged data coming from a ZMQ REQ server
        running on a machine running Motive, OptiTrack's GUI, which doubles as a UDP packet server
:param packet: Serialized data from ZMQ stream
:return: A frame of data, appropriately extrapolated and or filtered if specified in __init__
"""
# Check if body is being tracked by cameras
self.current_frame = Frame(self.decode_packet(packet))
        if self.current_frame.detected:  # Unpack position and orientation, add to the current frame, update frame history
state = self.current_frame.state
self.extrapolation_count = 0
else: # extrapolate
if self.extrapolating:
state = self.extrapolate()
else:
state = None
if state is not None:
if self.filtering:
state = self.filter(state)
print(state)
else:
pass
if state is not None:
self.filtered_frame = Frame(state)
# print self.filtered_frame.frame_data
return self.filtered_frame # indicate success in updating
else:
            return None  # frame was not valid and we cannot extrapolate
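# Hedged end-to-end sketch of FrameBuffer above. The packet contents are
# assumptions shaped the way decode_packet() expects them
# (x, y, z, qx, qy, qz, qw, delta, detected); with filtering and extrapolation
# disabled, update() simply wraps each decoded sample in a Frame.
def _framebuffer_example():
    buf = FrameBuffer(extrapolating=False, filtering=False)
    packet = [1.0, 2.0, 0.5, 0.0, 0.0, 0.0, 1.0, 0.016, True]
    return buf.update(packet)  # -> Frame holding [x, y, z, yaw, roll, pitch]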
|
|
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gbm import H2OGradientBoostingEstimator
def metric_accessors():
cars = h2o.import_file(path=pyunit_utils.locate("smalldata/junit/cars_20mpg.csv"))
r = cars[0].runif()
train = cars[r > .2]
valid = cars[r <= .2]
# regression
response_col = "economy"
distribution = "gaussian"
predictors = ["displacement","power","weight","acceleration","year"]
gbm = H2OGradientBoostingEstimator(nfolds=3,
distribution=distribution,
fold_assignment="Random")
gbm.train(x=predictors, y=response_col, training_frame=train, validation_frame=valid)
# using list from http://docs.h2o.ai/h2o/latest-stable/h2o-docs/performance-and-prediction.html#regression
for metric in ['r2', 'mse', 'rmse', 'rmsle', 'mae']:
val = getattr(gbm, metric)()
assert isinstance(val, float), "expected a float for metric {} but got {}".format(metric, val)
# mse
mse1 = gbm.mse(train=True, valid=False, xval=False)
assert isinstance(mse1, float)
mse2 = gbm.mse(train=False, valid=True, xval=False)
assert isinstance(mse2, float)
mse3 = gbm.mse(train=False, valid=False, xval=True)
assert isinstance(mse3, float)
mse = gbm.mse(train=True, valid=True, xval=False)
assert "train" in list(mse.keys()) and "valid" in list(mse.keys()), "expected training and validation metrics to be returned, but got {0}".format(list(mse.keys()))
assert len(mse) == 2, "expected only training and validation metrics to be returned, but got {0}".format(list(mse.keys()))
assert isinstance(mse["train"], float) and isinstance(mse["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(mse["train"]), type(mse["valid"]))
assert mse["valid"] == mse2
mse = gbm.mse(train=True, valid=False, xval=True)
assert "train" in list(mse.keys()) and "xval" in list(mse.keys()), "expected training and cross validation metrics to be returned, but got {0}".format(list(mse.keys()))
assert len(mse) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(list(mse.keys()))
assert isinstance(mse["train"], float) and isinstance(mse["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(mse["train"]), type(mse["xval"]))
assert mse["xval"] == mse3
mse = gbm.mse(train=True, valid=True, xval=True)
assert "train" in list(mse.keys()) and "valid" in list(mse.keys()) and "xval" in list(mse.keys()), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(list(mse.keys()))
assert len(mse) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(list(mse.keys()))
assert isinstance(mse["train"], float) and isinstance(mse["valid"], float) and isinstance(mse["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(mse["train"]), type(mse["valid"]), type(mse["xval"]))
mse = gbm.mse(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(mse, float)
assert mse == mse1
mse = gbm.mse(train=False, valid=True, xval=True)
assert "valid" in list(mse.keys()) and "xval" in list(mse.keys()), "expected validation and cross validation metrics to be returned, but got {0}".format(list(mse.keys()))
assert len(mse) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(list(mse.keys()))
assert isinstance(mse["valid"], float) and isinstance(mse["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(mse["valid"]), type(mse["xval"]))
# mean_residual_deviance
mean_residual_deviance1 = gbm.mean_residual_deviance(train=True, valid=False, xval=False)
assert isinstance(mean_residual_deviance1, float)
mean_residual_deviance2 = gbm.mean_residual_deviance(train=False, valid=True, xval=False)
assert isinstance(mean_residual_deviance2, float)
mean_residual_deviance3 = gbm.mean_residual_deviance(train=False, valid=False, xval=True)
assert isinstance(mean_residual_deviance3, float)
mean_residual_deviance = gbm.mean_residual_deviance(train=True, valid=True, xval=False)
assert "train" in list(mean_residual_deviance.keys()) and "valid" in list(mean_residual_deviance.keys()), "expected training and validation metrics to be returned, but got {0}".format(list(mean_residual_deviance.keys()))
assert len(mean_residual_deviance) == 2, "expected only training and validation metrics to be returned, but got {0}".format(list(mean_residual_deviance.keys()))
assert isinstance(mean_residual_deviance["train"], float) and isinstance(mean_residual_deviance["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(mean_residual_deviance["train"]), type(mean_residual_deviance["valid"]))
assert mean_residual_deviance["valid"] == mean_residual_deviance2
mean_residual_deviance = gbm.mean_residual_deviance(train=True, valid=False, xval=True)
assert "train" in list(mean_residual_deviance.keys()) and "xval" in list(mean_residual_deviance.keys()), "expected training and cross validation metrics to be returned, but got {0}".format(list(mean_residual_deviance.keys()))
assert len(mean_residual_deviance) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(list(mean_residual_deviance.keys()))
assert isinstance(mean_residual_deviance["train"], float) and isinstance(mean_residual_deviance["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(mean_residual_deviance["train"]), type(mean_residual_deviance["xval"]))
assert mean_residual_deviance["xval"] == mean_residual_deviance3
mean_residual_deviance = gbm.mean_residual_deviance(train=True, valid=True, xval=True)
assert "train" in list(mean_residual_deviance.keys()) and "valid" in list(mean_residual_deviance.keys()) and "xval" in list(mean_residual_deviance.keys()), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(list(mean_residual_deviance.keys()))
assert len(mean_residual_deviance) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(list(mean_residual_deviance.keys()))
assert isinstance(mean_residual_deviance["train"], float) and isinstance(mean_residual_deviance["valid"], float) and isinstance(mean_residual_deviance["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(mean_residual_deviance["train"]), type(mean_residual_deviance["valid"]), type(mean_residual_deviance["xval"]))
mean_residual_deviance = gbm.mean_residual_deviance(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(mean_residual_deviance, float)
assert mean_residual_deviance == mean_residual_deviance1
mean_residual_deviance = gbm.mean_residual_deviance(train=False, valid=True, xval=True)
assert "valid" in list(mean_residual_deviance.keys()) and "xval" in list(mean_residual_deviance.keys()), "expected validation and cross validation metrics to be returned, but got {0}".format(list(mean_residual_deviance.keys()))
assert len(mean_residual_deviance) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(list(mean_residual_deviance.keys()))
assert isinstance(mean_residual_deviance["valid"], float) and isinstance(mean_residual_deviance["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(mean_residual_deviance["valid"]), type(mean_residual_deviance["xval"]))
# binomial
cars = h2o.import_file(path=pyunit_utils.locate("smalldata/junit/cars_20mpg.csv"))
cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
r = cars[0].runif()
train = cars[r > .2]
valid = cars[r <= .2]
response_col = "economy_20mpg"
distribution = "bernoulli"
predictors = ["displacement","power","weight","acceleration","year"]
gbm = H2OGradientBoostingEstimator(nfolds=3, distribution=distribution, fold_assignment="Random")
gbm.train(y=response_col, x=predictors, validation_frame=valid, training_frame=train)
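# nfolds=3 with fold_assignment="Random" is what makes the cross-validation
# ("xval") metrics below available; without nfolds the xval accessors would
# have nothing to report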
# using list from http://docs.h2o.ai/h2o/latest-stable/h2o-docs/performance-and-prediction.html#classification
# + common ones
for metric in ['gini', 'logloss', 'auc', 'aucpr', 'mse', 'rmse']:
val = getattr(gbm, metric)()
assert isinstance(val, float), "expected a float for metric {} but got {}".format(metric, val)
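# thresholded binomial metrics (mcc, F1, ...) are returned as a max-metric
# [[threshold, value]] list rather than a bare float, hence the [0][1]
# indexing in the loop below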
for metric in ['mcc', 'F1', 'F0point5', 'F2', 'accuracy', 'mean_per_class_error']:
val = getattr(gbm, metric)()[0][1]
assert isinstance(val, float), "expected a float for metric {} but got {}".format(metric, val)
# auc
auc1 = gbm.auc(train=True, valid=False, xval=False)
assert isinstance(auc1, float)
auc2 = gbm.auc(train=False, valid=True, xval=False)
assert isinstance(auc2, float)
auc3 = gbm.auc(train=False, valid=False, xval=True)
assert isinstance(auc3, float)
auc = gbm.auc(train=True, valid=True, xval=False)
assert "train" in list(auc.keys()) and "valid" in list(auc.keys()), "expected training and validation metrics to be returned, but got {0}".format(list(auc.keys()))
assert len(auc) == 2, "expected only training and validation metrics to be returned, but got {0}".format(list(auc.keys()))
assert isinstance(auc["train"], float) and isinstance(auc["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(auc["train"]), type(auc["valid"]))
assert auc["valid"] == auc2
auc = gbm.auc(train=True, valid=False, xval=True)
assert "train" in list(auc.keys()) and "xval" in list(auc.keys()), "expected training and cross validation metrics to be returned, but got {0}".format(list(auc.keys()))
assert len(auc) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(list(auc.keys()))
assert isinstance(auc["train"], float) and isinstance(auc["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(auc["train"]), type(auc["xval"]))
assert auc["xval"] == auc3
auc = gbm.auc(train=True, valid=True, xval=True)
assert "train" in list(auc.keys()) and "valid" in list(auc.keys()) and "xval" in list(auc.keys()), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(list(auc.keys()))
assert len(auc) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(list(auc.keys()))
assert isinstance(auc["train"], float) and isinstance(auc["valid"], float) and isinstance(auc["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(auc["train"]), type(auc["valid"]), type(auc["xval"]))
auc = gbm.auc(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(auc, float)
assert auc == auc1
auc = gbm.auc(train=False, valid=True, xval=True)
assert "valid" in list(auc.keys()) and "xval" in list(auc.keys()), "expected validation and cross validation metrics to be returned, but got {0}".format(list(auc.keys()))
assert len(auc) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(list(auc.keys()))
assert isinstance(auc["valid"], float) and isinstance(auc["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(auc["valid"]), type(auc["xval"]))
# roc
(fprs1, tprs1) = gbm.roc(train=True, valid=False, xval=False)
assert isinstance(fprs1, list)
assert isinstance(tprs1, list)
(fprs2, tprs2) = gbm.roc(train=False, valid=True, xval=False)
assert isinstance(fprs2, list)
assert isinstance(tprs2, list)
(fprs3, tprs3) = gbm.roc(train=False, valid=False, xval=True)
assert isinstance(fprs3, list)
assert isinstance(tprs3, list)
roc = gbm.roc(train=True, valid=True, xval=False)
assert "train" in list(roc.keys()) and "valid" in list(roc.keys()), "expected training and validation metrics to be returned, but got {0}".format(list(roc.keys()))
assert len(roc) == 2, "expected only training and validation metrics to be returned, but got {0}".format(list(roc.keys()))
assert isinstance(roc["train"], tuple) and isinstance(roc["valid"], tuple), "expected training and validation metrics to be tuples, but got {0} and {1}".format(type(roc["train"]), type(roc["valid"]))
assert roc["valid"][0] == fprs2
assert roc["valid"][1] == tprs2
roc = gbm.roc(train=True, valid=False, xval=True)
assert "train" in list(roc.keys()) and "xval" in list(roc.keys()), "expected training and cross validation metrics to be returned, but got {0}".format(list(roc.keys()))
assert len(roc) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(list(roc.keys()))
assert isinstance(roc["train"], tuple) and isinstance(roc["xval"], tuple), "expected training and cross validation metrics to be tuples, but got {0} and {1}".format(type(roc["train"]), type(roc["xval"]))
assert roc["xval"][0] == fprs3
assert roc["xval"][1] == tprs3
roc = gbm.roc(train=True, valid=True, xval=True)
assert "train" in list(roc.keys()) and "valid" in list(roc.keys()) and "xval" in list(roc.keys()), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(list(roc.keys()))
assert len(roc) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(list(roc.keys()))
assert isinstance(roc["train"], tuple) and isinstance(roc["valid"], tuple) and isinstance(roc["xval"], tuple), "expected training, validation, and cross validation metrics to be tuples, but got {0}, {1}, and {2}".format(type(roc["train"]), type(roc["valid"]), type(roc["xval"]))
(fprs, tprs) = gbm.roc(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(fprs, list)
assert isinstance(tprs, list)
assert fprs == fprs1
assert tprs == tprs1
roc = gbm.roc(train=False, valid=True, xval=True)
assert "valid" in list(roc.keys()) and "xval" in list(roc.keys()), "expected validation and cross validation metrics to be returned, but got {0}".format(list(roc.keys()))
assert len(roc) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(list(roc.keys()))
assert isinstance(roc["valid"], tuple) and isinstance(roc["xval"], tuple), "validation and cross validation metrics to be tuples, but got {0} and {1}".format(type(roc["valid"]), type(roc["xval"]))
# logloss
logloss1 = gbm.logloss(train=True, valid=False, xval=False)
assert isinstance(logloss1, float)
logloss2 = gbm.logloss(train=False, valid=True, xval=False)
assert isinstance(logloss2, float)
logloss3 = gbm.logloss(train=False, valid=False, xval=True)
assert isinstance(logloss3, float)
logloss = gbm.logloss(train=True, valid=True, xval=False)
assert "train" in list(logloss.keys()) and "valid" in list(logloss.keys()), "expected training and validation metrics to be returned, but got {0}".format(list(logloss.keys()))
assert len(logloss) == 2, "expected only training and validation metrics to be returned, but got {0}".format(list(logloss.keys()))
assert isinstance(logloss["train"], float) and isinstance(logloss["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(logloss["train"]), type(logloss["valid"]))
assert logloss["valid"] == logloss2
logloss = gbm.logloss(train=True, valid=False, xval=True)
assert "train" in list(logloss.keys()) and "xval" in list(logloss.keys()), "expected training and cross validation metrics to be returned, but got {0}".format(list(logloss.keys()))
assert len(logloss) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(list(logloss.keys()))
assert isinstance(logloss["train"], float) and isinstance(logloss["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(logloss["train"]), type(logloss["xval"]))
assert logloss["xval"] == logloss3
logloss = gbm.logloss(train=True, valid=True, xval=True)
assert "train" in list(logloss.keys()) and "valid" in list(logloss.keys()) and "xval" in list(logloss.keys()), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(list(logloss.keys()))
assert len(logloss) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(list(logloss.keys()))
assert isinstance(logloss["train"], float) and isinstance(logloss["valid"], float) and isinstance(logloss["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(logloss["train"]), type(logloss["valid"]), type(logloss["xval"]))
logloss = gbm.logloss(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(logloss, float)
assert logloss == logloss1
logloss = gbm.logloss(train=False, valid=True, xval=True)
assert "valid" in list(logloss.keys()) and "xval" in list(logloss.keys()), "expected validation and cross validation metrics to be returned, but got {0}".format(list(logloss.keys()))
assert len(logloss) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(list(logloss.keys()))
assert isinstance(logloss["valid"], float) and isinstance(logloss["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(logloss["valid"]), type(logloss["xval"]))
# gini
gini1 = gbm.gini(train=True, valid=False, xval=False)
assert isinstance(gini1, float)
gini2 = gbm.gini(train=False, valid=True, xval=False)
assert isinstance(gini2, float)
gini3 = gbm.gini(train=False, valid=False, xval=True)
assert isinstance(gini3, float)
gini = gbm.gini(train=True, valid=True, xval=False)
assert "train" in list(gini.keys()) and "valid" in list(gini.keys()), "expected training and validation metrics to be returned, but got {0}".format(list(gini.keys()))
assert len(gini) == 2, "expected only training and validation metrics to be returned, but got {0}".format(list(gini.keys()))
assert isinstance(gini["train"], float) and isinstance(gini["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(gini["train"]), type(gini["valid"]))
assert gini["valid"] == gini2
gini = gbm.gini(train=True, valid=False, xval=True)
assert "train" in list(gini.keys()) and "xval" in list(gini.keys()), "expected training and cross validation metrics to be returned, but got {0}".format(list(gini.keys()))
assert len(gini) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(list(gini.keys()))
assert isinstance(gini["train"], float) and isinstance(gini["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(gini["train"]), type(gini["xval"]))
assert gini["xval"] == gini3
gini = gbm.gini(train=True, valid=True, xval=True)
assert "train" in list(gini.keys()) and "valid" in list(gini.keys()) and "xval" in list(gini.keys()), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(list(gini.keys()))
assert len(gini) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(list(gini.keys()))
assert isinstance(gini["train"], float) and isinstance(gini["valid"], float) and isinstance(gini["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(gini["train"]), type(gini["valid"]), type(gini["xval"]))
gini = gbm.gini(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(gini, float)
assert gini == gini1
gini = gbm.gini(train=False, valid=True, xval=True)
assert "valid" in list(gini.keys()) and "xval" in list(gini.keys()), "expected validation and cross validation metrics to be returned, but got {0}".format(list(gini.keys()))
assert len(gini) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(list(gini.keys()))
assert isinstance(gini["valid"], float) and isinstance(gini["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(gini["valid"]), type(gini["xval"]))
# F1
F11 = gbm.F1(train=True, valid=False, xval=False)
F12 = gbm.F1(train=False, valid=True, xval=False)
F13 = gbm.F1(train=False, valid=False, xval=True)
F1 = gbm.F1(train=True, valid=True, xval=False)
F1 = gbm.F1(train=True, valid=False, xval=True)
F1 = gbm.F1(train=True, valid=True, xval=True)
F1 = gbm.F1(train=False, valid=False, xval=False) # default: return training metrics
F1 = gbm.F1(train=False, valid=True, xval=True)
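# hedged sketch: F11 above should hold the max-F1 [[threshold, value]] pair,
# with the threshold at [0][0] and the score at [0][1] (assumption based on
# the indexing convention used earlier in this test)
assert 0.0 <= F11[0][0] <= 1.0, "expected an F1-maximizing threshold in [0, 1], but got {0}".format(F11[0][0])
assert isinstance(F11[0][1], float), "expected the max F1 score to be a float, but got {0}".format(type(F11[0][1]))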
# F0point5
F0point51 = gbm.F0point5(train=True, valid=False, xval=False)
F0point52 = gbm.F0point5(train=False, valid=True, xval=False)
F0point53 = gbm.F0point5(train=False, valid=False, xval=True)
F0point5 = gbm.F0point5(train=True, valid=True, xval=False)
F0point5 = gbm.F0point5(train=True, valid=False, xval=True)
F0point5 = gbm.F0point5(train=True, valid=True, xval=True)
F0point5 = gbm.F0point5(train=False, valid=False, xval=False) # default: return training metrics
F0point5 = gbm.F0point5(train=False, valid=True, xval=True)
# F2
F21 = gbm.F2(train=True, valid=False, xval=False)
F22 = gbm.F2(train=False, valid=True, xval=False)
F23 = gbm.F2(train=False, valid=False, xval=True)
F2 = gbm.F2(train=True, valid=True, xval=False)
F2 = gbm.F2(train=True, valid=False, xval=True)
F2 = gbm.F2(train=True, valid=True, xval=True)
F2 = gbm.F2(train=False, valid=False, xval=False) # default: return training metrics
F2 = gbm.F2(train=False, valid=True, xval=True)
# accuracy
accuracy1 = gbm.accuracy(train=True, valid=False, xval=False)
accuracy2 = gbm.accuracy(train=False, valid=True, xval=False)
accuracy3 = gbm.accuracy(train=False, valid=False, xval=True)
accuracy = gbm.accuracy(train=True, valid=True, xval=False)
accuracy = gbm.accuracy(train=True, valid=False, xval=True)
accuracy = gbm.accuracy(train=True, valid=True, xval=True)
accuracy = gbm.accuracy(train=False, valid=False, xval=False) # default: return training metrics
accuracy = gbm.accuracy(train=False, valid=True, xval=True)
# error
error1 = gbm.error(train=True, valid=False, xval=False)
error2 = gbm.error(train=False, valid=True, xval=False)
error3 = gbm.error(train=False, valid=False, xval=True)
error = gbm.error(train=True, valid=True, xval=False)
error = gbm.error(train=True, valid=False, xval=True)
error = gbm.error(train=True, valid=True, xval=True)
error = gbm.error(train=False, valid=False, xval=False) # default: return training metrics
error = gbm.error(train=False, valid=True, xval=True)
# precision
precision1 = gbm.precision(train=True, valid=False, xval=False)
precision2 = gbm.precision(train=False, valid=True, xval=False)
precision3 = gbm.precision(train=False, valid=False, xval=True)
precision = gbm.precision(train=True, valid=True, xval=False)
precision = gbm.precision(train=True, valid=False, xval=True)
precision = gbm.precision(train=True, valid=True, xval=True)
precision = gbm.precision(train=False, valid=False, xval=False) # default: return training metrics
precision = gbm.precision(train=False, valid=True, xval=True)
# mcc
mcc1 = gbm.mcc(train=True, valid=False, xval=False)
mcc2 = gbm.mcc(train=False, valid=True, xval=False)
mcc3 = gbm.mcc(train=False, valid=False, xval=True)
mcc = gbm.mcc(train=True, valid=True, xval=False)
mcc = gbm.mcc(train=True, valid=False, xval=True)
mcc = gbm.mcc(train=True, valid=True, xval=True)
mcc = gbm.mcc(train=False, valid=False, xval=False) # default: return training metrics
mcc = gbm.mcc(train=False, valid=True, xval=True)
# max_per_class_error
max_per_class_error1 = gbm.max_per_class_error(train=True, valid=False, xval=False)
max_per_class_error2 = gbm.max_per_class_error(train=False, valid=True, xval=False)
max_per_class_error3 = gbm.max_per_class_error(train=False, valid=False, xval=True)
max_per_class_error = gbm.max_per_class_error(train=True, valid=True, xval=False)
max_per_class_error = gbm.max_per_class_error(train=True, valid=False, xval=True)
max_per_class_error = gbm.max_per_class_error(train=True, valid=True, xval=True)
max_per_class_error = gbm.max_per_class_error(train=False, valid=False, xval=False) # default: return training metrics
max_per_class_error = gbm.max_per_class_error(train=False, valid=True, xval=True)
# mean_per_class_error
mean_per_class_error1 = gbm.mean_per_class_error(train=True, valid=False, xval=False)
mean_per_class_error2 = gbm.mean_per_class_error(train=False, valid=True, xval=False)
mean_per_class_error3 = gbm.mean_per_class_error(train=False, valid=False, xval=True)
mean_per_class_error = gbm.mean_per_class_error(train=True, valid=True, xval=False)
mean_per_class_error = gbm.mean_per_class_error(train=True, valid=False, xval=True)
mean_per_class_error = gbm.mean_per_class_error(train=True, valid=True, xval=True)
mean_per_class_error = gbm.mean_per_class_error(train=False, valid=False, xval=False) # default: return training metrics
mean_per_class_error = gbm.mean_per_class_error(train=False, valid=True, xval=True)
# confusion_matrix
confusion_matrix1 = gbm.confusion_matrix(train=True, valid=False, xval=False)
confusion_matrix2 = gbm.confusion_matrix(train=False, valid=True, xval=False)
confusion_matrix3 = gbm.confusion_matrix(train=False, valid=False, xval=True)
confusion_matrix = gbm.confusion_matrix(train=True, valid=True, xval=False)
confusion_matrix = gbm.confusion_matrix(train=True, valid=False, xval=True)
confusion_matrix = gbm.confusion_matrix(train=True, valid=True, xval=True)
confusion_matrix = gbm.confusion_matrix(train=False, valid=False, xval=False) # default: return training metrics
confusion_matrix = gbm.confusion_matrix(train=False, valid=True, xval=True)
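# note: the thresholded-metric blocks above (F1 through confusion_matrix) and
# the tpr/tnr/fnr/fpr blocks below only verify that each accessor accepts
# every train/valid/xval combination without raising; the returned values are
# not type-checked here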
# plot (left commented out: plotting is not exercised in this test)
# plot1 = gbm.plot(train=True, valid=False, xval=False)
# plot2 = gbm.plot(train=False, valid=True, xval=False)
# plot3 = gbm.plot(train=False, valid=False, xval=True)
# plot = gbm.plot(train=True, valid=True, xval=False)
# plot = gbm.plot(train=True, valid=False, xval=True)
# plot = gbm.plot(train=True, valid=True, xval=True)
# plot = gbm.plot(train=False, valid=False, xval=False) # default: return training metrics
# plot = gbm.plot(train=False, valid=True, xval=True)
# tpr
tpr1 = gbm.tpr(train=True, valid=False, xval=False)
tpr2 = gbm.tpr(train=False, valid=True, xval=False)
tpr3 = gbm.tpr(train=False, valid=False, xval=True)
tpr = gbm.tpr(train=True, valid=True, xval=False)
tpr = gbm.tpr(train=True, valid=False, xval=True)
tpr = gbm.tpr(train=True, valid=True, xval=True)
tpr = gbm.tpr(train=False, valid=False, xval=False) # default: return training metrics
tpr = gbm.tpr(train=False, valid=True, xval=True)
# tnr
tnr1 = gbm.tnr(train=True, valid=False, xval=False)
tnr2 = gbm.tnr(train=False, valid=True, xval=False)
tnr3 = gbm.tnr(train=False, valid=False, xval=True)
tnr = gbm.tnr(train=True, valid=True, xval=False)
tnr = gbm.tnr(train=True, valid=False, xval=True)
tnr = gbm.tnr(train=True, valid=True, xval=True)
tnr = gbm.tnr(train=False, valid=False, xval=False) # default: return training metrics
tnr = gbm.tnr(train=False, valid=True, xval=True)
# fnr
fnr1 = gbm.fnr(train=True, valid=False, xval=False)
fnr2 = gbm.fnr(train=False, valid=True, xval=False)
fnr3 = gbm.fnr(train=False, valid=False, xval=True)
fnr = gbm.fnr(train=True, valid=True, xval=False)
fnr = gbm.fnr(train=True, valid=False, xval=True)
fnr = gbm.fnr(train=True, valid=True, xval=True)
fnr = gbm.fnr(train=False, valid=False, xval=False) # default: return training metrics
fnr = gbm.fnr(train=False, valid=True, xval=True)
# fpr
fpr1 = gbm.fpr(train=True, valid=False, xval=False)
fpr2 = gbm.fpr(train=False, valid=True, xval=False)
fpr3 = gbm.fpr(train=False, valid=False, xval=True)
fpr = gbm.fpr(train=True, valid=True, xval=False)
fpr = gbm.fpr(train=True, valid=False, xval=True)
fpr = gbm.fpr(train=True, valid=True, xval=True)
fpr = gbm.fpr(train=False, valid=False, xval=False) # default: return training metrics
fpr = gbm.fpr(train=False, valid=True, xval=True)
# multinomial
cars = h2o.import_file(path=pyunit_utils.locate("smalldata/junit/cars_20mpg.csv"))
cars["cylinders"] = cars["cylinders"].asfactor()
r = cars[0].runif()
train = cars[r > .2]
valid = cars[r <= .2]
response_col = "cylinders"
distribution = "multinomial"
predictors = ["displacement","power","weight","acceleration","year"]
gbm = H2OGradientBoostingEstimator(nfolds=3, distribution=distribution, fold_assignment="Random")
gbm.train(x=predictors, y=response_col, training_frame=train, validation_frame=valid)
# using list from http://docs.h2o.ai/h2o/latest-stable/h2o-docs/performance-and-prediction.html#classification
# + common ones
for metric in ['logloss', 'mse', 'rmse', 'mean_per_class_error']:
val = getattr(gbm, metric)()
assert isinstance(val, float), "expected a float for metric {} but got {}".format(metric, val)
# no thresholded metrics apply to multinomial models, so there is no second
# accessor loop here
# mse
mse1 = gbm.mse(train=True, valid=False, xval=False)
assert isinstance(mse1, float)
mse2 = gbm.mse(train=False, valid=True, xval=False)
assert isinstance(mse2, float)
mse3 = gbm.mse(train=False, valid=False, xval=True)
assert isinstance(mse3, float)
mse = gbm.mse(train=True, valid=True, xval=False)
assert "train" in list(mse.keys()) and "valid" in list(mse.keys()), "expected training and validation metrics to be returned, but got {0}".format(list(mse.keys()))
assert len(mse) == 2, "expected only training and validation metrics to be returned, but got {0}".format(list(mse.keys()))
assert isinstance(mse["train"], float) and isinstance(mse["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(mse["train"]), type(mse["valid"]))
assert mse["valid"] == mse2
mse = gbm.mse(train=True, valid=False, xval=True)
assert "train" in list(mse.keys()) and "xval" in list(mse.keys()), "expected training and cross validation metrics to be returned, but got {0}".format(list(mse.keys()))
assert len(mse) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(list(mse.keys()))
assert isinstance(mse["train"], float) and isinstance(mse["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(mse["train"]), type(mse["xval"]))
assert mse["xval"] == mse3
mse = gbm.mse(train=True, valid=True, xval=True)
assert "train" in list(mse.keys()) and "valid" in list(mse.keys()) and "xval" in list(mse.keys()), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(list(mse.keys()))
assert len(mse) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(list(mse.keys()))
assert isinstance(mse["train"], float) and isinstance(mse["valid"], float) and isinstance(mse["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(mse["train"]), type(mse["valid"]), type(mse["xval"]))
mse = gbm.mse(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(mse, float)
assert mse == mse1
mse = gbm.mse(train=False, valid=True, xval=True)
assert "valid" in list(mse.keys()) and "xval" in list(mse.keys()), "expected validation and cross validation metrics to be returned, but got {0}".format(list(mse.keys()))
assert len(mse) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(list(mse.keys()))
assert isinstance(mse["valid"], float) and isinstance(mse["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(mse["valid"]), type(mse["xval"]))
# logloss
logloss1 = gbm.logloss(train=True, valid=False, xval=False)
assert isinstance(logloss1, float)
logloss2 = gbm.logloss(train=False, valid=True, xval=False)
assert isinstance(logloss2, float)
logloss3 = gbm.logloss(train=False, valid=False, xval=True)
assert isinstance(logloss3, float)
logloss = gbm.logloss(train=True, valid=True, xval=False)
assert "train" in list(logloss.keys()) and "valid" in list(logloss.keys()), "expected training and validation metrics to be returned, but got {0}".format(list(logloss.keys()))
assert len(logloss) == 2, "expected only training and validation metrics to be returned, but got {0}".format(list(logloss.keys()))
assert isinstance(logloss["train"], float) and isinstance(logloss["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(logloss["train"]), type(logloss["valid"]))
assert logloss["valid"] == logloss2
logloss = gbm.logloss(train=True, valid=False, xval=True)
assert "train" in list(logloss.keys()) and "xval" in list(logloss.keys()), "expected training and cross validation metrics to be returned, but got {0}".format(list(logloss.keys()))
assert len(logloss) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(list(logloss.keys()))
assert isinstance(logloss["train"], float) and isinstance(logloss["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(logloss["train"]), type(logloss["xval"]))
assert logloss["xval"] == logloss3
logloss = gbm.logloss(train=True, valid=True, xval=True)
assert "train" in list(logloss.keys()) and "valid" in list(logloss.keys()) and "xval" in list(logloss.keys()), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(list(logloss.keys()))
assert len(logloss) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(list(logloss.keys()))
assert isinstance(logloss["train"], float) and isinstance(logloss["valid"], float) and isinstance(logloss["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(logloss["train"]), type(logloss["valid"]), type(logloss["xval"]))
logloss = gbm.logloss(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(logloss, float)
assert logloss == logloss1
logloss = gbm.logloss(train=False, valid=True, xval=True)
assert "valid" in list(logloss.keys()) and "xval" in list(logloss.keys()), "expected validation and cross validation metrics to be returned, but got {0}".format(list(logloss.keys()))
assert len(logloss) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(list(logloss.keys()))
assert isinstance(logloss["valid"], float) and isinstance(logloss["xval"], float), "validation and cross validation metrics to be floats, but got {0} and {1}".format(type(logloss["valid"]), type(logloss["xval"]))
# hit_ratio_table
hit_ratio_table1 = gbm.hit_ratio_table(train=True, valid=False, xval=False)
hit_ratio_table2 = gbm.hit_ratio_table(train=False, valid=True, xval=False)
hit_ratio_table3 = gbm.hit_ratio_table(train=False, valid=False, xval=True)
hit_ratio_table = gbm.hit_ratio_table(train=True, valid=True, xval=False)
hit_ratio_table = gbm.hit_ratio_table(train=True, valid=False, xval=True)
hit_ratio_table = gbm.hit_ratio_table(train=True, valid=True, xval=True)
hit_ratio_table = gbm.hit_ratio_table(train=False, valid=False, xval=False) # default: return training metrics
hit_ratio_table = gbm.hit_ratio_table(train=False, valid=True, xval=True)
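# hedged sketch (assumes the usual H2OTwoDimTable cell_values layout): the
# first row of the hit-ratio table should carry the top-1 hit ratio, in [0, 1]
_top1 = float(hit_ratio_table1.cell_values[0][1])
assert 0.0 <= _top1 <= 1.0, "expected a top-1 hit ratio in [0, 1], but got {0}".format(_top1)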
# mean_per_class_error
mean_per_class_error1 = gbm.mean_per_class_error(train=True, valid=False, xval=False)
mean_per_class_error2 = gbm.mean_per_class_error(train=False, valid=True, xval=False)
mean_per_class_error3 = gbm.mean_per_class_error(train=False, valid=False, xval=True)
mean_per_class_error = gbm.mean_per_class_error(train=True, valid=True, xval=False)
mean_per_class_error = gbm.mean_per_class_error(train=True, valid=False, xval=True)
mean_per_class_error = gbm.mean_per_class_error(train=True, valid=True, xval=True)
mean_per_class_error = gbm.mean_per_class_error(train=False, valid=False, xval=False) # default: return training metrics
mean_per_class_error = gbm.mean_per_class_error(train=False, valid=True, xval=True)
# clustering
iris = h2o.import_file(path=pyunit_utils.locate("smalldata/iris/iris.csv"))
from h2o.estimators.kmeans import H2OKMeansEstimator
km = H2OKMeansEstimator(k=3, nfolds=3)
km.train(x=list(range(4)), training_frame=iris)
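# no validation frame is supplied here, so the betweenss/totss/tot_withinss
# checks below only exercise the training and cross-validation paths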
# betweenss
betweenss1 = km.betweenss(train=True, valid=False, xval=False)
assert isinstance(betweenss1, float)
betweenss3 = km.betweenss(train=False, valid=False, xval=True)
assert isinstance(betweenss3, float)
betweenss = km.betweenss(train=True, valid=False, xval=True)
assert "train" in list(betweenss.keys()) and "xval" in list(betweenss.keys()), "expected training and cross validation metrics to be returned, but got {0}".format(list(betweenss.keys()))
assert len(betweenss) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(list(betweenss.keys()))
assert isinstance(betweenss["train"], float) and isinstance(betweenss["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(betweenss["train"]), type(betweenss["xval"]))
assert betweenss["xval"] == betweenss3
betweenss = km.betweenss(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(betweenss, float)
assert betweenss == betweenss1
# totss
totss1 = km.totss(train=True, valid=False, xval=False)
assert isinstance(totss1, float)
totss3 = km.totss(train=False, valid=False, xval=True)
assert isinstance(totss3, float)
totss = km.totss(train=True, valid=False, xval=True)
assert "train" in list(totss.keys()) and "xval" in list(totss.keys()), "expected training and cross validation metrics to be returned, but got {0}".format(list(totss.keys()))
assert len(totss) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(list(totss.keys()))
assert isinstance(totss["train"], float) and isinstance(totss["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(totss["train"]), type(totss["xval"]))
assert totss["xval"] == totss3
totss = km.totss(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(totss, float)
assert totss == totss1
# tot_withinss
tot_withinss1 = km.tot_withinss(train=True, valid=False, xval=False)
assert isinstance(tot_withinss1, float)
tot_withinss3 = km.tot_withinss(train=False, valid=False, xval=True)
assert isinstance(tot_withinss3, float)
tot_withinss = km.tot_withinss(train=True, valid=False, xval=True)
assert "train" in list(tot_withinss.keys()) and "xval" in list(tot_withinss.keys()), "expected training and cross validation metrics to be returned, but got {0}".format(list(tot_withinss.keys()))
assert len(tot_withinss) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(list(tot_withinss.keys()))
assert isinstance(tot_withinss["train"], float) and isinstance(tot_withinss["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(tot_withinss["train"]), type(tot_withinss["xval"]))
assert tot_withinss["xval"] == tot_withinss3
tot_withinss = km.tot_withinss(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(tot_withinss, float)
assert tot_withinss == tot_withinss1
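# hedged cross-check (assumption, not in the original test): the total sum of
# squares should decompose into between-cluster plus within-cluster SS
assert abs(totss1 - (betweenss1 + tot_withinss1)) < 1e-6 * max(1.0, abs(totss1)), "expected totss {0} to equal betweenss {1} plus tot_withinss {2}".format(totss1, betweenss1, tot_withinss1)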
# withinss
withinss1 = km.withinss(train=True, valid=False)
withinss2 = km.withinss(train=True, valid=True)
withinss3 = km.withinss(train=False, valid=False) # default: return training metrics
assert withinss1 == withinss3
assert withinss1 != withinss2
# centroid_stats
centroid_stats1 = km.centroid_stats(train=True, valid=False)
centroid_stats2 = km.centroid_stats(train=True, valid=True)
centroid_stats3 = km.centroid_stats(train=False, valid=False) # default: return training metrics
assert centroid_stats1 == centroid_stats3
assert centroid_stats1 != centroid_stats2
# size
size1 = km.size(train=True, valid=False)
size2 = km.size(train=True, valid=True)
size3 = km.size(train=False, valid=False) # default: return training metrics
assert size1 == size3
assert size1 != size2
if __name__ == "__main__":
pyunit_utils.standalone_test(metric_accessors)
else:
metric_accessors()